Using an LSTM for part-of-speech tagging

import torch
import torch.nn as nn
import torch.nn.functional as F  # needed for F.log_softmax below
from torch import optim

def prepare_sequence(seq, to_ix):
    # map each token in seq to its integer index and return a LongTensor
    idxs = [to_ix[w] for w in seq]
    return torch.tensor(idxs, dtype=torch.long)
training_data = [('the dog ate the apple'.split(),['DET','NN','V','DET','NN']),
                ('Everybody read that book'.split(),['NN','V','DET','NN'])
                ]
word_to_ix = {}
for sent,tags in training_data:
    for word in sent:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
print(word_to_ix)
# output: {'the': 0, 'dog': 1, 'ate': 2, 'apple': 3, 'Everybody': 4, 'read': 5, 'that': 6, 'book': 7}
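
With the vocabulary built, prepare_sequence turns a sentence into an index tensor. A quick sanity check on the first training sentence (expected output shown as a comment):

# 'the dog ate the apple' -> indices from word_to_ix
inputs = prepare_sequence(training_data[0][0], word_to_ix)
print(inputs)  # tensor([0, 1, 2, 0, 3])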
EMBEDDING_DIM = 6
HIDDEN_DIM = 6
class LSTMTagger(nn.Module):
    def __init__(self, embedding_dim, hidden_dim, vocab_size, tagset_size):
        super(LSTMTagger, self).__init__()
        self.hidden_dim = hidden_dim

        # map word indices to dense embedding vectors
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)

        # the LSTM takes word embeddings as inputs and outputs hidden states of size hidden_dim
        self.lstm = nn.LSTM(embedding_dim, hidden_dim)

        # linear layer mapping hidden states to tag scores
        self.hidden2tag = nn.Linear(hidden_dim, tagset_size)
        self.hidden = self.init_hidden()
    def init_hidden(self):
        # (h0, c0), each of shape (num_layers, batch, hidden_dim)
        return (torch.zeros(1, 1, self.hidden_dim),
                torch.zeros(1, 1, self.hidden_dim))
    def forward(self, sentence):
        embeds = self.word_embeddings(sentence)
        # nn.LSTM expects input of shape (seq_len, batch, input_size)
        lstm_out, self.hidden = self.lstm(embeds.view(len(sentence), 1, -1), self.hidden)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence), -1))
        tag_scores = F.log_softmax(tag_space, dim=1)
        return tag_scores
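
The view calls in forward exist because nn.LSTM (with the default batch_first=False) expects input of shape (seq_len, batch, input_size) and returns output of shape (seq_len, batch, hidden_size). A minimal standalone shape check, reusing the same dimensions as EMBEDDING_DIM and HIDDEN_DIM above:

lstm = nn.LSTM(6, 6)                # input_size=6, hidden_size=6
x = torch.randn(5, 1, 6)            # a 5-word sentence, batch size 1
h0 = (torch.zeros(1, 1, 6), torch.zeros(1, 1, 6))
out, (hn, cn) = lstm(x, h0)
print(out.shape)                    # torch.Size([5, 1, 6])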
tag_to_ix = {'DET': 0, 'NN': 1, 'V': 2}
model = LSTMTagger(EMBEDDING_DIM, HIDDEN_DIM, len(word_to_ix), len(tag_to_ix))
loss_function = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.1)
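
The post stops before the training loop, so here is a minimal sketch in the style of the official PyTorch sequence-model tutorial this code follows; the epoch count of 300 is a conventional choice for this toy dataset, not taken from the original:

for epoch in range(300):
    for sentence, tags in training_data:
        model.zero_grad()
        model.hidden = model.init_hidden()  # reset the hidden state for each sentence
        sentence_in = prepare_sequence(sentence, word_to_ix)
        targets = prepare_sequence(tags, tag_to_ix)
        tag_scores = model(sentence_in)
        loss = loss_function(tag_scores, targets)
        loss.backward()
        optimizer.step()

# after training, predict tags for the first sentence
with torch.no_grad():
    inputs = prepare_sequence(training_data[0][0], word_to_ix)
    print(model(inputs).argmax(dim=1))  # one predicted tag index per word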

Reposted from blog.csdn.net/qq_42830971/article/details/126589059