```python
corpus = [
    'I love machine learning',
    'I love deep learning',
    'Deep learning is a subfield of machine learning',
    'AI is fascinating',
    'Machine learning is fascinating'
]
```

```python
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Tokenization
tokenizer = Tokenizer()
tokenizer.fit_on_texts(corpus)
total_words = len(tokenizer.word_index) + 1
# Convert each sentence into integer n-gram sequences
input_sequences = []
for line in corpus:
    token_list = tokenizer.texts_to_sequences([line])[0]
    for i in range(1, len(token_list)):
        # every prefix of length >= 2 becomes one training example
        n_gram_sequence = token_list[:i + 1]
        input_sequences.append(n_gram_sequence)
# Pad sequences to equal length ('pre' keeps the target word at the end)
max_sequence_len = max(len(x) for x in input_sequences)
input_sequences = pad_sequences(input_sequences, maxlen=max_sequence_len, padding='pre')
```
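It can help to sanity-check what the preprocessing produced. The snippet below is an optional inspection step, not part of the pipeline itself: it prints the learned vocabulary mapping and the padded sequence matrix.

```python
print(tokenizer.word_index)   # word -> integer index, ordered by frequency
print(input_sequences.shape)  # (number of n-gram sequences, max_sequence_len)
print(input_sequences[:3])    # first few padded sequences
```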
```python
from tensorflow.keras.layers import Embedding, LSTM, Dense
from tensorflow.keras.models import Sequential
model = Sequential()
# Embedding layer: one 10-dimensional vector per word; input_length is
# max_sequence_len - 1 because the last token of each sequence is the label
model.add(Embedding(total_words, 10, input_length=max_sequence_len - 1))
model.add(LSTM(50))
model.add(Dense(total_words, activation='softmax'))  # softmax over the vocabulary
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
model.summary()
```
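The parameter counts in the summary follow directly from the layer shapes. As a rough sanity check (an aside, not part of the original pipeline), they can be reproduced by hand:

```python
embedding_params = total_words * 10      # vocab size x embedding dimension
lstm_params = 4 * ((10 + 50 + 1) * 50)   # 4 gates x (input dim + units + bias) x units
dense_params = (50 + 1) * total_words    # (units + bias) x vocab size
print(embedding_params, lstm_params, dense_params)
```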
```python
from tensorflow.keras.utils import to_categorical
# Split each sequence: all tokens but the last are predictors, the last is the label
X = input_sequences[:, :-1]
y = input_sequences[:, -1]
# One-hot encoding the labels
y = to_categorical(y, num_classes=total_words)
# Training the model
model.fit(X, y, epochs=200, verbose=1)
```
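Once trained, the model can be used as a simple next-word predictor. A minimal sketch, assuming an arbitrary seed phrase (the seed text below is an illustration, not from the original code):

```python
import numpy as np

seed_text = 'machine learning'
token_list = tokenizer.texts_to_sequences([seed_text])[0]
padded = pad_sequences([token_list], maxlen=max_sequence_len - 1, padding='pre')
predicted_id = int(np.argmax(model.predict(padded, verbose=0), axis=-1)[0])
# index 0 is reserved for padding, so fall back to '?' just in case
print(tokenizer.index_word.get(predicted_id, '?'))
```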
```python
# The first layer is the Embedding; its weight matrix holds one vector per word
embedding_layer = model.layers[0]
weights = embedding_layer.get_weights()[0]  # shape: (total_words, 10)
# Create a dictionary to store the embeddings
word_embeddings = {}
for word, i in tokenizer.word_index.items():
    word_embeddings[word] = weights[i]
```

```python
print(word_embeddings['machine'])
```
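A common way to inspect the learned embeddings is to compare word vectors by cosine similarity; words that appear in similar contexts should end up closer together. A minimal sketch (the word pairs are arbitrary illustrations):

```python
import numpy as np

def cosine_similarity(a, b):
    # cosine of the angle between two embedding vectors
    return np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))

print(cosine_similarity(word_embeddings['machine'], word_embeddings['deep']))
print(cosine_similarity(word_embeddings['machine'], word_embeddings['fascinating']))
```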