Code: Select all
from gensim.models import Word2Vec

# Collect every tokenized sentence from the DataFrame into one corpus
# (assumes each row holds a list of tokens)
sentences = [row for _, row in df.iterrows()]

# Build and train a single model on the full corpus instead of re-creating it per row
# (size= was renamed to vector_size= in gensim 4.x)
embedding_model = Word2Vec(sentences, size=100, window=5, min_count=2, workers=2)
embedding_model.train(sentences, total_examples=len(sentences), epochs=10)
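
Once trained, the vectors can be sanity-checked like this (a quick sketch; "example" is just a placeholder token, swap in any word from your own corpus that survived the min_count=2 cutoff):

Code: Select all
# Look up the learned vector for a token and its nearest neighbours
print(embedding_model.wv['example'])
print(embedding_model.wv.most_similar('example', topn=5))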