Sentence embedding: a detailed code walkthrough

Downloading the dataset

Kaggle download link

If the download is slow or keeps failing, see the blog post on downloading Kaggle datasets quickly on a server.

Program file overview

  • get_all_data.py: data preprocessing (tokenization, vocabulary, window dataset)
  • model.py: the TensorFlow model definition
  • main.py: the training and evaluation entry point

Data preprocessing

  • Tokenization: split each document into individual words with nltk
  • Word counting: count the frequency of every word in the corpus and build the vocabulary from those frequencies
  • ID assignment: assign an ID to every word
  • ID representation: convert the dataset text the program needs into sequences of word IDs
  • Window dataset construction: build the dataset used to train this model according to the window size (a minimal sketch follows this list)
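
To make the last step concrete, here is a minimal sketch of how a tokenized, ID-encoded review is turned into window-10 language-model samples: nine context word IDs plus the following word as the label, left-padded with the <pad> ID when the review is shorter than the window. It is not part of the project files, and the sentence and toy vocabulary below are made up purely for illustration.

import nltk  # needs the punkt tokenizer data; see the note after the full listing below

sentence = "This movie was surprisingly good".lower()
tokens = nltk.word_tokenize(sentence)      # ['this', 'movie', 'was', 'surprisingly', 'good']

# toy vocabulary: 0 = <pad>, 1 = <unk>, remaining ids assigned by frequency
word2id = {"<pad>": 0, "<unk>": 1, "this": 2, "movie": 3, "was": 4, "good": 5}
ids = [word2id.get(w, 1) for w in tokens]  # [2, 3, 4, 1, 5]  ('surprisingly' is out of vocabulary)

window = 10
if len(ids) < window:
    # short review: left-pad the context and predict the last word
    samples = [([0] * (window - len(ids)) + ids[:-1], ids[-1])]
else:
    # slide a 9-word context over the review, labelling each window with the next word
    samples = [(ids[j:j + window - 1], ids[j + window - 1])
               for j in range(len(ids) - window + 1)]

print(samples)  # [([0, 0, 0, 0, 0, 2, 3, 4, 1], 5)]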

The following code lives in get_all_data.py.

import os
# from nltk.book import *
import nltk
import numpy as np
import random
from collections import Counter
import pickle


class Dataset():

    def __init__(self):
        if not os.path.exists("./data/train_word_datas"):
            train_datas, train_labels, train_unsup, test_datas, test_labels = self.get_all_datas()
            word2id = self.get_all_words(train_datas, train_unsup)
            train_datas = self.convert_data_word_to_id(word2id, train_datas)
            train_unsup = self.convert_data_word_to_id(word2id, train_unsup)
            test_datas = self.convert_data_word_to_id(word2id, test_datas)
            self.train_datas = train_datas
            self.train_labels = train_labels
            self.train_unsup = train_unsup
            self.test_datas = test_datas
            self.test_labels = test_labels
            # This can be just self.train_datas, or self.train_datas + self.train_unsup
            new_word_datas, new_papr_datas, new_labels = self.convert_data_to_new_data(self.train_datas)
            self.train_word_datas = new_word_datas
            self.train_para_datas = new_papr_datas
            # reshape the paragraph ids to [m, 1] so they match the para_input placeholder
            self.train_para_datas = self.train_para_datas.reshape([self.train_para_datas.shape[0], 1])
            self.train_new_labels = new_labels
            new_word_datas, new_papr_datas, new_labels = self.convert_data_to_new_data(self.test_datas)
            self.test_word_datas = new_word_datas
            self.test_para_datas = new_papr_datas
            self.test_para_datas = self.test_para_datas.reshape([self.test_para_datas.shape[0], 1])
            self.test_new_labels = new_labels
            pickle.dump(self.train_word_datas, open("./data/train_word_datas", "wb"))
            pickle.dump(self.train_para_datas, open("./data/train_para_datas", "wb"))
            pickle.dump(self.train_new_labels, open("./data/train_new_labels", "wb"))
            pickle.dump(self.train_labels, open("./data/train_labels", "wb"))
            pickle.dump(self.test_word_datas, open("./data/test_word_datas", "wb"))
            pickle.dump(self.test_para_datas, open("./data/test_para_datas", "wb"))
            pickle.dump(self.test_new_labels, open("./data/test_new_labels", "wb"))
            pickle.dump(self.test_labels, open("./data/test_labels", "wb"))
        else:
            self.train_word_datas = pickle.load(open("./data/train_word_datas", "rb"))
            self.train_para_datas = pickle.load(open("./data/train_para_datas", "rb"))
            self.train_para_datas = self.train_para_datas.reshape([self.train_para_datas.shape[0], 1])
            self.train_new_labels = pickle.load(open("./data/train_new_labels", "rb"))
            self.train_labels = pickle.load(open("./data/train_labels", "rb"))
            self.test_word_datas = pickle.load(open("./data/test_word_datas", "rb"))
            self.test_para_datas = pickle.load(open("./data/test_para_datas", "rb"))
            self.test_para_datas = self.test_para_datas.reshape([self.test_para_datas.shape[0], 1])
            self.test_new_labels = pickle.load(open("./data/test_new_labels", "rb"))
            self.test_labels = pickle.load(open("./data/test_labels", "rb"))

    def get_data(self, path):
        '''
        Read every file under the given directory path.
        :param path: the directory path
        :return: all the text data, one tokenized document per entry
        If nltk raises an error, install nltk first and then run nltk.download("popular") in Python.
        '''
        datas = []
        paths = os.listdir(path)
        paths = [path + file_name for file_name in paths]
        for i, file in enumerate(paths):
            if i % 1000 == 0:
                print(i, len(paths))
            data = open(file, "r").read()
            data = data.lower()
            data = nltk.word_tokenize(data)
            datas.append(data)
        return datas

    def get_all_datas(self):
        '''
        Load all training sentences, unsupervised sentences and test sentences.
        :return: training sentences, training labels, unsupervised sentences, test sentences, test labels
        '''
        train_neg_datas = self.get_data(path="data/aclImdb/train/neg/")
        train_pos_datas = self.get_data(path="data/aclImdb/train/pos/")
        train_unsup = self.get_data(path="data/aclImdb/train/unsup/")
        test_neg_datas = self.get_data(path="data/aclImdb/test/neg/")
        test_pos_datas = self.get_data(path="data/aclImdb/test/pos/")
        train_datas = train_neg_datas + train_pos_datas
        train_labels = [0] * len(train_neg_datas) + [1] * len(train_pos_datas)
        test_datas = test_neg_datas + test_pos_datas
        test_labels = [0] * len(test_neg_datas) + [1] * len(test_pos_datas)
        tmp = list(zip(train_datas, train_labels))
        random.shuffle(tmp)
        train_datas[:], train_labels[:] = zip(*tmp)
        tmp = list(zip(test_datas, test_labels))
        random.shuffle(tmp)
        test_datas[:], test_labels[:] = zip(*tmp)
        print(len(train_datas), len(train_labels))
        print(len(train_unsup))
        print(len(test_datas), len(test_labels))
        return train_datas, train_labels, train_unsup, test_datas, test_labels

    def get_all_words(self, train_datas, train_unsup):
        '''
        Count every word that appears in the training and unsupervised sentences, keep the
        29998 most frequent ones, and add <pad> and <unk> to build a 30000-word vocabulary.
        :param train_datas: all training sentences
        :param train_unsup: all unsupervised sentences
        :return: a 30000-word vocabulary mapping each word to an id
        '''
        all_words = []
        for sentence in train_datas:
            all_words.extend(sentence)
        for sentence in train_unsup:
            all_words.extend(sentence)
        count = Counter(all_words)
        count = dict(count.most_common(29998))
        word2id = {"<pad>": 0, "<unk>": 1}
        for word in count:
            word2id[word] = len(word2id)
        return word2id

    def convert_data_word_to_id(self, word2id, datas):
        '''
        Replace every word in datas with its id (unknown words map to <unk>, id 1).
        :param word2id: the 30000-word vocabulary
        :param datas: the data to convert
        :return: the converted data
        '''
        for i, sentence in enumerate(datas):
            for j, word in enumerate(sentence):
                datas[i][j] = word2id.get(word, 1)
        return datas

    def convert_data_to_new_data(self, datas):
        '''
        Build a language-model training set with a window size of 10; sentences shorter than
        10 words are left-padded with <pad>.
        :param datas: the sentences; you can use only the training sentences, or the training
                      sentences plus the unsupervised ones (which then needs longer training).
        :return: the window-10 training set, the paragraph ids and the word labels
        '''
        new_word_datas = []
        new_papr_datas = []
        new_labels = []
        for i, data in enumerate(datas):
            if i % 1000 == 0:
                print(i, len(datas))
            for j in range(len(data)):
                if len(data) < 10:  # the sentence is shorter than 10 words, so pad it
                    tmp_words = [0] * (10 - len(data)) + data[0:-1]
                    if set(tmp_words) == {1}:  # likewise, drop it if all 9 words are <unk>
                        break
                    new_word_datas.append(tmp_words)
                    new_papr_datas.append(i)
                    new_labels.append(data[-1])
                    break
                tmp_words = data[j: j + 9]
                if set(tmp_words) == {1}:  # a window of 9 consecutive <unk> carries no information, so drop it
                    continue
                new_papr_datas.append(i)
                new_word_datas.append(tmp_words)
                new_labels.append(data[j + 9])
                if j + 9 + 1 == len(data):  # break after the final 10-word window
                    break
        new_word_datas = np.array(new_word_datas)
        new_papr_datas = np.array(new_papr_datas)
        new_labels = np.array(new_labels)
        print(new_word_datas.shape)
        print(new_papr_datas.shape)
        print(new_labels.shape)
        return new_word_datas, new_papr_datas, new_labels


if __name__ == "__main__":
    # nltk.download('punkt')
    data = Dataset()

Running the code generates the preprocessed dataset automatically. One point worth stressing: the code may raise an error because it needs the nltk data package, and downloading it the way the official documentation describes almost always runs into problems. I recommend finding a pre-downloaded copy of the data package online and unpacking it into the expected directory yourself (a link to the nltk data package is given at the end).

What is mainly used here is the english.pickle file from the punkt package; I placed it at home/luoteng/nltk_data/tokenizers/punkt/PY3/english.pickle. If the file is not in the right directory, the program raises an error.
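
Before running the preprocessing, a quick sanity check along the following lines can confirm that nltk can actually see the punkt data. This is a small sketch and not one of the project files:

import nltk

try:
    # raises LookupError when tokenizers/punkt is missing from every nltk data directory
    punkt = nltk.data.find("tokenizers/punkt")
    print("punkt found at:", punkt)
except LookupError:
    print("punkt not found; nltk searches these directories:", nltk.data.path)
    # either let nltk try to download it, or unpack a manually downloaded
    # nltk_data archive into one of the directories printed above
    nltk.download("punkt")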

Building the model

The following code lives in model.py.

import tensorflow as tf
from sklearn.metrics import accuracy_score


class model():
    def __init__(self, train_first, train_second):
        self.window = 10  # predict the next word from the 9 preceding words
        self.para_num = 75000
        self.create_placeholder()
        self.model(train_first, train_second)

    def create_placeholder(self):
        '''
        Create the graph's input placeholders.
        self.word_input: the first n-1 words of the n-gram
        self.para_input: the paragraph (document) id
        self.word_label: the next-word label predicted by the language model
        self.label: the sentiment label of the sentence (positive or negative)
        :return:
        '''
        self.word_input = tf.placeholder(dtype=tf.int32, shape=[None, self.window - 1])
        self.para_input = tf.placeholder(dtype=tf.int32, shape=[None, 1])
        self.word_label = tf.placeholder(dtype=tf.int32, shape=[None])
        self.label = tf.placeholder(dtype=tf.int32, shape=[None])

    def model(self, train_first, train_second):
        '''
        :param train_first: when True, train the word vectors and the training-set paragraph vectors
        :param train_second: when True, freeze the word and paragraph vectors and train the
                             single-hidden-layer classifier for sentiment classification
        When both train_first and train_second are False, we are in the test phase.
        :return:
        '''
        with tf.variable_scope("train_parameters"):
            self.train_para_embedding = tf.Variable(initial_value=tf.truncated_normal(shape=[self.para_num, 400]),
                                                    trainable=True, name="train_para_embedding")
            self.word_embedding = tf.Variable(initial_value=tf.truncated_normal(shape=[30000, 400]),
                                              trainable=True, name="word_embedding")
        with tf.variable_scope("test_parameters"):
            self.test_para_embedding = tf.Variable(initial_value=tf.truncated_normal(shape=[25000, 400]), trainable=True,
                                                   name="test_para_embedding")
        if train_first or train_second:
            para_input = tf.nn.embedding_lookup(self.train_para_embedding, self.para_input)  # batch_size*1*400
        else:
            para_input = tf.nn.embedding_lookup(self.test_para_embedding, self.para_input)
        word_inut = tf.nn.embedding_lookup(self.word_embedding, self.word_input)  # batch_size*9*400

        input = tf.concat([word_inut, para_input], axis=1)  # batch_size*10*400
        input = tf.layers.flatten(input)  # batch_size*4000

        with tf.variable_scope("train_parameters"):
            output = tf.layers.dense(input, units=30000, name="word_output")
        labels = tf.one_hot(self.word_label, 30000)
        train_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "train_parameters")
        test_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "test_parameters")
        reg = tf.contrib.layers.apply_regularization(tf.contrib.layers.l2_regularizer(1e-10), tf.trainable_variables())
        self.loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=output)) + reg

        self.train_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.loss_op, var_list=train_var)
        self.test_op = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.loss_op, var_list=test_var)

        mlp_input = tf.reshape(para_input, [-1, 400])
        with tf.variable_scope("classification_parameters"):
            h1 = tf.layers.dense(mlp_input, units=50, activation=tf.nn.relu, trainable=True, name="h1")
            mlp_output = tf.layers.dense(h1, 2, trainable=True, name="mlp_output")
        mlp_labels = tf.one_hot(self.label, 2)
        self.mlp_loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(labels=mlp_labels, logits=mlp_output))
        classification_var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, "classification_parameters")
        self.mlp_train_op = tf.train.AdamOptimizer(learning_rate=0.02).minimize(self.mlp_loss_op, var_list=classification_var)
        self.predict_op = tf.argmax(mlp_output, axis=1)

    def train(self, sess, word_datas, para_datas, word_label, batch_size, is_train=True):
        '''
        :param sess: the tensorflow Session used to run the graph
        :param word_datas: all training word windows, of size m*9 where m is the number of samples
        :param para_datas: all training paragraph ids, of size m
        :param word_label: all word labels, of size m
        :param batch_size: the batch size, a scalar
        :param is_train: this function is used both during training and during testing,
                         so this flag marks which phase we are in
        :return: nothing
        '''
        index = 0
        while index < len(word_datas):
            word_data_batch = word_datas[index:index + batch_size]
            para_data_batch = para_datas[index:index + batch_size]
            word_label_batch = word_label[index:index + batch_size]
            if is_train:
                loss, _ = sess.run([self.loss_op, self.train_op],
                                   feed_dict={self.word_input: word_data_batch, self.para_input: para_data_batch,
                                              self.word_label: word_label_batch})
            else:
                loss, _ = sess.run([self.loss_op, self.test_op],
                                   feed_dict={self.word_input: word_data_batch, self.para_input: para_data_batch,
                                              self.word_label: word_label_batch})
            if index % (batch_size * 100) == 0:
                print("Train loss is:", loss)
                print(index, len(word_datas))
                if loss < 1:
                    print(word_data_batch)
                    print(word_label_batch)
            index += batch_size

    def train_mlp(self, sess, para_datas, labels, batch_size):
        '''
        :param sess: the tensorflow Session
        :param para_datas: all training sentence ids, 25000 in total
        :param labels: the sentiment labels of all sentences, 25000 in total
        :param batch_size: a scalar
        :return: nothing
        '''
        index = 0
        while index < len(para_datas):
            para_data_batch = para_datas[index:index + batch_size]
            label_batch = labels[index:index + batch_size]
            loss, _ = sess.run([self.mlp_loss_op, self.mlp_train_op],
                               feed_dict={self.para_input: para_data_batch,
                                          self.label: label_batch})
            if index % (batch_size * 100) == 0:
                # print("Train loss is:", loss)
                # print(index, len(para_datas))
                pass
            index += batch_size

    def test_mlp(self, sess, para_datas, labels, batch_size):
        '''
        :param sess: the tensorflow Session
        :param para_datas: all test sentence ids, a vector of size 25000
        :param labels: all test sentence labels, a vector of size 25000, used to evaluate the model
        :param batch_size: a scalar
        :return: nothing
        '''
        index = 0
        result = []
        while index < len(para_datas):
            para_data_batch = para_datas[index:index + batch_size]
            pred = sess.run(self.predict_op, feed_dict={self.para_input: para_data_batch})
            result += list(pred)
            index += batch_size

        acc = accuracy_score(y_true=labels, y_pred=result)
        print("Test acc is:", acc)

Training and prediction

The main entry point. The following code lives in main.py.

from get_all_data import Dataset
from model import model
import tensorflow as tf
import os
import numpy as np

os.environ['CUDA_VISIBLE_DEVICES'] = str(1)  # set the GPU id; without a GPU any value works and TensorFlow falls back to the CPU
data = Dataset()

session_config = tf.ConfigProto(
    log_device_placement=False,
    inter_op_parallelism_threads=0,
    intra_op_parallelism_threads=0,
    allow_soft_placement=True)
session_config.gpu_options.allow_growth = True  # let tensorflow allocate GPU memory on demand instead of grabbing it all at once

# Phase 1: train the word vectors and the training-set paragraph vectors.
m = model(train_first=True, train_second=False)
with tf.Session(config=session_config) as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    # Uncomment the next line to resume training: the paragraph vectors need many epochs (50+)
    # and training is very slow, so you can keep training from the checkpoint instead of starting over.
    # saver.restore(sess, "./model/result.ckpt")
    for i in range(50):
        m.train(sess, data.train_word_datas, data.train_para_datas, data.train_new_labels, 512)
        saver.save(sess, "./model/result.ckpt")

# Phase 2: freeze the embeddings and train the sentiment classifier.
tf.reset_default_graph()  # clear the graph and rebuild it for each phase
m = model(train_first=False, train_second=True)
with tf.Session(config=session_config) as sess:
    saver = tf.train.Saver()
    saver.restore(sess, "./model/result.ckpt")
    train_para = np.reshape(np.array(range(25000)), [25000, 1])
    for i in range(100):
        m.train_mlp(sess, train_para[0:20000], data.train_labels[0:20000], 32)  # train
        m.test_mlp(sess, train_para[20000:], data.train_labels[20000:], 32)  # validate
        saver.save(sess, "./model/result.ckpt")

# Phase 3: infer paragraph vectors for the test set while the word vectors stay fixed.
tf.reset_default_graph()
m = model(train_first=False, train_second=False)
with tf.Session(config=session_config) as sess:
    saver = tf.train.Saver()
    saver.restore(sess, "./model/result.ckpt")
    for i in range(50):
        m.train(sess, data.test_word_datas, data.test_para_datas, data.test_new_labels, 512, is_train=False)
        saver.save(sess, "./model/result.ckpt")

# Phase 4: evaluate the classifier on the test paragraph vectors.
tf.reset_default_graph()
m = model(train_first=False, train_second=False)
with tf.Session(config=session_config) as sess:
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()
    saver.restore(sess, "./model/result.ckpt")
    test_para = np.reshape(np.array(range(25000)), [25000, 1])
    m.test_mlp(sess, test_para, data.test_labels, 32)
    saver.save(sess, "./model/result.ckpt")

References

[1] The paper reproduced in this post: Distributed Representations of Sentences and Documents
Extraction code: zue5

[2] nltk data package: nltk_data.zip
Extraction code: jpd6
