import tensorflow as tf
import numpy as np
class TextCNN(object):
    """
    A CNN for text classification.
    Uses an embedding layer, followed by convolutional, max-pooling and softmax layers.
    """
    # sequence_length - maximum sentence length in tokens
    # num_classes     - number of output classes
    # vocab_size      - total vocabulary size
    # embedding_size  - dimensionality of the word embeddings
    # filter_sizes    - convolution filter heights, e.g. 3, 4, 5
    # num_filters     - number of filters per filter size
    # l2_reg_lambda   - L2 regularization coefficient
    def __init__(
            self, sequence_length, num_classes, vocab_size,
            embedding_size, filter_sizes, num_filters, l2_reg_lambda=0.0):
        # Placeholders for input, output and dropout
        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name="input_x")
        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name="input_y")
        self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
        # Keeping track of l2 regularization loss (optional)
        l2_loss = tf.constant(0.0)
        # Embedding layer
        with tf.device('/cpu:0'), tf.name_scope("embedding"):
            # Embedding matrix of shape [vocab_size, embedding_size],
            # initialized uniformly in [-1.0, 1.0]
            self.W = tf.Variable(
                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                name="W")
            # Look up the embedding vector for each token id in input_x
            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)
            # Add a channel dimension: [batch_size, sequence_length, embedding_size, 1]
            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)
        # Create a convolution + maxpool layer for each filter size (e.g. 3, 4, 5)
        pooled_outputs = []
        for i, filter_size in enumerate(filter_sizes):
            with tf.name_scope("conv-maxpool-%s" % filter_size):
                # Convolution filter of shape
                # [filter_size, embedding_size, in_channels=1, num_filters]
                filter_shape = [filter_size, embedding_size, 1, num_filters]
                # Weights drawn from a truncated normal distribution
                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name="W")
                # One bias per filter
                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name="b")
                conv = tf.nn.conv2d(
                    self.embedded_chars_expanded,
                    W,
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, b), name="relu")
                # Max-pooling over the outputs. With VALID padding the conv
                # output height is sequence_length - filter_size + 1, so this
                # pools it down to a single value per filter.
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)
        # Combine all the pooled features: concatenate the pooled outputs and
        # reshape to [batch_size, num_filters * len(filter_sizes)], apply
        # dropout against overfitting, then a fully connected + softmax layer
        # maps the features to per-class probabilities.
        num_filters_total = num_filters * len(filter_sizes)
        self.h_pool = tf.concat(pooled_outputs, 3)
        # Flatten to [batch_size, num_filters_total]
        self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])
        # Add dropout
        with tf.name_scope("dropout"):
            self.h_drop = tf.nn.dropout(self.h_pool_flat, self.dropout_keep_prob)
        # Final (unnormalized) scores and predictions
        with tf.name_scope("output"):
            # Output weights of shape [num_filters_total, num_classes]
            W = tf.get_variable(
                "W",
                shape=[num_filters_total, num_classes],
                initializer=tf.contrib.layers.xavier_initializer())
            b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name="b")
            # Accumulate the L2 regularization loss over the output weights and bias
            l2_loss += tf.nn.l2_loss(W)
            l2_loss += tf.nn.l2_loss(b)
            # xw_plus_b computes matmul(x, weights) + biases
            self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name="scores")
            self.predictions = tf.argmax(self.scores, 1, name="predictions")
        # Calculate mean cross-entropy loss
        with tf.name_scope("loss"):
            # Softmax cross entropy between `logits` and `labels`
            losses = tf.nn.softmax_cross_entropy_with_logits(logits=self.scores, labels=self.input_y)
            # Mean over the batch, plus the weighted L2 regularization term
            self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss
        # Accuracy
        with tf.name_scope("accuracy"):
            correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))
            self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, "float"), name="accuracy")
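
# A minimal usage sketch; the concrete values are hypothetical, taken from the
# notes below (56-token sentences, two classes, vocab_size 18758, embedding 128,
# filter sizes 3,4,5, 128 filters per size):
cnn = TextCNN(
    sequence_length=56,
    num_classes=2,
    vocab_size=18758,
    embedding_size=128,
    filter_sizes=[3, 4, 5],
    num_filters=128,
    l2_reg_lambda=0.0)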
Notes on the training script:
(1) Flags:
Data parameters:
    dev/test split: 0.1 (fraction of the data held out for validation)
    data file paths
Model parameters:
    embedding dimension: 128
    filter sizes: 3,4,5
    num_filters: 128
    dropout: 0.5
    L2: 0.0
Training parameters:
    batch_size: 64
    num_epochs: 200
    evaluate_every: 100 steps
    checkpoint_every: 100 steps (for saving the model)
    num_checkpoints: 5 (number of checkpoints to keep)
Misc parameters:
    allow soft placement: true
    log device placement: false
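
A sketch of how these flags might be declared with TF1-style tf.flags; the exact flag names are assumptions, chosen to match the values in the notes:

import tensorflow as tf

tf.flags.DEFINE_float("dev_sample_percentage", 0.1, "Fraction of data held out for validation")
tf.flags.DEFINE_integer("embedding_dim", 128, "Dimensionality of the word embeddings")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes")
tf.flags.DEFINE_integer("num_filters", 128, "Number of filters per filter size")
tf.flags.DEFINE_float("dropout_keep_prob", 0.5, "Dropout keep probability")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda")
tf.flags.DEFINE_integer("batch_size", 64, "Batch size")
tf.flags.DEFINE_integer("num_epochs", 200, "Number of training epochs")
tf.flags.DEFINE_integer("evaluate_every", 100, "Evaluate on the dev set every N steps")
tf.flags.DEFINE_integer("checkpoint_every", 100, "Save a model checkpoint every N steps")
tf.flags.DEFINE_integer("num_checkpoints", 5, "Number of checkpoints to keep")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log op placement on devices")
FLAGS = tf.flags.FLAGS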
(2) Data preparation:
Load the data and build the vocabulary.
The longest sentence in the corpus has 56 tokens, so every example is indexed to length 56.
vocab_processor maps each word to an integer id (roughly the lookup side of the word embedding).
Shuffle the data, then split it into training and dev sets.
y has two possible outcomes (two classes).
vocab_size: 18758
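
A sketch of this preparation step using TF1's VocabularyProcessor; x_text (the raw sentences) and y (one-hot labels) are assumed already loaded, and the 10% split follows the flag above:

import numpy as np
from tensorflow.contrib import learn

# Longest sentence in the corpus (56 tokens, per the notes)
max_document_length = max(len(x.split(" ")) for x in x_text)
vocab_processor = learn.preprocessing.VocabularyProcessor(max_document_length)
x = np.array(list(vocab_processor.fit_transform(x_text)))  # word -> integer id

# Shuffle, then hold out the last 10% as the dev set
np.random.seed(10)
shuffle_indices = np.random.permutation(np.arange(len(y)))
x_shuffled, y_shuffled = x[shuffle_indices], y[shuffle_indices]
dev_index = -int(0.1 * len(y))
x_train, x_dev = x_shuffled[:dev_index], x_shuffled[dev_index:]
y_train, y_dev = y_shuffled[:dev_index], y_shuffled[dev_index:]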
(3) Training setup:
Set up global_step, an optimizer, and gradients computed from the loss;
set up summary variables to track the gradients.
Zip x and y together, split the pairs into batches, then unzip each batch.
The train step feeds x, y and the dropout keep probability into the TextCNN model:
    x: (1066, 56)
    y: (1066, 2)
    dropout: 0.5
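
A sketch of that setup, assuming `cnn` is the TextCNN instance from above; the Adam learning rate is an assumption:

global_step = tf.Variable(0, name="global_step", trainable=False)
optimizer = tf.train.AdamOptimizer(1e-3)  # learning rate assumed
grads_and_vars = optimizer.compute_gradients(cnn.loss)
train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

# Zip x and y together, then yield shuffled mini-batches epoch by epoch
def batch_iter(data, batch_size, num_epochs):
    data = np.array(data)
    num_batches = (len(data) - 1) // batch_size + 1
    for _ in range(num_epochs):
        shuffled = data[np.random.permutation(len(data))]
        for i in range(num_batches):
            yield shuffled[i * batch_size:(i + 1) * batch_size]

batches = batch_iter(list(zip(x_train, y_train)), batch_size=64, num_epochs=200)
# Each batch is then unzipped back into x_batch, y_batch = zip(*batch)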
(4) Shape walkthrough:
input_x: (1066, 56)
W: (19758, 128), with values uniform in [-1, 1] (elsewhere the notes say 18758; one of the two is a typo).
Why is embedded_chars (1066, 56, 128)? Each of the 56 token ids in a row of
input_x is looked up as a 128-dimensional row of W.
embedded_chars: (1066, 56, 128)
embedded_chars_expanded: (1066, 56, 128, 1)
conv2d: embedded_chars_expanded (1066, 56, 128, 1) convolved with
W of shape (3, 128, 1, 2) gives (1066, 54, 1, 2): height 56 - 3 + 1 = 54, with 2 filters.
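
The conv and pool heights above follow the VALID-padding formula; a tiny helper (the function name is ours) to check the arithmetic:

def valid_out_len(in_len, window, stride=1):
    """Output length along one dimension for a VALID-padded conv or pool."""
    return (in_len - window) // stride + 1

assert valid_out_len(56, 3) == 54   # conv height for filter_size 3
assert valid_out_len(54, 54) == 1   # max-pool over the entire conv height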
(5) A second toy example (CNN output feeding an LSTM):
Input: [batch, 12, 256, 1]
W = [3, 256, 1, 10]; the 10 means 10 filters, like separate channels (e.g. red/green/yellow).
The convolution gives [batch, 10, 1, 10], since 12 - 3 + 1 = 10, with 10 filters.
After max_pool with ksize [1, 4, 1, 1] this becomes [batch, 3, 1, 10] (roughly 10/4 + 1 = 3).
After reshape: [batch, 3, 10].
With three filter sizes [3, 4, 5] there are three [batch, 3, 10] tensors.
Merging them gives [batch, 3, 30].
Split that along the sequence dimension into three [batch, 1, 30] tensors;
tf.squeeze drops the size-1 dimension, leaving three [batch, 30] tensors ==> [3, batch, 30].
These three steps then go through a static_rnn with an LSTM cell of size 256,
which outputs three [batch, 256] tensors.
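
A sketch of that CNN-to-LSTM handoff in TF1; `h_combined` stands for the merged [batch, 3, 30] tensor, and the cell size of 256 comes from the note:

# Split the sequence dimension into three [batch, 1, 30] slices,
# then squeeze away the size-1 axis to get three [batch, 30] inputs
inputs = [tf.squeeze(t, axis=1) for t in tf.split(h_combined, 3, axis=1)]
cell = tf.contrib.rnn.BasicLSTMCell(256)
# static_rnn takes a Python list of per-step tensors and returns one output per step
outputs, state = tf.nn.static_rnn(cell, inputs, dtype=tf.float32)
# outputs is a list of three [batch, 256] tensors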
(6) Training loop:
Each train step runs the optimizer and the loss together with the gradients and
the global step, and records summaries for the gradients, the loss and the accuracy.
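
A sketch of one such train step, assuming `sess`, `train_op`, `global_step`, a merged `train_summary_op` and a `train_summary_writer` were created as usual in a TF1 training script:

def train_step(x_batch, y_batch):
    feed_dict = {
        cnn.input_x: x_batch,
        cnn.input_y: y_batch,
        cnn.dropout_keep_prob: 0.5,
    }
    # Run the train op; fetch the step, summaries, loss and accuracy
    _, step, summaries, loss, accuracy = sess.run(
        [train_op, global_step, train_summary_op, cnn.loss, cnn.accuracy],
        feed_dict)
    train_summary_writer.add_summary(summaries, step)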