22. 損失関数:学習の「コーチ」
# Ground-truth labels (one-hot), fed in at training time via feed_dict.
rps_labels = tf.placeholder(tf.float32)

# Loss = softmax + cross-entropy between the model's logits (rps_data)
# and the one-hot ground-truth labels.
loss = tf.losses.softmax_cross_entropy(
    logits=rps_data,
    onehot_labels=rps_labels,
)
The loss function combines softmax with cross-entropy: it measures the
error between the model's logits and the ground truth, where
rps_labels holds the ground-truth (one-hot) labels.
26. # define an optimizer
# Define the optimizer: plain gradient descent that minimizes the loss.
# NOTE(review): learning_rate=1 is unusually large for gradient descent —
# confirm this was intended (values like 0.1 or smaller are more typical).
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1).minimize(loss)

# Create a Session and initialize all model variables.
sess = tf.Session()
sess.run(tf.global_variables_initializer())

# Train the model: run 10,000 optimization steps, feeding the input data
# (glove_data <- g_data) and the ground-truth labels (rps_labels <- g_label)
# on every step.
for _ in range(10000):
    sess.run(optimizer, {glove_data: g_data,
                         rps_labels: g_label})