Successfully reported this slideshow.
We use your LinkedIn profile and activity data to personalize ads and to show you more relevant ads. You can change your ad preferences anytime.
https://drive.google.com/file/d/0BxKBnD5y2M8NREZod0tVdW5FLTQ/view
http://kaiminghe.com/ilsvrc15/ilsvrc2015_deep_residual_learning_kaiminghe.pdf
Kaiming He, Xiangyu Zhang, Shaoqing Ren, & Jian Sun. “Deep Residual Learning for Image Recognition”. arXiv 2015
Kaiming He, Xiangyu Zhang, Shaoqing Ren, & Jian Sun. “Deep Residual Learning for Image Recognition”. arXiv 2015
Kaiming He, Xiangyu Zhang, Shaoqing Ren, & Jian Sun. “Identity Mappings in Deep Residual Networks”. arXiv 2016
https://devblogs.nvidia.com/parallelforall/nvidia-ibm-cloud-support-imagenet-large-scale-visual-recognition-challenge/
•
•
•
•
•
•
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder(tf.float32,name='input')
y = tf.placeholder(tf.float32,name...
with tf.variable_scope('conv1'):
W_conv1 = weight_variable(
[5, 5, 1, 32])
b_conv1 = bias_variable([32])
h_conv1 = tf.nn.r...
cross_entropy = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_conv))
train_step = tf.train.Ada...
with tf.Session(graph=graph) as sess:
from tf.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets...
def conv_layer_resnet_im(inpt, filter_shape, stride,
phase,name=''):
filter_ = weight_variable(filter_shape,
name=name + '...
def residual_block_im(inpt, output_depth,phase,
name=''):
input_depth = inpt.get_shape().as_list()[3]
conv1 = conv_layer_r...
num_blocks = 4
num_filters = 128
for i in range(num_blocks):
with tf.variable_scope('conv%d_%d' %
(num_filters, i + 1)):
c...
•
•
•
self.data_examples =
tf.placeholder(dtype=tf.float32,
shape=[batch_size,img_size,img_size,3])
self.data_labels =
tf.placeh...
def put_inputs(self, sess):
for data_examples, data_labels
in self.iterator(...):
sess.run(self.enqueue_op,feed_dict={
sel...
for i in xrange(num_GPU):
with tf.device('/gpu:%d' % i):
with tf.name_scope('%s_%d' % ('GPU', i)) as name_scope:
train_ima...
grads = average_gradients(tower_grads)
loss = tf.reduce_mean(loss_list)
with tf.variable_scope(tf.get_variable_scope(),
re...
jeans
DataScience Lab 2017_Мониторинг модных трендов с помощью глубокого обучения и TensorFlow_Ольга Романюк
DataScience Lab 2017_Мониторинг модных трендов с помощью глубокого обучения и TensorFlow_Ольга Романюк
DataScience Lab 2017_Мониторинг модных трендов с помощью глубокого обучения и TensorFlow_Ольга Романюк
DataScience Lab 2017_Мониторинг модных трендов с помощью глубокого обучения и TensorFlow_Ольга Романюк
DataScience Lab 2017_Мониторинг модных трендов с помощью глубокого обучения и TensorFlow_Ольга Романюк
DataScience Lab 2017_Мониторинг модных трендов с помощью глубокого обучения и TensorFlow_Ольга Романюк
Upcoming SlideShare
Loading in …5
×

DataScience Lab 2017_Мониторинг модных трендов с помощью глубокого обучения и TensorFlow_Ольга Романюк

42 views

Published on

DataScience Lab, 13 мая 2017
Мониторинг модных трендов с помощью глубокого обучения и TensorFlow, Ольга Романюк (Data Scientist at Eleks)
В течении последних 8 месяцев мы в Eleks работали над системой отслеживания модных трендов, основанной на глубинной остаточной нейронной сети с тождественным отображением. При тренировке сети мы использовали онлайн увеличение объема данных, а также распараллеливание данных по двум картам GPU. Мы создали эту систему с нуля при помощи TensorFlow. В презентации я расскажу о практической стороне проекта, нюансах реализации и подводных камнях, с которыми мы столкнулись во время работы.

Все материалы: http://datascience.in.ua/report2017

Published in: Technology
  • Be the first to comment

DataScience Lab 2017_Мониторинг модных трендов с помощью глубокого обучения и TensorFlow_Ольга Романюк

  1. 1. https://drive.google.com/file/d/0BxKBnD5y2M8NREZod0tVdW5FLTQ/view
  2. 2. http://kaiminghe.com/ilsvrc15/ilsvrc2015_deep_residual_learning_kaiminghe.pdf
  3. 3. Kaiming He, Xiangyu Zhang, Shaoqing Ren, & Jian Sun. “Deep Residual Learning for Image Recognition”. arXiv 2015
  4. 4. Kaiming He, Xiangyu Zhang, Shaoqing Ren, & Jian Sun. “Deep Residual Learning for Image Recognition”. arXiv 2015
  5. 5. Kaiming He, Xiangyu Zhang, Shaoqing Ren, & Jian Sun. “Identity Mappings in Deep Residual Networks”. arXiv 2016
  6. 6. https://devblogs.nvidia.com/parallelforall/nvidia-ibm-cloud-support-imagenet-large-scale-visual-recognition-challenge/
  7. 7. • • • • • •
  8. 8. graph = tf.Graph() with graph.as_default(): x = tf.placeholder(tf.float32,name='input') y = tf.placeholder(tf.float32,name='labels') x_image = tf.reshape(x, [-1, 28, 28, 1]) ...
  9. 9. with tf.variable_scope('conv1'): W_conv1 = weight_variable( [5, 5, 1, 32]) b_conv1 = bias_variable([32]) h_conv1 = tf.nn.relu( conv2d(x_image, W_conv1) + b_conv1) h_pool1 = max_pool_2x2(h_conv1) with tf.variable_scope('conv2'): ...
  10. 10. cross_entropy = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(labels=y,logits=y_conv)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
  11. 11. with tf.Session(graph=graph) as sess: from tf.examples.tutorials.mnist import input_data mnist = input_data.read_data_sets('MNIST_data', one_hot=True) writer = tf.summary.FileWriter(logdir, sess.graph) sess.run(tf.global_variables_initializer()) for i in range(20000): batch = mnist.train.next_batch(50) _,train_accuracy = sess.run([train_step,accuracy], feed_dict={x: batch[0], y: batch[1]}) if i % 100 == 0: print("step %d, training accuracy %g" % (i, train_accuracy)) print("test accuracy %g" % sess.run(accuracy, feed_dict={x: mnist.test.images, y: mnist.test.labels}))
  12. 12. def conv_layer_resnet_im(inpt, filter_shape, stride, phase,name=''): filter_ = weight_variable(filter_shape, name=name + '_weights') normalized = batch_norm(inpt, phase,name=name) activated = tf.nn.relu(normalized) conv = tf.nn.conv2d(activated, filter=filter_, strides=[1, stride, stride, 1], padding="SAME") return conv
  13. 13. def residual_block_im(inpt, output_depth,phase, name=''): input_depth = inpt.get_shape().as_list()[3] conv1 = conv_layer_resnet_im(inpt, [3, 3,input_depth, output_depth], 1,phase, name=name+'_conv1') conv2 = conv_layer_resnet_im(conv1, [3, 3, output_depth, output_depth], 1,phase, name=name+'_conv2') if input_depth != output_depth: input_layer = tf.pad(inpt, [[0,0], [0,0], [0,0], [0, output_depth - input_depth]]) else: input_layer = inpt return conv2 + input_layer
  14. 14. num_blocks = 4 num_filters = 128 for i in range(num_blocks): with tf.variable_scope('conv%d_%d' % (num_filters, i + 1)): conv = residual_block_deep_im(layers[-1], num_filters, phase) layers.append(conv) assert conv.get_shape().as_list()[1:] == [56, 56, num_filters] ...
  15. 15. • • •
  16. 16. self.data_examples = tf.placeholder(dtype=tf.float32, shape=[batch_size,img_size,img_size,3]) self.data_labels = tf.placeholder(dtype=tf.float32, shape=[batch_size, num_labels]) self.queue = tf.RandomShuffleQueue(shapes=[[img_size, img_size, 3],[num_labels,]], dtypes=[tf.float32, tf.float32], capacity=capacity, min_after_dequeue=0) self.enqueue_op = self.queue.enqueue_many([self.data_examples, self.data_labels)
  17. 17. def put_inputs(self, sess): for data_examples, data_labels in self.iterator(...): sess.run(self.enqueue_op,feed_dict={ self.data_examples:data_examples, self.data_labels:data_labels}) def get_inputs(self): return self.queue.dequeue_many(self.batch_size)
  18. 18. for i in xrange(num_GPU): with tf.device('/gpu:%d' % i): with tf.name_scope('%s_%d' % ('GPU', i)) as name_scope: train_images_batch, train_labels_batch = train_runner.get_inputs() tower_loss = get_tower_loss(name_scope, train_images_batch, train_labels_batch,phase_train) tf.get_variable_scope().reuse_variables() loss_list.append(tower_loss) grads = optimizer.compute_gradients(tower_loss) tower_grads.append(grads)
  19. 19. grads = average_gradients(tower_grads) loss = tf.reduce_mean(loss_list) with tf.variable_scope(tf.get_variable_scope(), reuse=False): train_op = optimizer.apply_gradients(grads, global_step=global_step)
  20. 20. jeans

×