Copyright © 2017, edureka and/or its affiliates. All rights reserved.
Theano

Theano is a Python library for scientific computing: it lets you define, optimize, and evaluate mathematical expressions over multi-dimensional arrays, and run them transparently on either a CPU or a GPU.
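To make that concrete, here is a minimal sketch of the Theano workflow (the names a, b, and f are illustrative): declare symbolic arrays, build an expression over them, and compile it into a callable function that Theano can place on CPU or GPU.

import numpy
import theano
import theano.tensor as T

# Declare symbolic multi-dimensional inputs (no data yet, just graph nodes).
a = T.matrix("a")
b = T.matrix("b")

# Build a symbolic expression over them.
out = T.dot(a, b) + T.sqr(a).sum()

# Compile the expression into a callable function. Whether it runs on
# CPU or GPU is decided by Theano's configuration (e.g. THEANO_FLAGS).
f = theano.function(inputs=[a, b], outputs=out)

x = numpy.random.rand(3, 4).astype("float32")
y = numpy.random.rand(4, 2).astype("float32")
print(f(x, y))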
TensorFlow

TensorFlow is an open-source software library developed by the Google Brain team. It expresses numerical computation as dataflow graphs: nodes represent mathematical operations, while the graph edges carry the multi-dimensional arrays (tensors) that flow between them.
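As a minimal sketch of the dataflow-graph idea, using the TensorFlow 1.x API this comparison was written against: building the graph and executing it are separate steps.

import tensorflow as tf

# Build the dataflow graph: nothing is computed at this point.
a = tf.constant([[1.0, 2.0], [3.0, 4.0]])
b = tf.constant([[1.0], [2.0]])
c = tf.matmul(a, b)

# Launch a session to actually execute the graph.
with tf.Session() as sess:
    print(sess.run(c))  # [[5.], [11.]]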
Theano vs TensorFlow

Speed

Theano tends to be faster on single-core (and single-GPU) workloads, while TensorFlow pulls ahead when the computation is spread across multiple cores or devices.
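For context, both libraries expose knobs that control where and how parallel the computation runs; a minimal sketch with illustrative values (the thread counts and device names here are assumptions, not recommendations):

# Theano: device and parallelism are chosen via environment flags,
# set before the process starts, e.g.:
#   THEANO_FLAGS="device=cuda,floatX=float32,openmp=True" python script.py

# TensorFlow 1.x: thread-level parallelism can be set per session.
import tensorflow as tf

config = tf.ConfigProto(
    intra_op_parallelism_threads=4,  # threads used inside a single op
    inter_op_parallelism_threads=2,  # ops executed concurrently
)
sess = tf.Session(config=config)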
Theano vs TensorFlow

Technology Benefits

Operations: Theano supports a wide range of operations; TensorFlow still has to come up to par with it.
Gradients: Theano computes the gradient for you when determining the backpropagation error; that is not the case in TensorFlow, where the optimizer handles it behind the scenes.
Optimizers: Theano gives you full control over your optimizers, since you hard-code them yourself; TensorFlow gives you access to lots of good optimizers out of the box (see the sketch below).
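A minimal sketch of that difference, minimizing the toy cost (x - 2)^2 in both libraries (variable names and learning rates are illustrative): Theano makes you derive the gradient and write the update rule yourself, while TensorFlow 1.x ships optimizer classes ready to use.

import theano
import theano.tensor as T
import tensorflow as tf

# Theano: no optimizer classes -- derive the gradient and hard-code the update.
x_th = theano.shared(5.0, name="x")
cost = T.sqr(x_th - 2.0)
grad = T.grad(cost, wrt=x_th)
train_th = theano.function(inputs=[], outputs=cost,
                           updates=[(x_th, x_th - 0.1 * grad)])
for _ in range(100):
    train_th()
print(x_th.get_value())  # close to 2.0

# TensorFlow: pick any of the built-in optimizers off the shelf.
x_tf = tf.Variable(5.0)
loss = tf.square(x_tf - 2.0)
step = tf.train.AdamOptimizer(0.1).minimize(loss)  # or GradientDescentOptimizer,
                                                   # MomentumOptimizer, RMSPropOptimizer, ...
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(500):
        sess.run(step)
    print(sess.run(x_tf))  # close to 2.0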
Theano vs TensorFlow

Compatibility

Keras: Theano integrates with high-level APIs like Keras (as shown in the sketch below); TensorFlow's integration is not quite there yet.
Windows: Theano has native Windows support; currently, TensorFlow lacks this support.
Lasagne: Theano supports high-level wrappers like Lasagne; TensorFlow has no support for Lasagne.
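On the Keras point: Keras selects its backend from a single configuration setting, so the same model definition can run on Theano today and a different backend later. A minimal sketch (the model itself is illustrative):

import os
os.environ["KERAS_BACKEND"] = "theano"  # or the "backend" field in ~/.keras/keras.json

import keras  # prints e.g. "Using Theano backend."
from keras.models import Sequential
from keras.layers import Dense

# The model definition is backend-agnostic.
model = Sequential([Dense(1, input_dim=2)])
model.compile(optimizer="sgd", loss="mse")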
Theano vs TensorFlow

Community Support

Backed by Google, TensorFlow has the larger and faster-growing community of the two.
Theano vs TensorFlow
Documentation
Theano vs TensorFlow

Code Readability

To compare readability, the next slides walk through the same toy linear-regression example (fitting W ≈ [0.1, 0.2] and b ≈ 0.3 to synthetic data), written first in Theano and then in TensorFlow.
Code Readability

# --- Theano version ---
import theano
import theano.tensor as T
import numpy

# Make 100 phony data points in NumPy
x_data = numpy.float32(numpy.random.rand(2, 100))
y_data = numpy.dot([0.100, 0.200], x_data) + 0.300

# Initialise the Theano model
X = T.matrix()
Y = T.vector()
b = theano.shared(numpy.random.uniform(-1, 1), name="b")
W = theano.shared(numpy.random.uniform(-1.0, 1.0, (1, 2)), name="W")
y = W.dot(X) + b

# Compute the gradient of the mean squared error w.r.t. each parameter
cost = T.mean(T.sqr(y - Y))
gradientW = T.grad(cost=cost, wrt=W)
gradientB = T.grad(cost=cost, wrt=b)

# Hard-code the gradient-descent updates and compile the training function
updates = [[W, W - gradientW * 0.5], [b, b - gradientB * 0.5]]
train = theano.function(inputs=[X, Y], outputs=cost, updates=updates,
                        allow_input_downcast=True)

# Fit the plane
for i in range(0, 201):
    train(x_data, y_data)

print(W.get_value(), b.get_value())
# --- TensorFlow version ---
import tensorflow as tf
import numpy as np

# Make 100 phony data points in NumPy.
x_data = np.float32(np.random.rand(2, 100))  # Random input
y_data = np.dot([0.100, 0.200], x_data) + 0.300

# Construct a linear model.
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(W, x_data) + b

# Minimize the squared errors.
loss = tf.reduce_mean(tf.square(y - y_data))
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

# For initializing the variables.
init = tf.global_variables_initializer()

# Launch the graph.
sess = tf.Session()
sess.run(init)

# Fit the plane.
for step in range(0, 201):
    sess.run(train)
    if step % 20 == 0:
        print(step, sess.run(W), sess.run(b))

# Learns best fit is W: [[0.100 0.200]], b: [0.300]
Code Readability: Defining the Model

# TensorFlow: the NumPy arrays are used directly when building the graph
b = tf.Variable(tf.zeros([1]))
W = tf.Variable(tf.random_uniform([1, 2], -1.0, 1.0))
y = tf.matmul(W, x_data) + b

# Theano: symbolic inputs are declared first, then the model is built on them
X = T.matrix()
Y = T.vector()
b = theano.shared(numpy.random.uniform(-1, 1), name="b")
W = theano.shared(numpy.random.uniform(-1.0, 1.0, (1, 2)), name="W")
y = W.dot(X) + b
Code Readability: Loss and Training Step

# TensorFlow
loss = tf.reduce_mean(tf.square(y - y_data))        # (1) define the loss
optimizer = tf.train.GradientDescentOptimizer(0.5)  # (2) pick an optimizer
train = optimizer.minimize(loss)                    # (3) get a training op

# Theano
cost = T.mean(T.sqr(y - Y))                                     # (1) define the cost
gradientW = T.grad(cost=cost, wrt=W)                            # (2) derive the gradients
gradientB = T.grad(cost=cost, wrt=b)                            # (2)
updates = [[W, W - gradientW * 0.5], [b, b - gradientB * 0.5]]  # (2) and the updates, by hand
train = theano.function(inputs=[X, Y], outputs=cost,
                        updates=updates, allow_input_downcast=True)  # (3) compile the training function
Code Readability: Running the Training Loop

# TensorFlow: initialize the variables, then run the training op in a session
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for step in range(0, 201):
    sess.run(train)

# Theano: the compiled function is simply called with the data
for i in range(0, 201):
    train(x_data, y_data)
print(W.get_value(), b.get_value())
