import tensorflow as tf   # TF 1.x API, as used throughout these slides

x = tf.placeholder(tf.float32)              # input to feed at run time

# Build the graph
m = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.random_normal([1]))
y = m * x + b

# Prepare the execution environment
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    # Run the computation (x_in is the input sample supplied by the caller)
    print(sess.run(y, feed_dict={x: x_in}))
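The point of the snippet above is deferred execution: building the graph produces symbolic tensors, and values only exist once the graph is run inside a session. A minimal self-contained sketch of that difference, assuming TF 1.x (the input value 3.0 is illustrative, not from the slide):

import tensorflow as tf   # TF 1.x

x = tf.placeholder(tf.float32, shape=[1])
m = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.random_normal([1]))
y = m * x + b

# y is a symbolic Tensor, e.g. Tensor("add:0", shape=(1,), dtype=float32);
# no numeric value exists yet
print(y)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Only now is the computation executed; the result depends on the random init
    print(sess.run(y, feed_dict={x: [3.0]}))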
y_label = tf.placeholder(tf.float32)        # target to feed at run time

# Define a loss (mean squared error; the exact definition is assumed here,
# since only the step label survived on the slide)
loss = tf.reduce_mean(tf.square(y - y_label))

# Create an optimizer
optimizer = tf.train.GradientDescentOptimizer(0.5)

# Op to minimize the loss
train = optimizer.minimize(loss)

with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    # Iteratively run the training op, feeding one (x, y) pair per step
    for i in range(1000):
        sess.run(train, feed_dict={x: x_data[i], y_label: y_data[i]})
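For completeness, here is one way the two slides might be assembled into a single runnable script, assuming TF 1.x. The synthetic data (a line with slope 0.1 and intercept 0.3) and the full-batch feed are illustrative assumptions; the slide itself feeds one sample per step:

import numpy as np
import tensorflow as tf   # TF 1.x

# Synthetic training data (illustrative): y = 0.1 * x + 0.3
x_data = np.random.rand(100).astype(np.float32)
y_data = 0.1 * x_data + 0.3

x = tf.placeholder(tf.float32)
y_label = tf.placeholder(tf.float32)
m = tf.Variable(tf.random_normal([1]))
b = tf.Variable(tf.random_normal([1]))
y = m * x + b

loss = tf.reduce_mean(tf.square(y - y_label))       # mean squared error
optimizer = tf.train.GradientDescentOptimizer(0.5)
train = optimizer.minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        # Full-batch updates here; the slide instead feeds x_data[i], y_data[i]
        sess.run(train, feed_dict={x: x_data, y_label: y_data})
    # The learned parameters should approach [0.1] and [0.3]
    print(sess.run([m, b]))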
Where to learn more:
- Deep Learning with TensorFlow (Udacity class): goo.gl/iHssII
- Stanford's CS231n: cs231n.github.io
- Udacity's Machine Learning Nanodegree: goo.gl/ODpXj4
- Totally new to ML? Machine Learning Recipes: goo.gl/KewA03