06. Linear Regression with TensorFlow

Cost Function with TensorFlow
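
With the simplified hypothesis H(x) = W * x, the cost is the mean squared error over the training data: cost(W) = (1/m) * sum_i (W * x_i - y_i)^2. The script below evaluates this cost for W from -3.0 to 4.9 in steps of 0.1 and plots the resulting curve.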

import tensorflow as tf
import matplotlib.pyplot as plt

# Y = X
X = [1,2,3]
Y = [1,2,3]

# Weight
W = tf.placeholder(tf.float32)

# Simplified hypothesis for linear model X * W
hypothesis = X * W

# Cost function
cost = tf.reduce_mean(tf.square(hypothesis - Y))
# Launch the graph in a session
sess = tf.Session()

# Initializes global variables in the graph
sess.run(tf.global_variables_initializer())
# Variables for plotting cost function
W_val = []
cost_val = []

for i in range(-30, 50):
    feed_W = i * 0.1
    curr_cost, curr_W = sess.run([cost, W], feed_dict={W:feed_W})
    W_val.append(curr_W)
    cost_val.append(curr_cost)

# Show the cost function
plt.plot(W_val, cost_val)
plt.title("Cost function")
plt.xlabel("W")
plt.ylabel("Cost(W)")
plt.show()
Image 1. Cost function with TensorFlow
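
Since the cost is just the mean squared error, the same curve can be reproduced without a TensorFlow session. A minimal NumPy cross-check (NumPy is not used elsewhere in this post; this sketch is my own addition):

import numpy as np

X_np = np.array([1, 2, 3], dtype=np.float32)
Y_np = np.array([1, 2, 3], dtype=np.float32)

Ws = np.arange(-30, 50) * 0.1                          # same sweep of W as above
costs = [np.mean((w * X_np - Y_np) ** 2) for w in Ws]  # mean squared error at each W
print(min(costs))                                      # 0.0, reached at W = 1.0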

Gradient Descent with TensorFlow
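
Gradient descent repeatedly moves W against the slope of the cost: W := W - learning_rate * d(cost)/dW. For the mean-squared-error cost above, d(cost)/dW = (2/m) * sum_i (W * x_i - y_i) * x_i. The code below drops the constant factor 2, which only rescales the effective learning rate; the next section restores it.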

import tensorflow as tf
import matplotlib.pyplot as plt

# y = x
x_data = [1,2,3]
y_data = [1,2,3]

# Weight
W = tf.Variable(tf.random_normal([1]), name="weight")
# [1] means it is a rank-1 tensor containing a single value.

# Placeholder for X & Y
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Simplified hypothesis
hypothesis = X * W

# Cost function: Mean Squared Error
cost = tf.reduce_mean(tf.square(hypothesis - Y))

# Minimize error
# Gradient descent using derivative
# W -= learning_rate * derivative
learning_rate = 0.01
gradient = tf.reduce_mean((W * X - Y) * X)
descent = W - learning_rate * gradient
update = W.assign(descent)
# It can be replaced by
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
# train = optimizer.minimize(cost)

# Launch the graph in a session
sess = tf.Session()
# Initializes global variables in the graph
sess.run(tf.global_variables_initializer())

# For graph
steps = []
outputs = {"cost" : [], "weight" : []}

# Train
for step in range(51):
    # W is a variable; update is the operation that assigns the new value to W.
    sess.run(update, feed_dict={X: x_data,Y: y_data})

    _cost = sess.run(cost, feed_dict={X: x_data, Y: y_data})
    _W = sess.run(W)

    steps.append(step)
    outputs["cost"].append(_cost)
    outputs["weight"].append(_W)

    if step in (1, 10, 20, 30, 40, 50):
        print("Step: {0:4d}, Cost: {1:13.10f}, Weight: {2:13.10f}".\
              format(step, _cost, _W[0]))

# Draw graph
for k, v in outputs.items():
    plt.plot(steps, v)
    plt.title(k)
    plt.xlabel("step")
    plt.ylabel(k)
    plt.show()
Step:    1, Cost:  3.6350340843, Weight:  1.0379939079
Step:   10, Cost:  1.5378515720, Weight:  1.0379939079
Step:   20, Cost:  0.5913023949, Weight:  1.0379939079
Step:   30, Cost:  0.2273551524, Weight:  1.0379939079
Step:   40, Cost:  0.0874179304, Weight:  1.0379939079
Step:   50, Cost:  0.0336120725, Weight:  1.0379939079
Image 2. Weight
Image 3. Cost
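
A quick way to check the hand-derived gradient is to compare it with the one TensorFlow derives automatically via tf.gradients. A small sketch reusing the cost, W, X, Y nodes and the session from the block above (tf.gradients returns a list, hence the [0]; this check is my own addition):

tf_gradient = tf.gradients(cost, W)[0]          # d(cost)/dW derived automatically
manual = tf.reduce_mean((W * X - Y) * X) * 2    # exact derivative of the mean squared error
print(sess.run([tf_gradient, manual], feed_dict={X: x_data, Y: y_data}))
# The two values should agree up to numerical precision.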

Modifying Gradient Descent with TensorFlow
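
This section trains two weights side by side: m_W is updated with the hand-written gradient, now including the factor of 2 so it is the exact derivative, while d_W is updated through the optimizer's compute_gradients / apply_gradients pair. The printed weights confirm that both follow the same trajectory.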

import tensorflow as tf
import matplotlib.pyplot as plt

# y = x
X = [1,2,3]
Y = [1,2,3]

# Weights for the manual gradient and the optimizer's default gradient; both start from 5
m_W = tf.Variable(5.)
d_W = tf.Variable(5.)

# Simplified hypothesis for manual gradient and default gradient
m_hypo = X * m_W
d_hypo = X * d_W

# Manual gradient
# This is the gradient used above, multiplied by 2 (the exact derivative of the squared error)
learning_rate = 0.01
manual_gradient = tf.reduce_mean((m_W * X - Y) * X) * 2
descent = m_W - learning_rate * manual_gradient
update = m_W.assign(descent)

# Cost function for default gradient
d_cost = tf.reduce_mean(tf.square(d_hypo - Y))

# Define default gradient descent node
d_optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)

# Compute (gradient, variable) pairs for d_cost
gd = d_optimizer.compute_gradients(d_cost)

# gd could be modified here before being applied; we apply it unchanged
# d_W will be updated by do_gradients
do_gradients = d_optimizer.apply_gradients(gd)

# Launch the graph in a session
sess = tf.Session()
# Initializes global variables in the graph
sess.run(tf.global_variables_initializer())

# For graph
steps = []
outputs = {"manual" : [], "default": []}

for step in range(101):
    M = sess.run(m_W)
    D = sess.run(d_W)

    steps.append(step)
    outputs["manual"].append(M)
    outputs["default"].append(D)

    if step in (1, 10, 100):
        print(\
            "Step: {0:4d}, Manual Weight: {1:13.10f}, Default Weight: {2:13.10f}".\
              format(step, M, D))

    sess.run(update)
    sess.run(do_gradients)

# Draw graph
for k, v in outputs.items():
    plt.plot(steps, v, label=k)

plt.title("Weights")
plt.xlabel("step")
plt.ylabel("Weight")
plt.legend()
plt.show()
Step:    1, Manual Weight:  4.6266665459, Default Weight:  4.6266665459
Step:   10, Manual Weight:  2.5015385151, Default Weight:  2.5015385151
Step:  100, Manual Weight:  1.0002222061, Default Weight:  1.0002222061
Image 4. Gradient descent
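
The compute_gradients / apply_gradients pair exists precisely so that the (gradient, variable) pairs can be edited before they are applied. A small sketch of one possible modification, clipping each gradient, built on the gd and d_optimizer nodes above (the clipping range is my own choice, not part of the original):

# Clip each gradient to [-1, 1] before applying it
clipped = [(tf.clip_by_value(grad, -1.0, 1.0), var) for grad, var in gd]
apply_clipped = d_optimizer.apply_gradients(clipped)
# In the training loop, sess.run(apply_clipped) would replace sess.run(do_gradients).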

Simple Linear Regression with TensorFlow

import tensorflow as tf
import matplotlib.pyplot as plt

# y = x
x_train = [1,2,3]
y_train = [1,2,3]

# Variable is a trainable variable in TensorFlow.
# It will be updated automatically during training.
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# [1] means it is a rank-1 tensor containing a single value.

# Hypothesis: W * x + b
hypothesis = x_train * W + b

# reduce_mean computes the average, giving the mean squared error.
cost = tf.reduce_mean(tf.square(hypothesis - y_train))

# Training system
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)

# Up to this point, we have only built the graph.

# Launch the graph in a session
sess = tf.Session()
# Initialize global variables in the graph.
sess.run(tf.global_variables_initializer())

# For graph
steps = []
outputs = {"cost" : [], "weight" : [], "bias" : []}


# Train
for step in range(1001):
    sess.run(train)

    _cost = sess.run(cost)
    _weight = sess.run(W)
    _bias = sess.run(b)

    steps.append(step)
    outputs["cost"].append(_cost)
    outputs["weight"].append(_weight[0])
    outputs["bias"].append(_bias[0])

    if step in (1, 10, 100, 1000):
        print("Step: {0:4d}, Cost: {1:13.10f}, Weight: {2:13.10f}, Bias: {3:13.10f}".\
              format(step, _cost, _weight[0], _bias[0]))

# Draw graph
for k, v in outputs.items():
    plt.plot(steps, v)
    plt.title(k)
    plt.xlabel("step")
    plt.ylabel(k)
    plt.show()
Step:    1, Cost: 13.5835304260, Weight: -0.0048628896, Bias: -1.5833736658
Step:   10, Cost:  1.7461298704, Weight:  0.9168848991, Bias: -1.1534380913
Step:  100, Cost:  0.0814563707, Weight:  1.3314682245, Bias: -0.7535393834
Step: 1000, Cost:  0.0010701300, Weight:  1.0379939079, Bias: -0.0863692015
Image 5. Bias
Image 6. Weight
Image 7. Cost
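
In this version, x_train and y_train are baked into the graph as Python constants, so predicting for a new input means evaluating the trained W and b directly. A short sketch reusing the session above (the input value 5.0 is just an example):

w_trained, b_trained = sess.run([W, b])
print(w_trained * 5.0 + b_trained)   # prediction for x = 5, computed outside the graph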

Linear Regression with Placeholder in TensorFlow
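
Unlike the previous section, the training data is not baked into the graph here. X and Y are placeholders, so any data of matching shape can be fed at run time through feed_dict, both for training and for querying the trained hypothesis afterwards.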

import tensorflow as tf
import matplotlib.pyplot as plt

# Placeholder can be used as input data
X = tf.placeholder(tf.float32, shape=[None])
Y = tf.placeholder(tf.float32, shape=[None])
# [None] means a 1-dimensional tensor
#  whose number of values is not fixed in advance.

# Variable is a trainable variable in TensorFlow.
# It will be updated automatically during training.
W = tf.Variable(tf.random_normal([1]), name='weight')
b = tf.Variable(tf.random_normal([1]), name='bias')
# [1] means it is a rank-1 tensor containing a single value.

# Hypothesis: W * x + b
hypothesis = X * W + b

# reduce_mean computes the average, giving the mean squared error.
cost = tf.reduce_mean(tf.square(hypothesis - Y))

# Training system
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train = optimizer.minimize(cost)

# Up to this point, we have only built the graph.

# Launch the graph in a session
sess = tf.Session()
# Initialize global variables in the graph.
sess.run(tf.global_variables_initializer())

# For graph
steps = []
outputs = {"cost" : [], "weight" : [], "bias" : []}

# Train
for step in range(1001):
    # Values are returned in the same order as the fetches passed to run
    _cost, _weight, _bias, _ = sess.run([cost, W, b, train],\
                                        feed_dict={X:[1,2,3], Y:[1,2,3]})

    steps.append(step)
    outputs["cost"].append(_cost)
    outputs["weight"].append(_weight[0])
    outputs["bias"].append(_bias[0])

    if step in (1, 10, 100, 1000):
        print(\
            "Step: {0:4d}, Cost: {1:13.10f}, Weight: {2:13.10f}, Bias: {3:13.10f}".\
              format(step, _cost, _weight[0], _bias[0]))

# After training, we can test hypothesis with new input
print(sess.run(hypothesis, feed_dict={X:[5]}))
print(sess.run(hypothesis, feed_dict={X:[1.8, 3.2]}))

# Draw graph
for k, v in outputs.items():
    plt.plot(steps, v)
    plt.title(k)
    plt.xlabel("step")
    plt.ylabel(k)
    plt.show()
Step:    1, Cost:  0.1927358508, Weight:  0.8746519685, Bias:  0.6404360533
Step:   10, Cost:  0.0642042160, Weight:  0.7945960760, Bias:  0.5903141499
Step:  100, Cost:  0.0304005835, Weight:  0.7979824543, Bias:  0.4592365921
Step: 1000, Cost:  0.0003993845, Weight:  0.9768448472, Bias:  0.0526370667
[ 4.93686152]
[ 1.81095779  3.17854071]
Image 8. Bias
Image 9. Weight
Image 10. Cost
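
Because the inputs are placeholders, the same graph can be retrained on entirely different data just by changing feed_dict. A brief sketch reusing the graph and session built above; the data y = 2x + 1 is my own example, and the variables are re-initialized first:

sess.run(tf.global_variables_initializer())    # reset W and b

for step in range(2001):
    sess.run(train, feed_dict={X: [1, 2, 3], Y: [3, 5, 7]})   # y = 2x + 1

print(sess.run([W, b]))                         # W should come out close to 2, b close to 1
print(sess.run(hypothesis, feed_dict={X: [6]})) # roughly 13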
