In [1]:
import numpy as np
import pandas as pd
import tensorflow as tf
In [2]:
data = pd.read_csv("./datas/classification_ex.csv", dtype="float32").values
In [3]:
x_data = data[:, 0:2]
y_data = data[:, 2:]
In [4]:
global_step = tf.Variable(0, trainable=False, name="global_step")
model setting
In [5]:
# placeholders: X holds the inputs, Y the ground-truth labels
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
# weight, bias
W1 = tf.Variable(tf.random_normal([2, 10], mean=0, stddev=1))
W2 = tf.Variable(tf.random_normal([10, 20], mean=0, stddev=1))
W3 = tf.Variable(tf.random_normal([20, 3], mean=0, stddev=1))
# biases as Variables so they are trained, checkpointed, and logged
b1 = tf.Variable(tf.zeros([10]))
b2 = tf.Variable(tf.zeros([20]))
b3 = tf.Variable(tf.zeros([3]))
# Layer 1
with tf.name_scope("Layer_1"):
L1 = tf.add(tf.matmul(X, W1), b1)
L1 = tf.nn.sigmoid(L1)
# Layer 2
with tf.name_scope("Layer_2"):
L2 = tf.add(tf.matmul(L1, W2), b2)
L2 = tf.nn.sigmoid(L2)
# Layer 3
with tf.name_scope("Layer_3"):
model = tf.add(tf.matmul(L2, W3), b3)
model = tf.nn.softmax(model)
# cost function
with tf.name_scope("optimizer"):
cost = tf.reduce_mean(-tf.reduce_sum(Y*tf.log(model) + (1-Y)*tf.log(1-model)))
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(cost, global_step=global_step)
tf.summary.scalar("cost", cost)
tf.summary.histogram("Weight1", W1)
tf.summary.histogram("Weight2", W2)
tf.summary.histogram("Weight3", W3)
tf.summary.histogram("bais1", b1)
tf.summary.histogram("bais2", b2)
tf.summary.histogram("bais3", b3)
create session
In [6]:
sess = tf.Session()
saver = tf.train.Saver(tf.global_variables())
tf.global_variables() is a function that returns the variables defined above, so the Saver will manage all of them.
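As a quick check (an aside, not a cell in the original notebook), you can print exactly what the Saver will track:
# inspect the variables returned by tf.global_variables()
for v in tf.global_variables():
    print(v.name, v.shape)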
make checkpoint
In [7]:
ckpt = tf.train.get_checkpoint_state("./model")
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    saver.restore(sess, ckpt.model_checkpoint_path)
else:
    sess.run(tf.global_variables_initializer())
Collect the summary tensors registered above with tf.summary.merge_all, then use tf.summary.FileWriter to set the directory where the graph and the tensor values will be saved.
In [8]:
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs", sess.graph)
running model
In [9]:
for step in range(1000):
    sess.run(train_op, feed_dict={X: x_data, Y: y_data})
    if (step+1) % 50 == 0:
        print("step: {}, cost: {:.5f}".format(
            sess.run(global_step),
            sess.run(cost, feed_dict={X: x_data, Y: y_data})))
    summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
    writer.add_summary(summary, global_step=sess.run(global_step))
In [10]:
saver.save(sess, "./model/dnn.ckpt", global_step=global_step)
Out[10]:
In [11]:
prediction = tf.argmax(model, 1)
target = tf.argmax(Y, 1)
print("prediction: \t{}".format(sess.run(prediction, feed_dict={X: x_data})))
print("target: \t{}".format(sess.run(target, feed_dict={Y:y_data})))
is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print("\naccuracy: \t{:.3f}%".format(sess.run(accuracy*100, feed_dict={X: x_data, Y: y_data})))
In [12]:
# tensorboard --logdir=./logs
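# TensorBoard serves the dashboard at http://localhost:6006 by default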
In [13]:
from IPython.core.display import display, HTML
display(HTML("<style> .container{width:100% !important;}</style>"))