04_deep_neural_net_Costfun1

review

In [1]:
import tensorflow as tf
import numpy as np
In [2]:
# define the data
# [fur, wings]
x_data = np.array([
    [0, 0],
    [1, 0],
    [1, 1],
    [0, 0],
    [0, 0],
    [0, 1]
])

# [other, mammal, bird]
y_data = np.array([
    [1, 0, 0], # other
    [0, 1, 0], # mammal
    [0, 0, 1], # bird
    [1, 0, 0], # other
    [1, 0, 0], # other
    [0, 0, 1]  # bird
])

## model setting

X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
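
The placeholders above are declared without explicit shapes, which works because TensorFlow infers them from the fed arrays. A slightly stricter, equivalent declaration (just a sketch, assuming the [batch, 2] feature and [batch, 3] label layout defined above) would be:

X = tf.placeholder(tf.float32, [None, 2])  # [batch, features]
Y = tf.placeholder(tf.float32, [None, 3])  # [batch, classes]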


add layer

In [3]:
# weights
W1 = tf.Variable(tf.random_normal([2, 10], mean=0, stddev=1)) # [features, hidden_layer]
W2 = tf.Variable(tf.random_normal([10, 3], mean=0, stddev=1)) # [hidden_layer, output]

# biases
b1 = tf.Variable(tf.zeros([10])) # [hidden_layer]
b2 = tf.Variable(tf.zeros([3]))  # [output]

L1 = tf.add(tf.matmul(X, W1), b1)
L1 = tf.nn.sigmoid(L1)

model = tf.add(tf.matmul(L1, W2), b2)
model = tf.nn.softmax(model)
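
For shape intuition, here is a minimal NumPy sketch of the same forward pass (not part of the original notebook; random stand-ins are used for W1 and W2, with the zero biases folded in):

# (6, 2) inputs -> sigmoid hidden layer (6, 10) -> softmax outputs (6, 3)
H = 1.0 / (1.0 + np.exp(-(x_data @ np.random.randn(2, 10))))         # sigmoid(X W1 + b1), b1 = 0
logits = H @ np.random.randn(10, 3)                                   # L1 W2 + b2, b2 = 0
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)    # row-wise softmax
print(probs.shape)  # (6, 3); each row sums to 1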


cost function

  • Most models that use one-hot encoded labels use cross-entropy as the cost function:
    $E(w, b) = -\sum_{n=1}^{N} \left\{ t_n \log y_n + (1 - t_n) \log(1 - y_n) \right\}$
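
As a quick check of the formula, a hedged NumPy sketch with made-up values (not from the notebook):

# one-hot target t and a softmax-style prediction y for a single sample
t = np.array([0., 1., 0.])
y = np.array([0.2, 0.7, 0.1])
# E = -sum_n { t_n*log(y_n) + (1 - t_n)*log(1 - y_n) }
E = -np.sum(t * np.log(y) + (1 - t) * np.log(1 - y))
print(E)  # ~0.69 for these values

The cost in the next cell applies the same expression to every row and then averages over the batch with tf.reduce_mean.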
In [4]:
cost = tf.reduce_mean(-tf.reduce_sum(Y*tf.log(model) + (1-Y)*tf.log(1-model), axis=1))


modeling

In [5]:
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(cost)
In [6]:
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

for step in range(1000):
    sess.run(train_op, feed_dict={X:x_data, Y:y_data})
    
    if (step+1) % 50 == 0:
        print("{}, {:.5f}".format(step+1, 
                                 sess.run(cost, feed_dict={X: x_data, Y: y_data})))
50, 1.66444
100, 1.45811
150, 1.34925
200, 1.26442
250, 1.19221
300, 1.12926
350, 1.07365
400, 1.02398
450, 0.97921
500, 0.93851
550, 0.90123
600, 0.86685
650, 0.83496
700, 0.80520
750, 0.77730
800, 0.75101
850, 0.72614
900, 0.70253
950, 0.68003
1000, 0.65854


output

In [7]:
sess.run(model, feed_dict={X:x_data})
Out[7]:
array([[0.86290747, 0.08460085, 0.05249172],
       [0.408934  , 0.25658685, 0.33447912],
       [0.03734298, 0.09417698, 0.86848   ],
       [0.86290747, 0.08460085, 0.05249172],
       [0.86290747, 0.08460083, 0.05249173],
       [0.12566108, 0.106323  , 0.7680159 ]], dtype=float32)
In [8]:
prediction = tf.argmax(model, axis=1)
target = tf.argmax(Y, axis=1)

print("prediction: \t{}".format(sess.run(prediction, feed_dict={X:x_data})))
print("target: \t{}".format(sess.run(target, feed_dict={Y:y_data})))
prediction: 	[0 0 2 0 0 2]
target: 	[0 1 2 0 0 2]
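
Only the second sample is misclassified (predicted 0, target 1), which matches the second row of the softmax output above, where the probabilities are spread fairly evenly across the three classes; 5 of the 6 predictions are correct.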


accuracy

In [9]:
is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))
print("accuracy: {:.3f}".format(sess.run(accuracy, feed_dict={X:x_data, Y:y_data})))
accuracy: 0.833
