07_tensorboard02_example
In [1]:
#!/anaconda3/envs/py36/bin/python3
import numpy as np
import pandas as pd
import tensorflow as tf

import warnings
warnings.filterwarnings("ignore")
In [2]:
iris = pd.read_csv("./datas/iris.csv")
In [3]:
iris_target_names = np.unique(iris["species"].values)
In [4]:
for i, n in enumerate(iris_target_names):
    iris.replace(to_replace=iris_target_names[i], value=i, inplace=True)
In [5]:
from sklearn.model_selection import train_test_split

train_set, test_set = train_test_split(iris, test_size=0.2, random_state=42)

train_labels = train_set["species"].values
test_labels = test_set["species"].values
In [6]:
import matplotlib.pyplot as plt

%matplotlib inline
train_set.plot(kind="scatter", x="sepal_length", y="sepal_width", alpha=0.4, figsize=(10, 8), c="species", cmap=plt.get_cmap("jet"), colorbar=True, sharex=False)
plt.show()
In [7]:
train_set.hist(bins=50, figsize=(20, 15))
plt.show()
In [8]:
from sklearn.preprocessing import OneHotEncoder, StandardScaler, LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion

class DataFrameSelector(BaseEstimator, TransformerMixin):
    def __init__(self, lists):
        self.lists = lists
        
    def fit(self, X, y=None):
        return self
    
    def transform(self, X):
        return X.iloc[:, self.lists].values
    
# class LabelEncoders(BaseEstimator, TransformerMixin):
#     def __init__(self):
#         self.encoder = LabelEncoder()
        
#     def fit(self, X, y=None):
#         self.encoder.fit(X)
#         return self
    
#     def transform(self, X, y=None):
#         return self.encoder.transform(X)
In [9]:
n_list = [0, 1, 2, 3]
e_list = [4]
In [10]:
train_set["species"] = train_set["species"].astype(str)
In [11]:
num_pipeline = Pipeline([
    ["selector", DataFrameSelector(lists=n_list)],
    ["imputer" , SimpleImputer(strategy="median")],
    ["scaler"  , StandardScaler()]
])

encoding_pipeline = Pipeline([
    ["selector", DataFrameSelector(lists=e_list)],
    ["encoder", OneHotEncoder(sparse=False, categories="auto")]
])

full_pipeline = FeatureUnion(transformer_list=[
    ["nums", num_pipeline],
    ["encoding", encoding_pipeline]
])
In [12]:
scaled_train = full_pipeline.fit_transform(train_set)
scaled_test  = full_pipeline.fit_transform(test_set)
In [13]:
x_train, y_train = scaled_train[:, :4], scaled_train[:, 4:]
x_test, y_test = scaled_test[:, :4], scaled_test[:, 4:]
In [14]:
global_step = tf.Variable(0, trainable=False, name="global_step")
In [15]:
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)
In [16]:
# 4, 1
W1 = tf.Variable(tf.random_normal([4, 10], mean=0, stddev=1))
W2 = tf.Variable(tf.random_normal([10, 100], mean=0, stddev=1))
W3 = tf.Variable(tf.random_normal([100, 3], mean=0, stddev=1))

b1 = tf.zeros([10])
b2 = tf.zeros([100])
b3 = tf.zeros([3])
In [17]:
# Layer 1
with tf.name_scope("Layer1"):
    L1 = tf.add(tf.matmul(X, W1), b1)
    L1 = tf.nn.sigmoid(L1)
    
with tf.name_scope("Layer2"):
    L2 = tf.add(tf.matmul(L1, W2), b2)
    L2 = tf.nn.sigmoid(L2)
    
with tf.name_scope("Layer3"):
    model = tf.add(tf.matmul(L2, W3), b3)
    model = tf.nn.sigmoid(model)
In [18]:
with tf.name_scope("optiimizer"):
    cost = tf.reduce_mean(-tf.reduce_sum(Y*tf.log(model) + (1-Y)*tf.log(1-model)))
    optimizer = tf.train.AdadeltaOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(cost, global_step=global_step)
    
    tf.summary.scalar("cost", cost)
    tf.summary.histogram("Weight1", W1)
    tf.summary.histogram("Weight2", W2)
    tf.summary.histogram("Weight3", W3)
    tf.summary.histogram("bias1", b1)
    tf.summary.histogram("bias2", b2)
    tf.summary.histogram("bias3", b3)
In [19]:
sess = tf.Session()
saver = tf.train.Saver(tf.global_variables())
In [20]:
ckpt = tf.train.get_checkpoint_state("./model/iris")
if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
    saver.restore(sess, ckpt.model_checkpoint_path)
    
else:
    sess.run(tf.global_variables_initializer())
INFO:tensorflow:Restoring parameters from ./model/iris/dnn.ckpt-5000
In [21]:
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter("./logs/iris", sess.graph)
In [22]:
for step in range(5000):
    sess.run(train_op, feed_dict={X:x_train, Y:y_train})
    
    if (step+1) % 200 == 0:
        print("step: {}, cost: {:.5f}".\
             format(sess.run(global_step),
                    sess.run(cost, feed_dict={X:x_train, Y:y_train})))
        
    summary = sess.run(merged, feed_dict={X:x_train, Y:y_train})
    writer.add_summary(summary, global_step=sess.run(global_step))
    
saver.save(sess, "./model/iris/dnn.ckpt", global_step=global_step)
step: 5200, cost: 550.79474
step: 5400, cost: 511.44778
step: 5600, cost: 472.62866
step: 5800, cost: 434.81265
step: 6000, cost: 398.32742
step: 6200, cost: 362.83173
step: 6400, cost: 326.22653
step: 6600, cost: 287.97693
step: 6800, cost: 256.19949
step: 7000, cost: 231.45538
step: 7200, cost: 211.00574
step: 7400, cost: 193.48444
step: 7600, cost: 178.29463
step: 7800, cost: 165.20464
step: 8000, cost: 154.06615
step: 8200, cost: 144.64468
step: 8400, cost: 136.65207
step: 8600, cost: 129.83101
step: 8800, cost: 123.97330
step: 9000, cost: 118.91176
step: 9200, cost: 114.51005
step: 9400, cost: 110.65558
step: 9600, cost: 107.25186
step: 9800, cost: 104.21504
step: 10000, cost: 101.47087
Out[22]:
'./model/iris/dnn.ckpt-10000'
In [23]:
# print("{}".format(sess.run(model, feed_dict={X:x_scaled_test, Y:y_test})))

prediction = tf.argmax(model, 1)
target = tf.argmax(Y, 1)
is_correct = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(is_correct, tf.float32))


print("====================================")
print("train_prediction: \n{}".format(sess.run(prediction, feed_dict={X: x_train})))
print("train_target: \n{}\n".format(sess.run(target, feed_dict={Y:y_train})))
print("\naccuracy: \n{:.3f}%".format(sess.run(accuracy*100, 
                                              feed_dict={X: x_train, Y:y_train})))

print("\n====================================")
print("test_prediction: \n{}".format(sess.run(prediction, feed_dict={X: x_test})))
print("test_target: \n{}".format(sess.run(target, feed_dict={Y:y_test})))


print("\naccuracy: \n{:.3f}%".format(sess.run(accuracy*100, 
                                              feed_dict={X: x_test, Y:y_test})))
====================================
train_prediction: 
[0 0 2 0 0 2 2 0 0 0 2 2 2 0 0 1 2 2 2 2 1 2 1 0 2 1 0 0 0 1 2 0 0 0 1 0 1
 2 0 1 2 0 2 2 1 1 2 1 0 1 2 0 0 1 2 0 2 0 0 2 1 2 1 1 2 1 0 0 1 2 0 0 0 1
 2 0 2 2 0 1 2 2 2 2 0 2 1 2 1 1 2 1 2 1 0 1 2 2 0 1 2 2 0 2 0 2 2 2 1 2 1
 1 1 2 0 1 1 0 1 2]
train_target: 
[0 0 1 0 0 2 1 0 0 0 2 1 1 0 0 1 2 2 1 2 1 2 1 0 2 1 0 0 0 1 2 0 0 0 1 0 1
 2 0 1 2 0 2 2 1 1 2 1 0 1 2 0 0 1 1 0 2 0 0 1 1 2 1 2 2 1 0 0 2 2 0 0 0 1
 2 0 2 2 0 1 1 2 1 2 0 2 1 2 1 1 1 0 1 1 0 1 2 2 0 1 2 2 0 2 0 1 2 2 1 2 1
 1 2 2 0 1 2 0 1 2]


accuracy: 
85.833%

====================================
test_prediction: 
[1 0 2 1 1 0 1 2 1 1 2 0 0 0 0 2 2 1 1 2 0 2 0 2 2 2 2 2 0 0]
test_target: 
[1 0 2 1 1 0 1 2 1 1 2 0 0 0 0 1 2 1 1 2 0 2 0 2 2 2 2 2 0 0]

accuracy: 
96.667%
In [24]:
# tensorboard --logdir=./logs/iris
In [25]:
from IPython.core.display import display, HTML

display(HTML("<style> .container{width:100% !important;}</style>"))

'Deep_Learning' 카테고리의 다른 글

09.mnist_01_minibatch  (0) 2018.12.09
08.tensorboard03_example  (0) 2018.12.09
06.tensorboard01_example  (0) 2018.12.09
05.deep_neural_net_Costfun2  (0) 2018.12.09
04.deep_neural_net_Costfun1  (0) 2018.12.09

+ Recent posts