728x90
케라스를 활용한 CNN으로 MNIST 인식하기
In [1]:
# Widen the notebook container to 90% of the browser window for easier viewing.
# NOTE: `IPython.core.display` is a deprecated import location; the supported
# public path is `IPython.display` (same `display`/`HTML` objects).
from IPython.display import display, HTML

display(HTML("<style>.container {width:90% !important;}</style>"))
In [3]:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from keras.datasets import mnist
from keras.utils import np_utils
from keras import layers
from keras import models
from keras.callbacks import ModelCheckpoint, EarlyStopping
import warnings
warnings.filterwarnings('ignore')
import cv2
In [4]:
# Load the MNIST dataset as ((X_train, y_train), (X_test, y_test)).
# NOTE(review): this rebinds the name `mnist` from the imported module to the
# data tuple, so re-running this cell raises AttributeError — a distinct name
# such as `mnist_data` would be safer.
mnist = mnist.load_data()
In [5]:
# Bare last expression: rich-display the raw (train, test) tuple loaded above.
mnist
Out[5]:
In [6]:
# Unpack the (train, test) splits and report the shape of every array.
(X_train, y_train), (X_test, y_test) = mnist
for split in (X_train, y_train, X_test, y_test):
    print(split.shape)
28*28의 데이터
In [7]:
# Fix random seeds so results are reproducible across runs.
seed = 2019
np.random.seed(seed)
# TF1-style seeding API (this file uses TF1-era Keras); in TF2 this
# became tf.random.set_seed.
tf.set_random_seed(seed)
reshape(총 샘플 수, 차원 속성 수)
In [8]:
# 흑백이미지이므로 채널 1
# Add a channel axis (grayscale images -> 1 channel), cast to float32 and
# scale pixel values from [0, 255] down to [0, 1].
# Using -1 lets numpy infer the sample count instead of hard-coding
# 60000/10000, so the same cell works on any subset of the data.
X_train = X_train.reshape(-1, 28, 28, 1).astype('float32') / 255
X_test = X_test.reshape(-1, 28, 28, 1).astype('float32') / 255
# One-hot encode the integer labels; pin num_classes=10 so the encoding
# width does not depend on which digits happen to appear in the labels.
y_train = np_utils.to_categorical(y_train, 10)
y_test = np_utils.to_categorical(y_test, 10)
convolutional 모델 생성
컨볼루션 신경망을 3개 거친 후, 맥스 풀링, 드롭아웃(노드의 20%를 끔)을 하겠습니다.
그 후, 1차원으로 차원변경한 후 150개의 노드를 지닌 기본 층에 연결하고 마지막으로 드롭아웃(노드의 35%를 끔)한 후 마치겠습니다.
In [12]:
# Build the CNN: three convolution layers, max-pooling, dropout, then a
# dense classification head.
model = models.Sequential()
# model.add(layers.Conv2D(num_filters, kernel_size=(rows, cols), input_shape=(rows, cols, channels: 1 or 3)))
# The three convolution layers use 30, 40 and 50 filters respectively.
model.add(layers.Conv2D(30, (3,3), input_shape=(28,28,1), activation='relu'))
# 'SAME' padding preserves spatial size; strides=[1,1] is the default value.
# (The first conv above uses the default 'valid' padding.)
model.add(layers.Conv2D(40, (3,3), activation='relu', padding='SAME', strides=[1,1]))
model.add(layers.Conv2D(50, (3,3), activation='relu', padding='SAME'))
# Downsample by 2x, then randomly drop 20% of activations to regularize.
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.Dropout(0.2))
# Flatten to 1-D, pass through a 150-unit hidden layer with 35% dropout,
# and classify into 10 softmax outputs (one per digit).
model.add(layers.Flatten())
model.add(layers.Dense(150, activation='relu'))
model.add(layers.Dropout(0.35))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
모델 컴파일
In [17]:
# Cross-entropy loss for one-hot labels, Adam optimizer, track accuracy.
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
In [13]:
import os

# Directory where model checkpoints will be saved.
# NOTE(review): hard-coded absolute Windows path — not portable across machines.
checkpoint_dir = 'C:/Users/seob/Desktop/ml,dl/mnist_CNN/'
# makedirs(..., exist_ok=True) creates missing parents too and avoids the
# check-then-create race of exists()+mkdir(); the name `checkpoint_dir` also
# no longer shadows the built-in `dir`.
os.makedirs(checkpoint_dir, exist_ok=True)
In [25]:
# Save the model whenever validation loss improves; the epoch number and
# val_loss are embedded in the checkpoint filename.
path = 'C:/Users/seob/Desktop/ml,dl/mnist_CNN/{epoch:02d} - {val_loss:.4f}.hdf5'
checkpoint = ModelCheckpoint(filepath=path, monitor='val_loss', verbose=1, save_best_only=True)
# Stop training if val_loss fails to improve for 30 consecutive epochs.
# NOTE(review): with only 10 training epochs below, this never triggers.
stop = EarlyStopping(monitor='val_loss', patience=30)
모델 학습
In [27]:
# Train for 10 epochs, validating against the test set each epoch.
# NOTE(review): the test set doubles as the validation set here, so the
# "best" checkpoint is selected on test data — a held-out validation split
# would be cleaner.
history = model.fit(X_train, y_train, batch_size=600, epochs=10, verbose=1, callbacks=[stop, checkpoint], validation_data=(X_test, y_test))
# Per-epoch metrics recorded by Keras. 'acc'/'val_acc' are the key names
# used by this Keras version; newer releases use 'accuracy'/'val_accuracy'.
acc = history.history['acc']
v_acc = history.history['val_acc']
loss = history.history['loss']
v_loss = history.history['val_loss']
# Plot accuracy and loss curves for train vs. test against epoch index.
x_len = np.arange(len(acc))
plt.plot(x_len, acc, c='y', label='Train set acc')
plt.plot(x_len, v_acc, c='g', label='Test set acc')
plt.plot(x_len, loss, c='b', label='Train set loss')
plt.plot(x_len, v_loss, c='r', label='Test set loss')
plt.legend(loc='best')
plt.xlabel('epoch')
plt.show()
반응형
LIST
'Legacy > [Legacy] Machine Learning' 카테고리의 다른 글
타이타닉으로 그리드서치(GridSearch) (0) | 2019.06.03 |
---|---|
LabelEncoder & OneHotEncoder (0) | 2019.02.27 |
Expected 2D array, got 1D array instead 에러 : flatten(), reshape(), ravel() 알아보기 (0) | 2019.02.23 |
교차검증 (cross-validation) (2) | 2019.02.15 |
feature engineering 특성공학(원핫인코딩, get_dummies, RFE) (0) | 2019.02.13 |