from IPython.display import display, Image
# REF: https://towardsdatascience.com/types-of-convolutions-in-deep-learning-717013397f4d
display(Image(filename="img/cnn.png"))
display(Image(filename="img/multikernel.png"))
import tensorflow as tf
print(tf.__version__)
2.11.0
# Use a CNN, the most widely known neural network model for image processing
from tensorflow.keras import layers
from tensorflow.keras import models
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
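# Quick shape check (a sanity-check sketch, not required for training): the
# images are now (samples, height, width, channels) and the labels are one-hot.
print(train_images.shape, train_labels.shape)   # (60000, 28, 28, 1) (60000, 10)
print(test_images.shape, test_labels.shape)     # (10000, 28, 28, 1) (10000, 10)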
# The previous model fed each input as a single flattened 28x28 vector,
# but for the CNN model we reshape it into a 2D plane plus a feature (channel) dimension.
# None is the number of input samples used per training step (the batch dimension),
# and the last dimension 1 is the number of features: MNIST images are grayscale, so there is a single channel.
model = models.Sequential()
### Layer L1
model.add(layers.Conv2D(filters=32, kernel_size=(3, 3),
                        activation='relu',
                        padding="same",  # default: "valid" (see the shape aside below)
                        input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
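# Aside (illustrative sketch, separate from the model being built above):
# padding="same" preserves the 28x28 spatial size, while the default "valid"
# lets a 3x3 kernel shrink the output to 26x26.
import numpy as np
x = np.zeros((1, 28, 28, 1), dtype="float32")
print(layers.Conv2D(8, (3, 3), padding="same")(x).shape)    # (1, 28, 28, 8)
print(layers.Conv2D(8, (3, 3), padding="valid")(x).shape)   # (1, 26, 26, 8)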
### Layer L2
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3),
                        activation='relu',
                        padding="same"))
model.add(layers.MaxPooling2D((2, 2)))
### Layer L3
model.add(layers.Conv2D(filters=64, kernel_size=(3, 3),
                        activation='relu',
                        padding="same"))
model.add(layers.MaxPooling2D((2, 2)))
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 28, 28, 32) 320 max_pooling2d (MaxPooling2D (None, 14, 14, 32) 0 ) conv2d_1 (Conv2D) (None, 14, 14, 64) 18496 max_pooling2d_1 (MaxPooling (None, 7, 7, 64) 0 2D) conv2d_2 (Conv2D) (None, 7, 7, 64) 36928 max_pooling2d_2 (MaxPooling (None, 3, 3, 64) 0 2D) ================================================================= Total params: 55,744 Trainable params: 55,744 Non-trainable params: 0 _________________________________________________________________
display(Image(filename="img/L1_Cnn.png"))
display(Image(filename="img/L2_Cnn.png"))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv2d (Conv2D) (None, 28, 28, 32) 320 max_pooling2d (MaxPooling2D (None, 14, 14, 32) 0 ) conv2d_1 (Conv2D) (None, 14, 14, 64) 18496 max_pooling2d_1 (MaxPooling (None, 7, 7, 64) 0 2D) conv2d_2 (Conv2D) (None, 7, 7, 64) 36928 max_pooling2d_2 (MaxPooling (None, 3, 3, 64) 0 2D) flatten (Flatten) (None, 576) 0 dense (Dense) (None, 64) 36928 dense_1 (Dense) (None, 10) 650 ================================================================= Total params: 93,322 Trainable params: 93,322 Non-trainable params: 0 _________________________________________________________________
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
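# Note: categorical_crossentropy matches the one-hot labels produced by
# to_categorical above; with raw integer labels, loss='sparse_categorical_crossentropy'
# would be used instead.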
model.fit(train_images, train_labels, epochs=5, batch_size=64)
Epoch 1/5
938/938 [==============================] - 54s 57ms/step - loss: 0.1911 - accuracy: 0.9398
Epoch 2/5
938/938 [==============================] - 63s 68ms/step - loss: 0.0484 - accuracy: 0.9851
Epoch 3/5
938/938 [==============================] - 62s 66ms/step - loss: 0.0333 - accuracy: 0.9895
Epoch 4/5
938/938 [==============================] - 60s 64ms/step - loss: 0.0248 - accuracy: 0.9923
Epoch 5/5
938/938 [==============================] - 52s 55ms/step - loss: 0.0198 - accuracy: 0.9941
<keras.callbacks.History at 0x230079cf0d0>
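# The value above is the History object returned by fit(); if the call is
# captured as `history = model.fit(...)`, the per-epoch metrics logged during
# training can be read back from history.history['loss'] and
# history.history['accuracy'] (keys follow the compile() metrics).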
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(test_acc)
313/313 [==============================] - 4s 12ms/step - loss: 0.0308 - accuracy: 0.9891
0.9890999794006348
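# A minimal prediction sketch (illustrative): the softmax output is a
# per-class probability vector, so argmax recovers the predicted digit.
import numpy as np
probs = model.predict(test_images[:5])
print(np.argmax(probs, axis=1))              # predicted digits for 5 samples
print(np.argmax(test_labels[:5], axis=1))    # ground-truth digits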