import os, shutil
### Mount Google Drive
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
#!cp -r '/content/drive/My Drive/dataset/cats_dogs' '/content/'
#!ls -ls '/content/cats_dogs'
# Unzip the archives
#!rm -rf '/content/datasets/'
#!unzip '/content/cats_dogs/test1.zip' -d '/content/datasets/'
#!unzip '/content/cats_dogs/train.zip' -d '/content/datasets/'
!ls -al '/content/datasets/train' | head -5
!ls -l '/content/datasets/train' | grep ^- | wc -l
!ls -al '/content/datasets/test1' | head -5
!ls -l '/content/datasets/test1' | grep ^- | wc -l
total 609256
drwxr-xr-x 2 root root 765952 Sep 20  2013 .
drwxr-xr-x 4 root root   4096 Nov 10 16:19 ..
-rw-r--r-- 1 root root  12414 Sep 20  2013 cat.0.jpg
-rw-r--r-- 1 root root  21944 Sep 20  2013 cat.10000.jpg
25000
total 304280
drwxr-xr-x 2 root root 290816 Sep 20  2013 .
drwxr-xr-x 4 root root   4096 Nov 10 16:19 ..
-rw-r--r-- 1 root root  54902 Sep 20  2013 10000.jpg
-rw-r--r-- 1 root root  21671 Sep 20  2013 10001.jpg
12500
# Path to the directory where the original dataset was unpacked
ori_dataset_dir = './datasets/train'
# Directory where the smaller dataset will be stored
base_dir = './datasets/cats_and_dogs_small'
# Delete the directory first so the cell can be re-run
if os.path.exists(base_dir):
    shutil.rmtree(base_dir)
os.mkdir(base_dir)
# Directories for the training, validation, and test splits
train_dir = os.path.join(base_dir, 'train')
os.mkdir(train_dir)
val_dir = os.path.join(base_dir, 'validation')
os.mkdir(val_dir)
test_dir = os.path.join(base_dir, 'test')
os.mkdir(test_dir)
# Directory for training cat pictures
train_cats_dir = os.path.join(train_dir, 'cats')
os.mkdir(train_cats_dir)
# Directory for training dog pictures
train_dogs_dir = os.path.join(train_dir, 'dogs')
os.mkdir(train_dogs_dir)
# Directory for validation cat pictures
val_cats_dir = os.path.join(val_dir, 'cats')
os.mkdir(val_cats_dir)
# Directory for validation dog pictures
val_dogs_dir = os.path.join(val_dir, 'dogs')
os.mkdir(val_dogs_dir)
# Directory for test cat pictures
test_cats_dir = os.path.join(test_dir, 'cats')
os.mkdir(test_cats_dir)
# Directory for test dog pictures
test_dogs_dir = os.path.join(test_dir, 'dogs')
os.mkdir(test_dogs_dir)
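As an aside, the nine mkdir calls above all follow one pattern; a minimal equivalent sketch using os.makedirs (same layout, reusing base_dir from above):

for split in ['train', 'validation', 'test']:
    for animal in ['cats', 'dogs']:
        # exist_ok=True makes the cell safe to re-run
        os.makedirs(os.path.join(base_dir, split, animal), exist_ok=True)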
# Copy the first 1,000 cat images to train_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(ori_dataset_dir, fname)
    dst = os.path.join(train_cats_dir, fname)
    shutil.copyfile(src, dst)
# Copy the next 500 cat images to val_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(ori_dataset_dir, fname)
    dst = os.path.join(val_cats_dir, fname)
    shutil.copyfile(src, dst)
# Copy the next 500 cat images to test_cats_dir
fnames = ['cat.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(ori_dataset_dir, fname)
    dst = os.path.join(test_cats_dir, fname)
    shutil.copyfile(src, dst)
# Copy the first 1,000 dog images to train_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000)]
for fname in fnames:
    src = os.path.join(ori_dataset_dir, fname)
    dst = os.path.join(train_dogs_dir, fname)
    shutil.copyfile(src, dst)
# Copy the next 500 dog images to val_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1000, 1500)]
for fname in fnames:
    src = os.path.join(ori_dataset_dir, fname)
    dst = os.path.join(val_dogs_dir, fname)
    shutil.copyfile(src, dst)
# Copy the next 500 dog images to test_dogs_dir
fnames = ['dog.{}.jpg'.format(i) for i in range(1500, 2000)]
for fname in fnames:
    src = os.path.join(ori_dataset_dir, fname)
    dst = os.path.join(test_dogs_dir, fname)
    shutil.copyfile(src, dst)
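The six copy loops above also share one pattern, so they could be factored into a helper; a minimal sketch of a hypothetical copy_images function (same file-naming scheme as the original dataset):

def copy_images(prefix, start, stop, dst_dir):
    # Copies '<prefix>.<i>.jpg' for i in [start, stop) into dst_dir
    for i in range(start, stop):
        fname = '{}.{}.jpg'.format(prefix, i)
        shutil.copyfile(os.path.join(ori_dataset_dir, fname),
                        os.path.join(dst_dir, fname))

# e.g. copy_images('cat', 0, 1000, train_cats_dir)
#      copy_images('dog', 1500, 2000, test_dogs_dir)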
print('Total training cat images:', len(os.listdir(train_cats_dir)))
print('Total training dog images:', len(os.listdir(train_dogs_dir)))
print('Total validation cat images:', len(os.listdir(val_cats_dir)))
print('Total validation dog images:', len(os.listdir(val_dogs_dir)))
print('Total test cat images:', len(os.listdir(test_cats_dir)))
print('Total test dog images:', len(os.listdir(test_dogs_dir)))
Total training cat images: 1000
Total training dog images: 1000
Total validation cat images: 500
Total validation dog images: 500
Total test cat images: 500
Total test dog images: 500
## Image counts per split
num_cats_tr = len(os.listdir(train_cats_dir))
num_dogs_tr = len(os.listdir(train_dogs_dir))
num_cats_val = len(os.listdir(val_cats_dir))
num_dogs_val = len(os.listdir(val_dogs_dir))
total_train = num_cats_tr + num_dogs_tr
total_val = num_cats_val + num_dogs_val
print("학습용 데이터 : ", total_train)
print("검증용 데이터 : ", total_val)
Training samples :  2000
Validation samples :  1000
from tensorflow.keras.applications import VGG16
conv_base = VGG16(weights='imagenet',
                  include_top=False,
                  input_shape=(150, 150, 3))
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5
58892288/58889256 [==============================] - 1s 0us/step
58900480/58889256 [==============================] - 1s 0us/step
from tensorflow.keras import models
from tensorflow.keras import layers
model = models.Sequential()
model.add(conv_base)
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dense(1, activation='sigmoid'))
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= vgg16 (Functional) (None, 4, 4, 512) 14714688 flatten (Flatten) (None, 8192) 0 dense (Dense) (None, 256) 2097408 dense_1 (Dense) (None, 1) 257 ================================================================= Total params: 16,812,353 Trainable params: 16,812,353 Non-trainable params: 0 _________________________________________________________________
print('Trainable weights before freezing conv_base:', len(model.trainable_weights))
conv_base.trainable = False
print('Trainable weights after freezing conv_base:', len(model.trainable_weights))
Trainable weights before freezing conv_base: 30
Trainable weights after freezing conv_base: 4
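As a sanity check, the four weights left trainable can be listed; a minimal sketch (they should be the kernel and bias of each Dense layer). Note that in Keras, changes to trainable only take effect when the model is compiled, which happens below:

for w in model.trainable_weights:
    # Expect dense/kernel, dense/bias, dense_1/kernel, dense_1/bias
    print(w.name, w.shape)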
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import optimizers
batch_size = 20
epochs = 30
train_datagen = ImageDataGenerator(
    rescale=1./255,
    rotation_range=20,
    width_shift_range=0.1,
    height_shift_range=0.1,
    shear_range=0.1,
    zoom_range=0.1,
    horizontal_flip=True,
    fill_mode='nearest')
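To sanity-check these augmentation settings before training, one can preview a few randomly transformed versions of a single training image; a minimal sketch (assumes matplotlib, which the plotting cells below import anyway):

from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt

sample = os.path.join(train_cats_dir, os.listdir(train_cats_dir)[0])
x = image.img_to_array(image.load_img(sample, target_size=(150, 150)))
x = x.reshape((1,) + x.shape)  # flow() expects a batch dimension
for i, batch in enumerate(train_datagen.flow(x, batch_size=1)):
    plt.subplot(1, 4, i + 1)
    plt.imshow(batch[0])       # already rescaled to [0, 1]
    plt.axis('off')
    if i == 3:
        break                  # flow() loops indefinitely
plt.show()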
train_generator = train_datagen.flow_from_directory(
    # Target directory
    train_dir,
    # Resize all images to 150 × 150
    target_size=(150, 150),
    batch_size=batch_size,
    # Binary labels to match the binary_crossentropy loss
    class_mode='binary')
# The validation data must not be augmented!
test_datagen = ImageDataGenerator(rescale=1./255)
validation_generator = test_datagen.flow_from_directory(
    val_dir,
    target_size=(150, 150),
    batch_size=batch_size,
    class_mode='binary')
Found 2000 images belonging to 2 classes.
Found 1000 images belonging to 2 classes.
%%time
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(learning_rate=2e-5),
              metrics=['acc'])
history = model.fit(
    train_generator,
    steps_per_epoch=100,  # total_train // batch_size
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=50,  # total_val // batch_size
    verbose=2)
Epoch 1/30
100/100 - 59s - loss: 0.5314 - acc: 0.7560 - val_loss: 0.4081 - val_acc: 0.8210 - 59s/epoch - 589ms/step
Epoch 2/30
100/100 - 26s - loss: 0.4006 - acc: 0.8345 - val_loss: 0.3440 - val_acc: 0.8540 - 26s/epoch - 259ms/step
Epoch 3/30
100/100 - 26s - loss: 0.3520 - acc: 0.8525 - val_loss: 0.3028 - val_acc: 0.8770 - 26s/epoch - 256ms/step
Epoch 4/30
100/100 - 25s - loss: 0.3211 - acc: 0.8710 - val_loss: 0.2839 - val_acc: 0.8880 - 25s/epoch - 254ms/step
Epoch 5/30
100/100 - 26s - loss: 0.3059 - acc: 0.8660 - val_loss: 0.2729 - val_acc: 0.8990 - 26s/epoch - 256ms/step
Epoch 6/30
100/100 - 26s - loss: 0.2930 - acc: 0.8780 - val_loss: 0.2674 - val_acc: 0.8930 - 26s/epoch - 256ms/step
Epoch 7/30
100/100 - 26s - loss: 0.2797 - acc: 0.8845 - val_loss: 0.2583 - val_acc: 0.8980 - 26s/epoch - 257ms/step
Epoch 8/30
100/100 - 25s - loss: 0.2728 - acc: 0.8925 - val_loss: 0.2597 - val_acc: 0.8940 - 25s/epoch - 255ms/step
Epoch 9/30
100/100 - 26s - loss: 0.2615 - acc: 0.8965 - val_loss: 0.2524 - val_acc: 0.8980 - 26s/epoch - 256ms/step
Epoch 10/30
100/100 - 25s - loss: 0.2495 - acc: 0.8930 - val_loss: 0.2481 - val_acc: 0.9000 - 25s/epoch - 255ms/step
Epoch 11/30
100/100 - 25s - loss: 0.2499 - acc: 0.9035 - val_loss: 0.2455 - val_acc: 0.8980 - 25s/epoch - 255ms/step
Epoch 12/30
100/100 - 25s - loss: 0.2408 - acc: 0.9020 - val_loss: 0.2444 - val_acc: 0.8980 - 25s/epoch - 252ms/step
Epoch 13/30
100/100 - 25s - loss: 0.2380 - acc: 0.9070 - val_loss: 0.2444 - val_acc: 0.9040 - 25s/epoch - 253ms/step
Epoch 14/30
100/100 - 25s - loss: 0.2218 - acc: 0.9115 - val_loss: 0.2512 - val_acc: 0.8970 - 25s/epoch - 251ms/step
Epoch 15/30
100/100 - 25s - loss: 0.2217 - acc: 0.9090 - val_loss: 0.2392 - val_acc: 0.9040 - 25s/epoch - 253ms/step
Epoch 16/30
100/100 - 25s - loss: 0.2218 - acc: 0.9125 - val_loss: 0.2459 - val_acc: 0.9010 - 25s/epoch - 254ms/step
Epoch 17/30
100/100 - 26s - loss: 0.2241 - acc: 0.9080 - val_loss: 0.2409 - val_acc: 0.9000 - 26s/epoch - 256ms/step
Epoch 18/30
100/100 - 25s - loss: 0.2164 - acc: 0.9105 - val_loss: 0.2366 - val_acc: 0.9020 - 25s/epoch - 253ms/step
Epoch 19/30
100/100 - 25s - loss: 0.2038 - acc: 0.9200 - val_loss: 0.2511 - val_acc: 0.8950 - 25s/epoch - 254ms/step
Epoch 20/30
100/100 - 25s - loss: 0.2090 - acc: 0.9195 - val_loss: 0.2372 - val_acc: 0.9050 - 25s/epoch - 254ms/step
Epoch 21/30
100/100 - 25s - loss: 0.2012 - acc: 0.9225 - val_loss: 0.2363 - val_acc: 0.9020 - 25s/epoch - 251ms/step
Epoch 22/30
100/100 - 25s - loss: 0.2096 - acc: 0.9140 - val_loss: 0.2360 - val_acc: 0.8990 - 25s/epoch - 251ms/step
Epoch 23/30
100/100 - 25s - loss: 0.2083 - acc: 0.9245 - val_loss: 0.2351 - val_acc: 0.9030 - 25s/epoch - 252ms/step
Epoch 24/30
100/100 - 25s - loss: 0.2057 - acc: 0.9175 - val_loss: 0.2412 - val_acc: 0.9000 - 25s/epoch - 254ms/step
Epoch 25/30
100/100 - 25s - loss: 0.1910 - acc: 0.9290 - val_loss: 0.2473 - val_acc: 0.9010 - 25s/epoch - 251ms/step
Epoch 26/30
100/100 - 25s - loss: 0.1928 - acc: 0.9220 - val_loss: 0.2353 - val_acc: 0.9080 - 25s/epoch - 250ms/step
Epoch 27/30
100/100 - 25s - loss: 0.1919 - acc: 0.9230 - val_loss: 0.2391 - val_acc: 0.9080 - 25s/epoch - 250ms/step
Epoch 28/30
100/100 - 25s - loss: 0.1856 - acc: 0.9275 - val_loss: 0.2402 - val_acc: 0.9020 - 25s/epoch - 251ms/step
Epoch 29/30
100/100 - 25s - loss: 0.1815 - acc: 0.9300 - val_loss: 0.2395 - val_acc: 0.9030 - 25s/epoch - 251ms/step
Epoch 30/30
100/100 - 25s - loss: 0.1859 - acc: 0.9280 - val_loss: 0.2393 - val_acc: 0.9030 - 25s/epoch - 251ms/step
model.save('cats_dogs_small_3_dataAug_VGG_30_epoch.h5')
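Rather than saving manually after a fixed 30 epochs, checkpointing and early stopping could be delegated to callbacks; a minimal sketch using standard tf.keras callbacks (hypothetical filename and patience value):

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint

callbacks = [
    # Stop once val_loss has not improved for 5 epochs, keeping the best weights
    EarlyStopping(monitor='val_loss', patience=5, restore_best_weights=True),
    # Save the best-so-far model to disk whenever val_loss improves
    ModelCheckpoint('cats_dogs_best.h5', monitor='val_loss', save_best_only=True),
]
# Pass callbacks=callbacks to model.fit(...) above to enable them.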
import matplotlib.pyplot as plt
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(acc))  # avoid shadowing the epochs hyperparameter
plt.plot(epochs_range, acc, 'bo', label='Training acc')
plt.plot(epochs_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs_range, loss, 'bo', label='Training loss')
plt.plot(epochs_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
If the classifier on top has not been trained first, the error signal propagated through the network during fine-tuning will be too large and would destroy the representations the convolutional base has already learned.
The first three steps of fine-tuning (adding the custom classifier on top of the pretrained base, freezing the base, and training the classifier) were already completed during feature extraction above; what remains is to unfreeze the top of conv_base and retrain it jointly with the classifier.
conv_base.summary()
Model: "vgg16" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_1 (InputLayer) [(None, 150, 150, 3)] 0 block1_conv1 (Conv2D) (None, 150, 150, 64) 1792 block1_conv2 (Conv2D) (None, 150, 150, 64) 36928 block1_pool (MaxPooling2D) (None, 75, 75, 64) 0 block2_conv1 (Conv2D) (None, 75, 75, 128) 73856 block2_conv2 (Conv2D) (None, 75, 75, 128) 147584 block2_pool (MaxPooling2D) (None, 37, 37, 128) 0 block3_conv1 (Conv2D) (None, 37, 37, 256) 295168 block3_conv2 (Conv2D) (None, 37, 37, 256) 590080 block3_conv3 (Conv2D) (None, 37, 37, 256) 590080 block3_pool (MaxPooling2D) (None, 18, 18, 256) 0 block4_conv1 (Conv2D) (None, 18, 18, 512) 1180160 block4_conv2 (Conv2D) (None, 18, 18, 512) 2359808 block4_conv3 (Conv2D) (None, 18, 18, 512) 2359808 block4_pool (MaxPooling2D) (None, 9, 9, 512) 0 block5_conv1 (Conv2D) (None, 9, 9, 512) 2359808 block5_conv2 (Conv2D) (None, 9, 9, 512) 2359808 block5_conv3 (Conv2D) (None, 9, 9, 512) 2359808 block5_pool (MaxPooling2D) (None, 4, 4, 512) 0 ================================================================= Total params: 14,714,688 Trainable params: 0 Non-trainable params: 14,714,688 _________________________________________________________________
conv_base.trainable = True
# Unfreeze everything from block5_conv1 onward; keep earlier layers frozen
set_trainable = False
for layer in conv_base.layers:
    if layer.name == 'block5_conv1':
        set_trainable = True
    if set_trainable:
        layer.trainable = True
    else:
        layer.trainable = False
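A quick check that the unfreezing behaved as intended (only the block5_* layers should report True):

for layer in conv_base.layers:
    print('{:15s} trainable={}'.format(layer.name, layer.trainable))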
%%time
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.RMSprop(learning_rate=1e-5),
              metrics=['acc'])
history = model.fit(
    train_generator,
    steps_per_epoch=100,  # total_train // batch_size
    epochs=50,
    validation_data=validation_generator,
    validation_steps=50,  # total_val // batch_size
    verbose=1)
Epoch 1/50
100/100 [==============================] - 30s 271ms/step - loss: 0.2049 - acc: 0.9125 - val_loss: 0.2927 - val_acc: 0.8840
Epoch 2/50
100/100 [==============================] - 27s 268ms/step - loss: 0.1486 - acc: 0.9435 - val_loss: 0.2194 - val_acc: 0.9220
Epoch 3/50
100/100 [==============================] - 27s 269ms/step - loss: 0.1357 - acc: 0.9475 - val_loss: 0.2155 - val_acc: 0.9170
Epoch 4/50
100/100 [==============================] - 27s 267ms/step - loss: 0.1167 - acc: 0.9605 - val_loss: 0.2070 - val_acc: 0.9190
Epoch 5/50
100/100 [==============================] - 27s 270ms/step - loss: 0.1085 - acc: 0.9580 - val_loss: 0.2113 - val_acc: 0.9200
Epoch 6/50
100/100 [==============================] - 27s 269ms/step - loss: 0.0840 - acc: 0.9675 - val_loss: 0.2349 - val_acc: 0.9110
Epoch 7/50
100/100 [==============================] - 27s 271ms/step - loss: 0.0924 - acc: 0.9680 - val_loss: 0.2123 - val_acc: 0.9200
Epoch 8/50
100/100 [==============================] - 27s 267ms/step - loss: 0.0712 - acc: 0.9740 - val_loss: 0.2333 - val_acc: 0.9210
Epoch 9/50
100/100 [==============================] - 27s 268ms/step - loss: 0.0582 - acc: 0.9780 - val_loss: 0.2257 - val_acc: 0.9280
Epoch 10/50
100/100 [==============================] - 27s 270ms/step - loss: 0.0519 - acc: 0.9830 - val_loss: 0.2757 - val_acc: 0.9120
Epoch 11/50
100/100 [==============================] - 27s 272ms/step - loss: 0.0546 - acc: 0.9830 - val_loss: 0.2997 - val_acc: 0.9070
Epoch 12/50
100/100 [==============================] - 27s 273ms/step - loss: 0.0501 - acc: 0.9850 - val_loss: 0.2472 - val_acc: 0.9150
Epoch 13/50
100/100 [==============================] - 27s 271ms/step - loss: 0.0501 - acc: 0.9840 - val_loss: 0.2334 - val_acc: 0.9210
Epoch 14/50
100/100 [==============================] - 27s 271ms/step - loss: 0.0458 - acc: 0.9855 - val_loss: 0.2115 - val_acc: 0.9210
Epoch 15/50
100/100 [==============================] - 27s 270ms/step - loss: 0.0335 - acc: 0.9875 - val_loss: 0.2254 - val_acc: 0.9240
Epoch 16/50
100/100 [==============================] - 27s 270ms/step - loss: 0.0286 - acc: 0.9895 - val_loss: 0.2243 - val_acc: 0.9300
Epoch 17/50
100/100 [==============================] - 27s 270ms/step - loss: 0.0294 - acc: 0.9940 - val_loss: 0.2262 - val_acc: 0.9270
Epoch 18/50
100/100 [==============================] - 27s 275ms/step - loss: 0.0269 - acc: 0.9900 - val_loss: 0.2338 - val_acc: 0.9240
Epoch 19/50
100/100 [==============================] - 28s 275ms/step - loss: 0.0246 - acc: 0.9905 - val_loss: 0.2649 - val_acc: 0.9150
Epoch 20/50
100/100 [==============================] - 27s 269ms/step - loss: 0.0237 - acc: 0.9925 - val_loss: 0.3777 - val_acc: 0.9030
Epoch 21/50
100/100 [==============================] - 27s 267ms/step - loss: 0.0184 - acc: 0.9935 - val_loss: 0.3034 - val_acc: 0.9130
Epoch 22/50
100/100 [==============================] - 26s 264ms/step - loss: 0.0221 - acc: 0.9925 - val_loss: 0.2562 - val_acc: 0.9250
Epoch 23/50
100/100 [==============================] - 26s 264ms/step - loss: 0.0185 - acc: 0.9940 - val_loss: 0.2645 - val_acc: 0.9210
Epoch 24/50
100/100 [==============================] - 27s 265ms/step - loss: 0.0216 - acc: 0.9920 - val_loss: 0.2532 - val_acc: 0.9260
Epoch 25/50
100/100 [==============================] - 26s 264ms/step - loss: 0.0104 - acc: 0.9975 - val_loss: 0.2571 - val_acc: 0.9280
Epoch 26/50
100/100 [==============================] - 27s 266ms/step - loss: 0.0145 - acc: 0.9955 - val_loss: 0.4208 - val_acc: 0.9070
Epoch 27/50
100/100 [==============================] - 27s 267ms/step - loss: 0.0147 - acc: 0.9960 - val_loss: 0.3010 - val_acc: 0.9260
Epoch 28/50
100/100 [==============================] - 27s 270ms/step - loss: 0.0131 - acc: 0.9975 - val_loss: 0.3044 - val_acc: 0.9230
Epoch 29/50
100/100 [==============================] - 27s 269ms/step - loss: 0.0113 - acc: 0.9955 - val_loss: 0.3494 - val_acc: 0.9260
Epoch 30/50
100/100 [==============================] - 27s 268ms/step - loss: 0.0119 - acc: 0.9950 - val_loss: 0.2851 - val_acc: 0.9300
Epoch 31/50
100/100 [==============================] - 27s 268ms/step - loss: 0.0117 - acc: 0.9955 - val_loss: 0.2791 - val_acc: 0.9300
Epoch 32/50
100/100 [==============================] - 27s 267ms/step - loss: 0.0168 - acc: 0.9950 - val_loss: 0.2667 - val_acc: 0.9270
Epoch 33/50
100/100 [==============================] - 27s 267ms/step - loss: 0.0070 - acc: 0.9980 - val_loss: 0.3492 - val_acc: 0.9250
Epoch 34/50
100/100 [==============================] - 27s 266ms/step - loss: 0.0106 - acc: 0.9960 - val_loss: 0.3605 - val_acc: 0.9200
Epoch 35/50
100/100 [==============================] - 27s 269ms/step - loss: 0.0135 - acc: 0.9950 - val_loss: 0.3302 - val_acc: 0.9180
Epoch 36/50
100/100 [==============================] - 27s 268ms/step - loss: 0.0054 - acc: 0.9975 - val_loss: 0.3356 - val_acc: 0.9280
Epoch 37/50
100/100 [==============================] - 27s 267ms/step - loss: 0.0051 - acc: 0.9985 - val_loss: 0.2848 - val_acc: 0.9330
Epoch 38/50
100/100 [==============================] - 27s 267ms/step - loss: 0.0121 - acc: 0.9950 - val_loss: 0.3051 - val_acc: 0.9260
Epoch 39/50
100/100 [==============================] - 26s 263ms/step - loss: 0.0087 - acc: 0.9965 - val_loss: 0.3200 - val_acc: 0.9230
Epoch 40/50
100/100 [==============================] - 27s 266ms/step - loss: 0.0067 - acc: 0.9980 - val_loss: 0.3095 - val_acc: 0.9260
Epoch 41/50
100/100 [==============================] - 27s 265ms/step - loss: 0.0031 - acc: 0.9995 - val_loss: 0.3152 - val_acc: 0.9280
Epoch 42/50
100/100 [==============================] - 27s 265ms/step - loss: 0.0128 - acc: 0.9965 - val_loss: 0.2791 - val_acc: 0.9280
Epoch 43/50
100/100 [==============================] - 26s 264ms/step - loss: 0.0055 - acc: 0.9985 - val_loss: 0.3054 - val_acc: 0.9360
Epoch 44/50
100/100 [==============================] - 27s 265ms/step - loss: 0.0061 - acc: 0.9990 - val_loss: 0.2962 - val_acc: 0.9300
Epoch 45/50
100/100 [==============================] - 27s 266ms/step - loss: 0.0129 - acc: 0.9965 - val_loss: 0.2914 - val_acc: 0.9330
Epoch 46/50
100/100 [==============================] - 27s 266ms/step - loss: 0.0061 - acc: 0.9975 - val_loss: 0.4213 - val_acc: 0.9120
Epoch 47/50
100/100 [==============================] - 27s 265ms/step - loss: 0.0045 - acc: 0.9975 - val_loss: 0.3242 - val_acc: 0.9350
Epoch 48/50
100/100 [==============================] - 26s 264ms/step - loss: 0.0038 - acc: 0.9990 - val_loss: 0.3308 - val_acc: 0.9300
Epoch 49/50
100/100 [==============================] - 26s 264ms/step - loss: 0.0109 - acc: 0.9965 - val_loss: 0.2993 - val_acc: 0.9340
Epoch 50/50
100/100 [==============================] - 26s 263ms/step - loss: 0.0026 - acc: 0.9990 - val_loss: 0.3268 - val_acc: 0.9350
CPU times: user 25min 5s, sys: 1min 6s, total: 26min 11s
Wall time: 22min 52s
## Save the model
model.save('cats_and_dogs_small_4_misetunning_50.h5')
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(len(acc))
plt.plot(epochs_range, acc, 'bo', label='Training acc')
plt.plot(epochs_range, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs_range, loss, 'bo', label='Training loss')
plt.plot(epochs_range, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
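The fine-tuning curves are fairly noisy; replotting them through an exponential moving average makes the trends easier to read. A minimal sketch (hypothetical smooth_curve helper; factor=0.8 is an arbitrary smoothing strength):

def smooth_curve(points, factor=0.8):
    # Each point is blended with the running exponential average
    smoothed = []
    for p in points:
        smoothed.append(smoothed[-1] * factor + p * (1 - factor) if smoothed else p)
    return smoothed

plt.plot(epochs_range, smooth_curve(acc), 'bo', label='Smoothed training acc')
plt.plot(epochs_range, smooth_curve(val_acc), 'b', label='Smoothed validation acc')
plt.title('Smoothed training and validation accuracy')
plt.legend()
plt.show()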
test_generator = test_datagen.flow_from_directory(
    test_dir,
    target_size=(150, 150),
    batch_size=batch_size,
    class_mode='binary')
test_loss, test_acc = model.evaluate(test_generator, steps=50)
print('test acc:', test_acc)
Found 1000 images belonging to 2 classes.
test acc: 0.9359999895095825
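Finally, the saved model can classify a single new image; a minimal sketch (the test1 file name is taken from the directory listing above; flow_from_directory assigns class indices alphabetically, so cats=0 and dogs=1, and a sigmoid output above 0.5 means 'dog'):

import numpy as np
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image

model = load_model('cats_and_dogs_small_4_misetunning_50.h5')
# Preprocess exactly as in training: resize to 150x150, scale to [0, 1]
img = image.load_img('/content/datasets/test1/10000.jpg', target_size=(150, 150))
x = image.img_to_array(img)[np.newaxis] / 255.0
prob = float(model.predict(x)[0][0])
print('dog' if prob > 0.5 else 'cat', '({:.3f})'.format(prob))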