import matplotlib.pyplot as plt

from tensorflow.keras.datasets import imdb
from tensorflow.keras.layers import Dense, Embedding, LSTM
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing import sequence
max_features = 10000  # number of words to use as features (only the 10,000 most frequent words are kept)
maxlen = 500  # cut each review off after this many words
batch_size = 32  # batch size (note: model.fit below passes batch_size=128 explicitly)
(input_train, y_train), (input_test, y_test) = imdb.load_data(num_words=max_features)
print(len(input_train), 'train sequences')
print(len(input_test), 'test sequences')
print(input_train[0], y_train[0])
25000 train sequences
25000 test sequences
[1, 14, 22, 16, 43, 530, 973, 1622, 1385, 65, 458, 4468, 66, 3941, 4, 173, 36, 256, 5, 25, 100, 43, 838, 112, 50, 670, 2, 9, 35, 480, 284, 5, 150, 4, 172, 112, 167, 2, 336, 385, 39, 4, 172, 4536, 1111, 17, 546, 38, 13, 447, 4, 192, 50, 16, 6, 147, 2025, 19, 14, 22, 4, 1920, 4613, 469, 4, 22, 71, 87, 12, 16, 43, 530, 38, 76, 15, 13, 1247, 4, 22, 17, 515, 17, 12, 16, 626, 18, 2, 5, 62, 386, 12, 8, 316, 8, 106, 5, 4, 2223, 5244, 16, 480, 66, 3785, 33, 4, 130, 12, 16, 38, 619, 5, 25, 124, 51, 36, 135, 48, 25, 1415, 33, 6, 22, 12, 215, 28, 77, 52, 5, 14, 407, 16, 82, 2, 8, 4, 107, 117, 5952, 15, 256, 4, 2, 7, 3766, 5, 723, 36, 71, 43, 530, 476, 26, 400, 317, 46, 7, 4, 2, 1029, 13, 104, 88, 4, 381, 15, 297, 98, 32, 2071, 56, 26, 141, 6, 194, 7486, 18, 4, 226, 22, 21, 134, 476, 26, 480, 5, 144, 30, 5535, 18, 51, 36, 28, 224, 92, 25, 104, 4, 226, 65, 16, 38, 1334, 88, 12, 16, 283, 5, 16, 4472, 113, 103, 32, 15, 16, 5345, 19, 178, 32] 1
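# Each review is already encoded as a list of word indices. A minimal sketch for
# decoding one back to text, assuming the standard Keras IMDB convention that
# indices 0-2 are reserved for padding/start-of-sequence/unknown (so real word
# indices are offset by 3):
word_index = imdb.get_word_index()
reverse_word_index = {index: word for word, index in word_index.items()}
decoded = ' '.join(reverse_word_index.get(i - 3, '?') for i in input_train[0])
print(decoded[:100])  # first 100 characters of the decoded review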
# Convert the lists to a 2D integer tensor of shape (samples, maxlen)
print('Pad sequences (samples x time)')
print('input_train shape:', input_train.shape)
print('input_test shape:', input_test.shape)
input_train = sequence.pad_sequences(input_train, maxlen=maxlen)
input_test = sequence.pad_sequences(input_test, maxlen=maxlen)
print('input_train shape:', input_train.shape)
print('input_test shape:', input_test.shape)
Pad sequences (samples x time)
input_train shape: (25000,)
input_test shape: (25000,)
input_train shape: (25000, 500)
input_test shape: (25000, 500)
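# pad_sequences pads and truncates at the *start* of each sequence by default
# (padding='pre', truncating='pre'), so short reviews are left-padded with zeros
# and long reviews lose their earliest words. A small illustration on toy data:
toy = sequence.pad_sequences([[1, 2, 3], [4, 5, 6, 7, 8]], maxlen=4)
print(toy)
# [[0 1 2 3]
#  [5 6 7 8]]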
model = Sequential()
model.add(Embedding(max_features, 32))      # map each word index to a 32-dim vector
model.add(LSTM(32))                         # single LSTM layer with 32 units
model.add(Dense(1, activation='sigmoid'))   # binary classification head
model.summary()
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= embedding (Embedding) (None, None, 32) 320000 lstm (LSTM) (None, 32) 8320 dense (Dense) (None, 1) 33 ================================================================= Total params: 328,353 Trainable params: 328,353 Non-trainable params: 0 _________________________________________________________________
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['acc'])
%%time
history = model.fit(input_train, y_train,
                    epochs=10,
                    batch_size=128,
                    validation_split=0.2)
Epoch 1/10
157/157 [==============================] - 38s 225ms/step - loss: 0.5068 - acc: 0.7578 - val_loss: 0.3977 - val_acc: 0.8394
Epoch 2/10
157/157 [==============================] - 32s 202ms/step - loss: 0.2926 - acc: 0.8878 - val_loss: 0.4592 - val_acc: 0.8304
Epoch 3/10
157/157 [==============================] - 33s 209ms/step - loss: 0.2368 - acc: 0.9117 - val_loss: 0.3514 - val_acc: 0.8500
Epoch 4/10
157/157 [==============================] - 32s 203ms/step - loss: 0.2019 - acc: 0.9252 - val_loss: 0.3669 - val_acc: 0.8478
Epoch 5/10
157/157 [==============================] - 30s 192ms/step - loss: 0.1784 - acc: 0.9362 - val_loss: 0.3402 - val_acc: 0.8872
Epoch 6/10
157/157 [==============================] - 30s 193ms/step - loss: 0.1582 - acc: 0.9434 - val_loss: 0.7199 - val_acc: 0.7838
Epoch 7/10
157/157 [==============================] - 30s 194ms/step - loss: 0.1447 - acc: 0.9478 - val_loss: 0.3439 - val_acc: 0.8840
Epoch 8/10
157/157 [==============================] - 30s 193ms/step - loss: 0.1295 - acc: 0.9554 - val_loss: 0.3455 - val_acc: 0.8498
Epoch 9/10
157/157 [==============================] - 31s 200ms/step - loss: 0.1184 - acc: 0.9589 - val_loss: 0.3737 - val_acc: 0.8794
Epoch 10/10
157/157 [==============================] - 34s 215ms/step - loss: 0.1106 - acc: 0.9614 - val_loss: 0.3655 - val_acc: 0.8840
CPU times: total: 19min 51s
Wall time: 5min 20s
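# The log shows val_loss bottoming out around epoch 5 and fluctuating afterwards
# while training loss keeps falling, a sign of overfitting. A minimal sketch
# (my addition, not part of the original run) that stops training once the
# validation loss stalls and restores the best weights seen:
from tensorflow.keras.callbacks import EarlyStopping

early_stop = EarlyStopping(monitor='val_loss', patience=2, restore_best_weights=True)
# history = model.fit(input_train, y_train, epochs=10, batch_size=128,
#                     validation_split=0.2, callbacks=[early_stop])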
model.evaluate(input_test, y_test)
782/782 [==============================] - 25s 32ms/step - loss: 0.4142 - acc: 0.8658
[0.4142196476459503, 0.8658000230789185]
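# The sigmoid output is the predicted probability that a review is positive.
# A quick sketch for spot-checking a few test predictions against their labels:
probs = model.predict(input_test[:3])
for p, label in zip(probs, y_test[:3]):
    print(f'predicted positive prob: {p[0]:.3f}  true label: {label}')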
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(1, len(acc) + 1)
# Accuracy
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
# Loss
plt.figure()
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()