import matplotlib.pyplot as plt
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.utils import to_categorical

# Load the CIFAR-10 dataset: 50,000 training and 10,000 test 32x32 RGB images.
(train_x, train_y), (test_x, test_y) = cifar10.load_data()

# Human-readable label names, index-aligned with the integer class ids (0-9).
class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck']

# Scale pixel intensities from [0, 255] down to [0.0, 1.0].
train_x = train_x / 255.
test_x = test_x / 255.

# One-hot encode the integer labels for use with categorical cross-entropy.
train_y = to_categorical(train_y)
test_y = to_categorical(test_y)
# CNN model design: three conv stages feeding a small dense classifier.
from tensorflow.keras import models, layers

model = models.Sequential([
    # (32, 32, 3) -> (30, 30, 32): 3x3 valid convolution
    layers.Conv2D(filters=32, kernel_size=(3, 3), activation='relu',
                  input_shape=(32, 32, 3)),
    # (30, 30, 32) -> (15, 15, 32): 2x2 max pooling halves spatial dims
    layers.MaxPool2D(pool_size=(2, 2)),
    # (15, 15, 32) -> (13, 13, 64)
    layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
    # (13, 13, 64) -> (6, 6, 64)
    layers.MaxPool2D(pool_size=(2, 2)),
    # (6, 6, 64) -> (4, 4, 64)
    layers.Conv2D(filters=64, kernel_size=(3, 3), activation='relu'),
    # Flatten the 3D feature map into a 1D vector for the dense layers.
    layers.Flatten(),
    # Classification head: fully connected hidden layer + 10-way softmax.
    layers.Dense(units=64, activation='relu'),
    layers.Dense(units=10, activation='softmax'),
])

# Configure training: RMSprop optimizer, cross-entropy loss on one-hot labels.
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model: 20 epochs, mini-batches of 256, with the last 20% of the
# training data held out as a validation split.
history = model.fit(x=train_x, y=train_y, epochs=20, batch_size=256,
                    verbose=2, validation_split=0.2)

# Per-epoch metric curves recorded by Keras during fit().
curves = history.history
acc = curves['accuracy']
val_acc = curves['val_accuracy']
loss = curves['loss']
val_loss = curves['val_loss']
epochs = range(len(acc))


def _plot_history(train_curve, val_curve, metric, title):
    """Plot one training curve (blue dots) against its validation curve (blue line)."""
    plt.plot(epochs, train_curve, 'bo', label='Training ' + metric)
    plt.plot(epochs, val_curve, 'b', label='Validation ' + metric)
    plt.title(title)
    plt.legend()
    plt.show()


_plot_history(acc, val_acc, 'acc', 'Training and Validation Accuracy')
_plot_history(loss, val_loss, 'loss', 'Training and Validation Loss')
Epoch 1/20 157/157 - 14s - loss: 1.9085 - accuracy: 0.3078 - val_loss: 1.8139 - val_accuracy: 0.3445 - 14s/epoch - 86ms/step
Epoch 2/20 157/157 - 2s - loss: 1.5692 - accuracy: 0.4403 - val_loss: 1.6256 - val_accuracy: 0.4085 - 2s/epoch - 10ms/step
Epoch 3/20 157/157 - 1s - loss: 1.4280 - accuracy: 0.4917 - val_loss: 1.4535 - val_accuracy: 0.4857 - 1s/epoch - 10ms/step
Epoch 4/20 157/157 - 2s - loss: 1.3163 - accuracy: 0.5346 - val_loss: 1.2948 - val_accuracy: 0.5500 - 2s/epoch - 10ms/step
Epoch 5/20 157/157 - 2s - loss: 1.2313 - accuracy: 0.5694 - val_loss: 1.2821 - val_accuracy: 0.5418 - 2s/epoch - 10ms/step
Epoch 6/20 157/157 - 1s - loss: 1.1545 - accuracy: 0.5939 - val_loss: 1.1883 - val_accuracy: 0.5775 - 1s/epoch - 9ms/step
Epoch 7/20 157/157 - 2s - loss: 1.0871 - accuracy: 0.6196 - val_loss: 1.3106 - val_accuracy: 0.5394 - 2s/epoch - 10ms/step
Epoch 8/20 157/157 - 1s - loss: 1.0307 - accuracy: 0.6401 - val_loss: 1.1601 - val_accuracy: 0.5968 - 1s/epoch - 10ms/step
Epoch 9/20 157/157 - 2s - loss: 0.9835 - accuracy: 0.6560 - val_loss: 1.1063 - val_accuracy: 0.6180 - 2s/epoch - 10ms/step
Epoch 10/20 157/157 - 1s - loss: 0.9346 - accuracy: 0.6746 - val_loss: 1.0333 - val_accuracy: 0.6383 - 1s/epoch - 9ms/step
Epoch 11/20 157/157 - 2s - loss: 0.8930 - accuracy: 0.6883 - val_loss: 0.9684 - val_accuracy: 0.6631 - 2s/epoch - 10ms/step
Epoch 12/20 157/157 - 1s - loss: 0.8531 - accuracy: 0.7048 - val_loss: 1.2063 - val_accuracy: 0.5955 - 1s/epoch - 9ms/step
Epoch 13/20 157/157 - 2s - loss: 0.8149 - accuracy: 0.7169 - val_loss: 1.0456 - val_accuracy: 0.6429 - 2s/epoch - 10ms/step
Epoch 14/20 157/157 - 2s - loss: 0.7843 - accuracy: 0.7276 - val_loss: 1.0638 - val_accuracy: 0.6347 - 2s/epoch - 10ms/step
Epoch 15/20 157/157 - 1s - loss: 0.7552 - accuracy: 0.7368 - val_loss: 0.9637 - val_accuracy: 0.6678 - 1s/epoch - 9ms/step
Epoch 16/20 157/157 - 2s - loss: 0.7232 - accuracy: 0.7477 - val_loss: 1.0395 - val_accuracy: 0.6453 - 2s/epoch - 10ms/step
Epoch 17/20 157/157 - 1s - loss: 0.6947 - accuracy: 0.7578 - val_loss: 0.9817 - val_accuracy: 0.6709 - 1s/epoch - 9ms/step
Epoch 18/20 157/157 - 2s - loss: 0.6714 - accuracy: 0.7620 - val_loss: 1.0031 - val_accuracy: 0.6643 - 2s/epoch - 10ms/step
Epoch 19/20 157/157 - 2s - loss: 0.6362 - accuracy: 0.7765 - val_loss: 1.0580 - val_accuracy: 0.6549 - 2s/epoch - 10ms/step
Epoch 20/20 157/157 - 2s - loss: 0.6081 - accuracy: 0.7897 - val_loss: 1.1715 - val_accuracy: 0.6134 - 2s/epoch - 10ms/step
* 학습 정확도(accuracy)는 0.79로 그리 높지 않은데, 검증 데이터셋에 대한 정확도(val_accuracy)는 0.61로 그보다 훨씬 낮다. 학습 로그를 보면 학습 정확도는 계속 오르는 반면 검증 손실은 후반부에 다시 커지고 있어, 모델이 학습 데이터에 과적합(overfitting)되기 시작했음을 알 수 있다. 일반적인 절차대로 먼저 epoch 수를 늘리거나 모델의 복잡도를 키워 학습 정확도를 충분히 높여(의도적으로 과적합을 일으켜) 모델의 수용력을 확인한 뒤, 드롭아웃이나 데이터 증강 같은 규제 기법으로 검증 정확도를 끌어올려야 한다.
'프로젝트 > 코드프레소 체험단' 카테고리의 다른 글
이미지 데이터 처리를 위한 CNN 완벽 가이드 - ImageDataGenerator API (0) | 2022.03.08 |
---|---|
이미지 데이터 처리를 위한 CNN 완벽 가이드 - CIFAR-10-codepresso 분류 CNN 모델 구현 (0) | 2022.03.07 |
이미지 데이터 처리를 위한 CNN 완벽 가이드 - fashion-mnist 분류 CNN 모델 구현 (0) | 2022.03.07 |
이미지 데이터 처리를 위한 CNN 완벽 가이드 - MNIST 데이터셋 분류 CNN 모델 구현 (0) | 2022.03.06 |
이미지 데이터 처리를 위한 CNN 완벽 가이드 - Keras의 CNN API (0) | 2022.03.05 |