Files: Train_ver2.py (0.01 MB), Training_Inference_Example.ipynb (1.05 MB)
Python 3.9.0
TensorFlow 2.9.1
Network: EfficientNet B3
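A quick sanity check (an optional sketch, not part of the original script) to confirm the runtime matches the versions listed above before running anything else:

import sys
import tensorflow as tf
print(f'Python : {sys.version.split()[0]}')  # expect 3.9.x
print(f'TensorFlow : {tf.__version__}')      # expect 2.9.x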
import numpy as np
import os
def createFolder(directory):
    try:
        if not os.path.exists(directory):
            os.makedirs(directory)
    except OSError:
        print('Error: Creating directory. ' + directory)
np.set_printoptions(precision=4)
DATASETPATH = r"D:\ADC_POC\Training_Dataset_ADC"
BATCH_SIZE = 32
IMG_SIZE = (250, 250)
SAVEPATH = r"D:\ADC_POC\Training_Dataset_ADC_RESULT"
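# createFolder is defined above but never called in the original script; saving
# the .h5 checkpoints later can fail if SAVEPATH does not exist, so create it up
# front (an assumption: the author's helper is reused here rather than calling
# os.makedirs directly).
createFolder(SAVEPATH)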
print(f'Directory : {os.listdir(DATASETPATH)}')
dataset_dir = DATASETPATH  # os.path.join() with a single argument was a no-op
import tensorflow as tf
from tensorflow.keras.preprocessing import image_dataset_from_directory
total_dataset = image_dataset_from_directory(dataset_dir,
                                             shuffle=True,
                                             batch_size=BATCH_SIZE,
                                             image_size=IMG_SIZE)
class_names = total_dataset.class_names
class_count = len(class_names)
print(f'Class Count : {class_count}')
for images, labels in total_dataset.take(1):
    print(f'image count : {len(images)}')
    print(f'image size : {images[0].shape}')
import matplotlib.pyplot as plt
plt.figure(figsize=(10, 10))
for i in range(20):
    ax = plt.subplot(4, 5, i + 1)
    plt.imshow(images[i].numpy().astype('uint8'))
    plt.title(class_names[labels[i]])
    plt.axis('off')
# Check how many batches of data are available
total_batches = tf.data.experimental.cardinality(total_dataset)
print(f'Number of total batches : {total_batches}')
valid_batches = int(total_batches//5)
test_batches = int(total_batches//5)
train_batches = total_batches - valid_batches - test_batches
train_dataset = total_dataset.take(train_batches)
total_dataset_trainSkip = total_dataset.skip(train_batches)
valid_dataset = total_dataset_trainSkip.take(valid_batches)
total_dataset_trainValidSkip = total_dataset_trainSkip.skip(valid_batches)
test_dataset = total_dataset_trainValidSkip.take(test_batches)
total_dataset_trainValidTestSkip = total_dataset_trainValidSkip.skip(test_batches)
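# Caveat: image_dataset_from_directory(shuffle=True) reshuffles on each pass by
# default, so take()/skip() splits drawn from the same shuffled dataset can leak
# samples between train/valid/test across iterations. A common alternative
# (sketch only, not wired into the rest of this script) is the built-in
# validation_split argument with a fixed seed:
# train_ds = image_dataset_from_directory(dataset_dir, validation_split=0.2,
#                                         subset='training', seed=123,
#                                         image_size=IMG_SIZE, batch_size=BATCH_SIZE)
# valid_ds = image_dataset_from_directory(dataset_dir, validation_split=0.2,
#                                         subset='validation', seed=123,
#                                         image_size=IMG_SIZE, batch_size=BATCH_SIZE)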
train_dataset_repeat = train_dataset.repeat(50)  # each fit() epoch below iterates 50 passes over the training split
print('Number of train batches: %d' % tf.data.experimental.cardinality(train_dataset))
print('Number of train repeat batches: %d' % tf.data.experimental.cardinality(train_dataset_repeat))
print('Number of validation batches: %d' % tf.data.experimental.cardinality(valid_dataset))
print('Number of test batches: %d' % tf.data.experimental.cardinality(test_dataset))
print('Number of total batches: %d' % tf.data.experimental.cardinality(total_dataset_trainValidTestSkip))
AUTOTUNE = tf.data.AUTOTUNE
train_dataset_prefetch = train_dataset_repeat.prefetch(buffer_size=AUTOTUNE)
valid_dataset_prefetch = valid_dataset.prefetch(buffer_size=AUTOTUNE)
test_dataset_prefetch = test_dataset.prefetch(buffer_size=AUTOTUNE)
data_augmentation = tf.keras.Sequential([
    tf.keras.layers.experimental.preprocessing.RandomFlip('horizontal'),
    tf.keras.layers.experimental.preprocessing.RandomFlip('vertical'),
    # (-8.0, 0.8) in the original looks like a misplaced decimal point; RandomRotation
    # takes fractions of a full turn, so (-0.8, 0.8) allows rotations up to ~288 degrees either way.
    tf.keras.layers.experimental.preprocessing.RandomRotation((-0.8, 0.8), fill_mode='nearest'),
    #tf.keras.layers.experimental.preprocessing.CenterCrop(200, 200),
    tf.keras.layers.experimental.preprocessing.RandomTranslation(0.05, 0.05, fill_mode='nearest', interpolation='bilinear', seed=1, fill_value=0.0),
    tf.keras.layers.experimental.preprocessing.RandomZoom((0.1, 0.2)),
    tf.keras.layers.experimental.preprocessing.RandomContrast(0.1)
])
for image, _ in train_dataset_prefetch.take(1):
    plt.figure(figsize=(10, 10))
    first_image = image[0]
    for i in range(BATCH_SIZE):
        ax = plt.subplot(7, 7, i + 1)
        augmented_image = data_augmentation(tf.expand_dims(first_image, 0))
        plt.imshow(augmented_image[0] / 255)
        plt.axis('off')
preprocess_input = tf.keras.applications.efficientnet.preprocess_input  # a pass-through: the Keras EfficientNet implementation normalizes inputs inside the model
IMG_SHAPE = IMG_SIZE + (3,)
base_model = tf.keras.applications.efficientnet.EfficientNetB3(input_shape=IMG_SHAPE, include_top=False, weights='imagenet')
image_batch, label_batch = next(iter(train_dataset_prefetch))
feature_batch = base_model(image_batch)
print(feature_batch.shape)  # expected (32, 8, 8, 1536): B3's 1536-channel feature map at stride 32
base_model.trainable = False
#base_model.summary()
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
feature_batch_average = global_average_layer(feature_batch)
print(feature_batch_average.shape)
prediction_layer = tf.keras.layers.Dense(class_count, activation='softmax')
prediction_batch = prediction_layer(feature_batch_average)
print(prediction_batch.shape)
inputs = tf.keras.Input(shape=IMG_SHAPE)
x = data_augmentation(inputs)  # the Random* layers are only active when the model is called with training=True
x = preprocess_input(x)
x = base_model(x, training=False)
x = global_average_layer(x)
x = tf.keras.layers.Dropout(0.2)(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
base_learning_rate = 0.001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])
# model.summary()
print("Number of trainable_variables: ", len(model.trainable_variables))
initial_epochs = 10
loss0, accuracy0 = model.evaluate(valid_dataset_prefetch)  # with an untrained head, accuracy should sit near 1/class_count
print("initial loss:{:.2f}".format(loss0))
print("initial accuracy:{:.2f}".format(accuracy0))
history = model.fit(train_dataset_prefetch,
                    epochs=initial_epochs,
                    validation_data=valid_dataset_prefetch)
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
base_model.trainable = True
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
# Fine-tune from this layer onwards
fine_tune_at = 300
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
    layer.trainable = False
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate / 10),
              loss=tf.keras.losses.SparseCategoricalCrossentropy(),
              metrics=['accuracy'])
model.summary()
print("Number of trainable_variables: ", len(model.trainable_variables))
fine_tune_epochs = 10
total_epochs = initial_epochs + fine_tune_epochs
# Callbacks
weight_filePath = os.path.join(SAVEPATH, "Model_{epoch:02d}-{val_loss:.10f}.h5")
modelcheckpoint = tf.keras.callbacks.ModelCheckpoint(filepath=weight_filePath, monitor="val_loss", save_best_only=False, verbose=1)
# Note: with only fine_tune_epochs=10 additional epochs, patience values of 25 and
# 50 can never trigger; they only matter if the epoch counts above are raised.
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=25, factor=0.1, verbose=1)
earlyStopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=50, restore_best_weights=True, verbose=1)
history_fine = model.fit(train_dataset_prefetch,
                         epochs=total_epochs,
                         initial_epoch=history.epoch[-1],
                         validation_data=valid_dataset_prefetch,
                         callbacks=[modelcheckpoint, reduce_lr, earlyStopping])
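# The per-epoch .h5 checkpoints saved by ModelCheckpoint can be reloaded later for
# inference or further training. Sketch only; the filename below is hypothetical
# (it depends on the epoch number and val_loss at save time):
# reloaded = tf.keras.models.load_model(os.path.join(SAVEPATH, "Model_12-0.0123456789.h5"))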
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
loss, accuracy = model.evaluate(test_dataset_prefetch)
print('Test accuracy :', accuracy)
image_batch, label_batch = next(test_dataset_prefetch.as_numpy_iterator())
plt.figure(figsize=(10, 10))
for i in range(9):
    ax = plt.subplot(3, 3, i + 1)
    plt.imshow(image_batch[i].astype("uint8"))
    plt.title(class_names[label_batch[i]])
    plt.axis("off")
print("THE END")