데이터분석가 과정/Tensorflow

DAY71. Tensorflow Face detection (2)

LEE_BOMB 2021. 12. 30. 17:35
celeb image classifier

1. celeb5 이미지 분류기 : CNN model 
2. Image Generator : model 공급할 이미지 생성 

* cats dogs imageGenerator 참고 


from tensorflow.keras import Sequential #keras model 
from tensorflow.keras.layers import Conv2D, MaxPool2D #Convolution layer
from tensorflow.keras.layers import Dense, Flatten #Affine layer


공급 image 크기 

# Dimensions of the images fed to the network: 150x150 RGB.
img_h = 150  # input image height (pixels)
img_w = 150  # input image width (pixels)
input_shape = (img_h, img_w, 3)  # (height, width, channels)



1. CNN Model layer 

# Build the CNN as a Keras Sequential (linear stack of layers) model.
print('model create')
model = Sequential()


Convolution layer1 

# Convolution layer 1: 32 filters of 3x3 with ReLU, then 2x2 max pooling.
# input_shape is only required on the first layer of a Sequential model.
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                 input_shape = input_shape))
model.add(MaxPool2D(pool_size=(2,2)))


Convolution layer2 

# Convolution layer 2: 64 filters of 3x3 with ReLU, then 2x2 max pooling.
model.add(Conv2D(64,kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))

#Convolution layer3 : [수정] 제외


Flatten layer : 3d -> 1d

model.add(Flatten())


DNN hidden layer(Fully connected layer)

model.add(Dense(256, activation = 'relu'))


DNN Output layer : [수정]

model.add(Dense(5, activation = 'softmax')) #5 class(0~4)


model training set : [수정]

# Compile: sparse_categorical_crossentropy expects integer class labels
# (0-4), not one-hot vectors; track the matching sparse accuracy metric.
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy', # y = integer class labels
              metrics = ['sparse_categorical_accuracy'])

loss = 'categorical_crossentropy' : labels must be one-hot encoded vectors
loss = 'sparse_categorical_crossentropy' : labels are integer class indices (0, 1, 2, ...)



2. image file preprocessing : image 제너레이터   

from tensorflow.keras.preprocessing.image import ImageDataGenerator


dir setting

# Root directory of the celeb5 image dataset on disk.
base_dir = r"C:\ITWILL\5_Tensorflow\workspace\chap07_Face_detection\lecture02_celeb_classifier\celeb5__image"

# Sub-directories holding the training and validation images.
train_dir = f"{base_dir}/train_celeb5"
validation_dir = f"{base_dir}/val_celeb5"


학습 이미지 데이터셋 생성기

train_data = ImageDataGenerator(rescale=1./255) #정규화


검증 이미지 데이터셋 생성기

validation_data = ImageDataGenerator(rescale=1./255)


학습 이미지 생성기 -> 이미지 가공 

# Stream training images from disk, resized to 150x150, in batches of 20.
# class_mode='sparse' yields integer class labels (0-4), which is what the
# sparse_categorical_crossentropy loss expects; 'binary' is meant for
# 2-class problems, not this 5-class dataset.
train_generator = train_data.flow_from_directory(
        train_dir,
        target_size=(150,150), # resize every image to the model's input size
        batch_size=20,
        class_mode='sparse') # [fixed] integer labels for 5 classes (was 'binary')
#Found 990 images belonging to 5 classes.


검증 이미지 생성기 -> 이미지 가공 

# Stream validation images; same settings as the training generator.
# class_mode='sparse' matches the 5-class integer labels expected by
# sparse_categorical_crossentropy ('binary' is for 2-class data only).
validation_generator = validation_data.flow_from_directory(
        validation_dir,
        target_size=(150,150),
        batch_size=20,
        class_mode='sparse') # [fixed] integer labels for 5 classes (was 'binary')
#Found 250 images belonging to 5 classes.

 



3. model training : image제너레이터 이용 모델 훈련 

# Train from the generators. Model.fit accepts generators directly;
# fit_generator is deprecated (and removed in recent TF releases).
model_fit = model.fit(
          train_generator, # training image stream
          steps_per_epoch=50, # 50 steps x batch 20 = 1,000 images per epoch
          epochs=5, # 5 passes over ~1,000 images
          validation_data=validation_generator, # validation image stream
          validation_steps=13) # [fixed comment] 13 steps x batch 20 = 260 images


model evaluation

model.evaluate(validation_generator)

38ms/step - loss: 0.0151 - sparse_categorical_accuracy: 1.0000



#4. model history graph

import matplotlib.pyplot as plt
# Inspect which curves were recorded; key names follow the metric names
# passed to compile() (sparse_categorical_accuracy here, not plain accuracy).
print(model_fit.history.keys())
#dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
#dict_keys(['loss', 'sparse_categorical_accuracy', 'val_loss', 'val_sparse_categorical_accuracy'])

# Pull the per-epoch curves out of the History object, losses first.
loss = model_fit.history['loss']                                # training loss
val_loss = model_fit.history['val_loss']                        # validation loss
acc = model_fit.history['sparse_categorical_accuracy']          # training accuracy
val_acc = model_fit.history['val_sparse_categorical_accuracy']  # validation accuracy





#과적합 시작점 확인 

epochs = range(1, len(acc) + 1)



#acc vs val_acc   

# Accuracy curves: a widening gap between the train and validation lines
# marks the onset of overfitting.
plt.plot(epochs, acc, 'b--', label='train acc')
plt.plot(epochs, val_acc, 'r', label='val acc')
plt.title('Training vs validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy') # [fixed] typo: was 'accuray'
plt.legend(loc='best')
plt.show()



#loss vs val_loss 

# Loss curves: validation loss rising while training loss keeps falling
# indicates overfitting.
plt.plot(epochs, loss, 'b--', label='train loss')
plt.plot(epochs, val_loss, 'r', label='val loss')
plt.title('Training vs validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show()

 

 

 

 

 

celeb image classifier2

1. Dropout 적용 : 과적합 해결 
2. EarlyStopping : 적합한 epoch size  

* celeb image classifier 참고 

from tensorflow.keras import Sequential #keras model 
from tensorflow.keras.layers import Conv2D, MaxPool2D #Convolution layer
from tensorflow.keras.layers import Dense, Flatten, Dropout #[추가] Affine layer
from tensorflow.keras.callbacks import EarlyStopping #[추가]


공급 image 크기 

# Dimensions of the images fed to the network: 150x150 RGB.
img_h = 150  # input image height (pixels)
img_w = 150  # input image width (pixels)
input_shape = (img_h, img_w, 3)  # (height, width, channels)




1. CNN Model layer 

# Build the CNN as a Keras Sequential (linear stack of layers) model.
print('model create')
model = Sequential()


Convolution layer1 

# Convolution layer 1: 32 filters of 3x3 with ReLU, then 2x2 max pooling.
# input_shape is only required on the first layer of a Sequential model.
model.add(Conv2D(32, kernel_size=(3, 3), activation='relu',
                 input_shape = input_shape))
model.add(MaxPool2D(pool_size=(2,2)))


Convolution layer2 

# Convolution layer 2: 64 filters of 3x3 with ReLU, then 2x2 max pooling.
model.add(Conv2D(64,kernel_size=(3, 3), activation='relu'))
model.add(MaxPool2D(pool_size=(2,2)))

#Convolution layer3 : 제외


Flatten layer : 3d -> 1d

model.add(Flatten())


DNN hidden layer(Fully connected layer)

# Fully connected hidden layer; Dropout randomly zeroes 50% of its
# activations during training to curb the overfitting seen in version 1.
model.add(Dense(256, activation = 'relu'))
model.add(Dropout(rate=0.5)) # [added] regularization


DNN Output layer 

model.add(Dense(5, activation = 'softmax')) #5 class(0~4)


model training set 

# Compile: sparse_categorical_crossentropy expects integer class labels
# (0-4), not one-hot vectors; track the matching sparse accuracy metric.
model.compile(optimizer = 'adam',
              loss = 'sparse_categorical_crossentropy', # y = integer class labels
              metrics = ['sparse_categorical_accuracy'])

loss = 'categorical_crossentropy' : labels must be one-hot encoded vectors
loss = 'sparse_categorical_crossentropy' : labels are integer class indices (0, 1, 2, ...)



2. image file preprocessing : image 제너레이터   

from tensorflow.keras.preprocessing.image import ImageDataGenerator


dir setting

# Root directory of the celeb5 image dataset on disk.
base_dir = r"C:\ITWILL\5_Tensorflow\workspace\chap07_Face_detection\lecture02_celeb_classifier\celeb5__image"

# Sub-directories holding the training and validation images.
train_dir = f"{base_dir}/train_celeb5"
validation_dir = f"{base_dir}/val_celeb5"


학습 이미지 데이터셋 생성기

train_data = ImageDataGenerator(rescale=1./255) #정규화


검증 이미지 데이터셋 생성기

validation_data = ImageDataGenerator(rescale=1./255)


학습 이미지 생성기 -> 이미지 가공 

# Stream training images from disk, resized to 150x150, in batches of 20.
# class_mode='sparse' yields integer class labels (0-4), which is what the
# sparse_categorical_crossentropy loss expects; 'binary' is meant for
# 2-class problems, not this 5-class dataset.
train_generator = train_data.flow_from_directory(
        train_dir,
        target_size=(150,150), # resize every image to the model's input size
        batch_size=20,
        class_mode='sparse') # [fixed] integer labels for 5 classes (was 'binary')
#Found 990 images belonging to 5 classes.


검증 이미지 생성기 -> 이미지 가공 

# Stream validation images; same settings as the training generator.
# class_mode='sparse' matches the 5-class integer labels expected by
# sparse_categorical_crossentropy ('binary' is for 2-class data only).
validation_generator = validation_data.flow_from_directory(
        validation_dir,
        target_size=(150,150),
        batch_size=20,
        class_mode='sparse') # [fixed] integer labels for 5 classes (was 'binary')
#Found 250 images belonging to 5 classes.


[추가] 학습 조기종료 : epoch=17 

# Early stopping: halt training once val_loss has not improved for 5
# consecutive epochs (this run reportedly stopped around epoch 17).
# NOTE(review): consider restore_best_weights=True so the model keeps the
# weights from the best epoch rather than the last one — confirm desired.
callback = EarlyStopping(monitor='val_loss', patience=5)




3. model training : image제너레이터 이용 모델 훈련 

# Train with early stopping. Model.fit accepts generators directly;
# fit_generator is deprecated (and removed in recent TF releases).
model_fit = model.fit(
          train_generator, # training image stream
          steps_per_epoch=50, # 50 steps x batch 20 = 1,000 images per epoch
          epochs=20, # upper bound; EarlyStopping usually halts sooner
          validation_data=validation_generator, # validation image stream
          validation_steps=13, # [fixed comment] 13 steps x batch 20 = 260 images
          callbacks = [callback]) # early stopping on val_loss


model evaluation

model.evaluate(validation_generator)

38ms/step - loss: 0.0151 - sparse_categorical_accuracy: 1.0000



4. model history graph

import matplotlib.pyplot as plt
 
# Inspect which curves were recorded; key names follow the metric names
# passed to compile() (sparse_categorical_accuracy here, not plain accuracy).
print(model_fit.history.keys())
#dict_keys(['loss', 'accuracy', 'val_loss', 'val_accuracy'])
#dict_keys(['loss', 'sparse_categorical_accuracy', 'val_loss', 'val_sparse_categorical_accuracy'])


[key 변경]

# Pull the per-epoch curves out of the History object, losses first.
loss = model_fit.history['loss']                                # training loss
val_loss = model_fit.history['val_loss']                        # validation loss
acc = model_fit.history['sparse_categorical_accuracy']          # training accuracy
val_acc = model_fit.history['val_sparse_categorical_accuracy']  # validation accuracy


과적합 시작점 확인 

epochs = range(1, len(acc) + 1)


acc vs val_acc   

# Accuracy curves: a widening gap between the train and validation lines
# marks the onset of overfitting.
plt.plot(epochs, acc, 'b--', label='train acc')
plt.plot(epochs, val_acc, 'r', label='val acc')
plt.title('Training vs validation accuracy')
plt.xlabel('epoch')
plt.ylabel('accuracy') # [fixed] typo: was 'accuray'
plt.legend(loc='best')
plt.show()


loss vs val_loss 

# Loss curves: validation loss rising while training loss keeps falling
# indicates overfitting.
plt.plot(epochs, loss, 'b--', label='train loss')
plt.plot(epochs, val_loss, 'r', label='val loss')
plt.title('Training vs validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend(loc='best')
plt.show()