Notice
Recent Posts
Recent Comments
Link
일 | 월 | 화 | 수 | 목 | 금 | 토 |
---|---|---|---|---|---|---|
1 | 2 | 3 | 4 | 5 | ||
6 | 7 | 8 | 9 | 10 | 11 | 12 |
13 | 14 | 15 | 16 | 17 | 18 | 19 |
20 | 21 | 22 | 23 | 24 | 25 | 26 |
27 | 28 | 29 | 30 | 31 |
Tags
- Python
- 판다스
- NULL
- KNeighborsClassifier
- NAN
- 이미지프로세싱
- Supervised learning
- pandas
- computer vision
- Deep learning
- 데이터프레임
- 비트와이즈
- 알파브렌딩
- mask detection
- sklearn
- 이미지처리
- 사이킷런
- 지도학습
- 파이썬
- tfidfvectorizer
- 결측값
- dataframe
- ML
- opencv
- 머신러닝
- scikit-learn
- dropna
- k-최근접 이웃 분류
- index
- 결측치
Archives
- Today
- Total
Sun.El Data Analysis
[openCV] 코로나-19 mask detection with CNN, openCV 본문
728x90
마스크 착용/미착용이 라벨링 된 사진 615장을 이용하여
CNN deep learning model을 구축하고
openCV Haar Cascade Face Detection model을 적용하여
카메라를 구동하여 mask를 detection하는 것을 확인함
1. 데이터 준비
- 마스크 착용 218개 image
- 마스크 미착용 397개 image
2. 데이터 불러오기
[IN]
import cv2,os
import numpy as np
from tensorflow.python.keras.utils import np_utils
import imutils #pip install imutils
import pytesseract #pip install pytesseract
import tensorflow as tf
import keras
import matplotlib.pyplot as plt
%matplotlib inline
[IN]
# Map each class sub-folder of the mask dataset to an integer label,
# e.g. {'without_mask': 0, 'with_mask': 1} (order comes from os.listdir).
data_path = '../data/mask_data/'
categories = os.listdir(data_path)
labels = list(range(len(categories)))
label_dict = {category: label for category, label in zip(categories, labels)}
label_dict
[OUT]
{'without_mask': 0, 'with_mask': 1}
[IN]
# Load every image from each category folder, convert it to 100x100
# grayscale, and collect pixel arrays (data) with integer labels (target).
data = []
target = []
for category in categories:
    folder_path = os.path.join(data_path, category)
    img_names = os.listdir(folder_path)
    for img_name in img_names:
        img_path = os.path.join(folder_path, img_name)
        img = cv2.imread(img_path)
        # cv2.imread returns None for unreadable/non-image files; skip
        # them explicitly instead of relying on the exception handler.
        if img is None:
            print('Exception:', 'could not read ' + img_path)
            continue
        try:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            resized = cv2.resize(gray, (100, 100))
            data.append(resized)
            target.append(label_dict[category])
        except cv2.error as e:
            # A corrupt image should not abort the whole load, but only
            # OpenCV errors are expected here — anything else is a bug
            # and should propagate (the old bare `except Exception`
            # would have hidden it).
            print('Exception:', e)
[IN]
# Normalise pixel values to [0, 1], add the trailing channel axis,
# one-hot encode the labels, then persist both arrays and reload them
# from disk (so later cells can restart from the .npy files).
data = np.array(data) / 255.0
data = data.reshape((data.shape[0], 100, 100, 1))
target = np.array(target)
new_target = np_utils.to_categorical(target)
np.save('data', data)
np.save('target', new_target)
data = np.load('data.npy')
target = np.load('target.npy')
print(data.shape)
print(target.shape)
[OUT]
(615, 100, 100, 1)
(615, 2)
3. 학습용/검증용 세트로 분할하기
[IN]
from keras.models import Sequential
from keras.layers import Dense,Activation,Flatten,Dropout
from keras.layers import Conv2D,MaxPooling2D
from keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
from keras.callbacks import EarlyStopping
from tensorflow.python.keras.optimizer_v2 import rmsprop
# Hold out 20% of the 615 samples for validation/testing; the fixed
# random_state makes the split reproducible across runs.
train_data,test_data,train_target,test_target=train_test_split(data,target, test_size=0.2, random_state=42)
4. 이미지 생성(Data augmentation to improve generalization)
[IN]
from tensorflow.keras.preprocessing.image import ImageDataGenerator

# Augment the training images on the fly (random rotation, zoom, shifts,
# shear and horizontal flips) to improve generalization on a small
# dataset; fill_mode="nearest" fills pixels exposed by the transforms.
augmentation_options = dict(
    rotation_range=20,
    zoom_range=0.2,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.15,
    horizontal_flip=True,
    fill_mode="nearest",
)
aug = ImageDataGenerator(**augmentation_options)
5. 하이퍼파라미터 튜닝(Hyperparameter Tuning With Bayesian Optimization)
[IN]
def get_model(input_shape, dropout2_rate=0.5):
    """Build the small mask-classification CNN.

    input_shape: shape of one input image, e.g. (100, 100, 1).
    dropout2_rate: dropout rate before the output layer (tuned by the
        Bayesian-optimization cell below).
    """
    layers = [
        Conv2D(32, kernel_size=(3, 3), activation='relu',
               input_shape=input_shape, name="conv2d_1"),
        Conv2D(64, (3, 3), activation='relu', name="conv2d_2"),
        MaxPooling2D(pool_size=(2, 2), name="maxpool2d_1"),
        Dropout(0.25, name="dropout_1"),
        Flatten(name="flatten"),
        Dense(128, activation='relu', name="dense_1"),
        Dropout(dropout2_rate, name="dropout_2"),
        # Two-way softmax over {without_mask, with_mask}.
        Dense(2, activation='softmax', name="dense_2"),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model
[IN]
def fit_with(input_shape, verbose, dropout2_rate, lr):
    """Train a fresh model and return its test accuracy.

    This is the objective for the Bayesian optimization below, which
    searches over `dropout2_rate` and `lr`.
    """
    model = get_model(input_shape, dropout2_rate)
    # Build the optimizer with the sampled learning rate. The previous
    # version passed the string 'rmsprop', which used the default
    # learning rate and silently ignored `lr` — making the search over
    # `lr` meaningless.
    optimizer = tf.keras.optimizers.RMSprop(learning_rate=lr)
    model.compile(loss=tf.keras.losses.BinaryCrossentropy(),
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.fit(train_data, train_target, epochs=10,
              batch_size=4, verbose=verbose)
    score = model.evaluate(test_data, test_target, steps=10, verbose=0)
    print('Test loss:', score[0])
    print('Test accuracy:', score[1])
    return score[1]
[IN]
from functools import partial

verbose = 1
# `input_shape` was never assigned anywhere before this cell, so the
# original code raised NameError; derive it from the loaded data
# (e.g. (100, 100, 1)) exactly as the final model cell does.
input_shape = data.shape[1:]
fit_with_partial = partial(fit_with, input_shape, verbose)
[IN]
from bayes_opt import BayesianOptimization

# Bounds of the search space for the two tuned hyperparameters.
pbounds = {'dropout2_rate': (0.1, 0.5), 'lr': (1e-4, 1e-2)}

optimizer = BayesianOptimization(f=fit_with_partial,
                                 pbounds=pbounds,
                                 verbose=2,
                                 random_state=1)
# 10 random exploration points followed by 10 model-guided iterations.
optimizer.maximize(init_points=10, n_iter=10)

for i, res in enumerate(optimizer.res):
    print("Iteration {}: \n\t{}".format(i, res))
print(optimizer.max)
[OUT]
{'target': 0.8373983502388, 'params': {'dropout2_rate': 0.10004574992693796, 'lr': 0.003093092469055214}}
6. CNN 모델 구축
[IN]
# Final CNN, rebuilt with the dropout rate found by the hyperparameter
# search (~0.194); architecture mirrors get_model above.
model = Sequential([
    Conv2D(32, kernel_size=(3, 3), input_shape=data.shape[1:], activation='relu'),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.1942),
    Dense(2, activation='softmax'),
])
[IN]
# Adam with a small fixed learning rate; binary cross-entropy over the
# 2-way one-hot targets produced by to_categorical above.
opt = keras.optimizers.Adam(learning_rate=0.0001)
model.compile(loss='binary_crossentropy',
optimizer=opt,
metrics=['accuracy'])
[IN]
# Stop training once val_loss has not improved for 50 epochs, and keep
# only the best checkpoint (file name encodes epoch and val_loss, so the
# best model can be identified later, e.g. '086-0.3412.model').
early_stopping = EarlyStopping(monitor='val_loss', patience=50)
checkpoint = ModelCheckpoint(
    '{epoch:03d}-{val_loss:.4f}.model',
    monitor='val_loss',
    verbose=0,
    save_best_only=True,
    mode='auto',
)
# NOTE: the previous `strict=True` argument belongs to PyTorch
# Lightning's ModelCheckpoint, not Keras'; recent Keras versions raise
# on the unknown keyword.
[IN] Train Model
# Train on augmented batches. The batch size belongs to aug.flow —
# Keras rejects (or ignores) a `batch_size` argument to model.fit when
# the input is already a generator, so the previous batch_size=4 on
# fit() never took effect.
history = model.fit(aug.flow(train_data, train_target, batch_size=4),
                    epochs=100,
                    callbacks=[checkpoint, early_stopping],
                    validation_data=(test_data, test_target))
print(model.evaluate(test_data, test_target))
model.summary()
[OUT]
Model: "sequential"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
conv2d (Conv2D) (None, 98, 98, 32) 320
conv2d_1 (Conv2D) (None, 96, 96, 64) 18496
max_pooling2d (MaxPooling2 (None, 48, 48, 64) 0
D)
dropout (Dropout) (None, 48, 48, 64) 0
flatten (Flatten) (None, 147456) 0
dense (Dense) (None, 128) 18874496
dropout_1 (Dropout) (None, 128) 0
dense_1 (Dense) (None, 2) 258
=================================================================
Total params: 18893570 (72.07 MB)
Trainable params: 18893570 (72.07 MB)
Non-trainable params: 0 (0.00 Byte)
7. 모델 평가(Evaluate the model : best model (086-0.3412.model))
[IN]
%matplotlib inline
import matplotlib.pyplot as plt
fig, loss_ax = plt.subplots(figsize=(10,6))
acc_ax = loss_ax.twinx()
acc_ax.plot(history.history['accuracy'], 'b', label='train acc')
acc_ax.plot(history.history['val_accuracy'], 'g', label='val acc')
loss_ax.plot(history.history['loss'], 'y', label='train loss')
loss_ax.plot(history.history['val_loss'], 'r', label='val loss')
loss_ax.set_xlabel('epoch')
loss_ax.set_ylabel('loss')
acc_ax.set_ylabel('accuray')
acc_ax.legend(loc='upper left', bbox_to_anchor=(1.15, 0.5))
loss_ax.legend(loc='lower left', bbox_to_anchor=(1.15, 0.5))
plt.show()
[OUT]
8. 카메라 구동 후 mask detection(Face mask detection in real-time video)
[IN]
import cv2
import numpy as np
from keras.models import load_model

# Integer label -> class name, matching label_dict built during
# training: {'without_mask': 0, 'with_mask': 1}.
labels_dict = {1: 'with_mask', 0: 'without_mask'}
# BGR box colors: green for a worn mask (1), red for the no-mask alert
# case (0). The previous mapping was inverted — the alert class was
# drawn green and the safe class red — contradicting the beep logic in
# the detection loop.
color_dict = {1: (0, 255, 0), 0: (0, 0, 255)}
source = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc('X', 'V', 'I', 'D')
out = cv2.VideoWriter('output.avi', fourcc, 20, (640, 480))
[IN]
# face recognition/classifier : haar feature
# cv2.data.haarcascades is the directory of the cascade files bundled
# with OpenCV; the previous code appended a relative '../data/...' path
# to it, producing a path that does not exist.
face_clsfr = cv2.CascadeClassifier(
    cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
# face mask detector: best checkpoint from training
model = load_model('086-0.3412.model')

while True:
    ret, img = source.read()
    if not ret:
        # Camera frame unavailable — stop instead of crashing in cvtColor.
        break
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_clsfr.detectMultiScale(gray, 1.3, 5)
    for x, y, w, h in faces:
        # Crop rows by height and columns by width. The original used
        # gray[y:y+w, x:x+w], i.e. width for the row span — wrong for
        # non-square detections.
        face_img = gray[y:y + h, x:x + w]
        resized = cv2.resize(face_img, (100, 100))
        normalized = resized / 255.0
        reshaped = np.reshape(normalized, (1, 100, 100, 1))
        result = model.predict(reshaped)
        label = np.argmax(result, axis=1)[0]
        cv2.rectangle(img, (x, y), (x + w, y + h), color_dict[label], 2)
        cv2.rectangle(img, (x, y - 40), (x + w, y), color_dict[label], -1)
        if labels_dict[label] == 'with_mask':
            print("No Beep")
        else:
            # NOTE(review): the original called sound.play() here, but
            # `sound` is never defined anywhere in this file, so the
            # first no-mask frame raised NameError.
            # TODO: wire up a real alert sound (e.g. pygame.mixer).
            print("Beep")
        cv2.putText(
            img, "{}: {:.2f}%".format(labels_dict[label], np.max(result) * 100),
            (x, y - 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    cv2.imshow('LIVE', img)
    out.write(img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

source.release()
out.release()
cv2.destroyAllWindows()
[OUT]
'Deep Learning' 카테고리의 다른 글
[openCV] 이미지 연산 및 합성 (0) | 2023.08.22 |
---|---|
[openCV] 이미지 Thresholding (2) | 2023.08.22 |
[openCV] 컬러 스페이스 (0) | 2023.08.22 |
[openCV] ROI(Region Of Interest) (0) | 2023.08.22 |
[openCV] Window management, Keyboard Event, Trackbar, Mouse event (0) | 2023.08.21 |