In this post, I will review the training model.
- [Code Review/ self-driving lab] Udacity Self-driving Car - (1)
- [Code Review/ self-driving lab] Udacity Self-driving Car - (2)
1. Model.py
The code of Model.py is as follows.
Let's go through the code step by step.
import numpy as np
import tensorflow as tf
import csv
import matplotlib.pyplot as plt
import os, sys
import cv2
from sklearn.model_selection import train_test_split
from keras.models import Sequential, Model
from keras.layers.core import Dense, Dropout, Activation, Lambda
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.layers import Convolution2D, MaxPooling2D, Flatten, Input, ELU
from keras import initializations
from keras.models import load_model, model_from_json
from keras.layers.normalization import BatchNormalization
from sklearn.utils import shuffle
from keras import backend as K
import json
import gc
import tensorflow as tf

tf.python.control_flow_ops = tf

csv_path = 'driving_log.csv'        # my data (fantastic graphic mode)
csv_path1 = 'data/driving_log.csv'  # udacity data (fastest graphic mode)

center_db, left_db, right_db, steer_db = [], [], [], []
Rows, Cols = 64, 64
offset = 0.22

# read csv file
with open(csv_path1) as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        if float(row['steering']) != 0.0:
            center_db.append(row['center'])
            left_db.append(row['left'].strip())
            right_db.append(row['right'].strip())
            steer_db.append(float(row['steering']))
        else:
            prob = np.random.uniform()
            if prob <= 0.15:
                center_db.append(row['center'])
                left_db.append(row['left'].strip())
                right_db.append(row['right'].strip())
                steer_db.append(float(row['steering']))

# shuffle a dataset
center_db, left_db, right_db, steer_db = shuffle(center_db, left_db, right_db, steer_db)

# split train & valid data
img_train, img_valid, steer_train, steer_valid = train_test_split(center_db, steer_db, test_size=0.1, random_state=42)

plt.hist(steer_db, bins=50, color='orange')
plt.xlabel('steering value')
plt.ylabel('counts')
# plt.show()

def select_img(center, left, right, steer, num, offsets=0.22):
    """
    randomly select among center, left, right images
    add ±0.22 to left, right steering angle.
    couldn't find exact left, right steering angle by using geometric method because we didn't have enough information.
    """
    rand = np.random.randint(3)
    if rand == 0:
        image, steering = cv2.imread(center[num]), steer[num]
    elif rand == 1:
        image, steering = cv2.imread(left[num]), steer[num] + offsets
    elif rand == 2:
        image, steering = cv2.imread(right[num]), steer[num] - offsets
    if abs(steering) > 1:
        steering = -1 if (steering < 0) else 1
    return image, steering

def valid_img(valid_image, valid_steer, num):
    """ using only center image for validation """
    steering = valid_steer[num]
    image = cv2.imread(valid_image[num])
    return image, steering

def crop_img(image):
    """ crop unnecessary parts """
    cropped_img = image[63:136, 0:319]
    resized_img = cv2.resize(cropped_img, (Cols, Rows), cv2.INTER_AREA)
    img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)
    return resized_img

def shift_img(image, steer):
    """
    randomly shift image horizontally
    add proper steering angle to each image
    """
    max_shift = 55
    max_ang = 0.14  # ang_per_pixel = 0.0025
    rows, cols, _ = image.shape
    random_x = np.random.randint(-max_shift, max_shift + 1)
    dst_steer = steer + (random_x / max_shift) * max_ang
    if abs(dst_steer) > 1:
        dst_steer = -1 if (dst_steer < 0) else 1
    mat = np.float32([[1, 0, random_x], [0, 1, 0]])
    dst_img = cv2.warpAffine(image, mat, (cols, rows))
    return dst_img, dst_steer

def brightness_img(image):
    """ randomly change brightness by converting Y value """
    br_img = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    coin = np.random.randint(2)
    if coin == 0:
        random_bright = 0.2 + np.random.uniform(0.2, 0.6)
        br_img[:, :, 2] = br_img[:, :, 2] * random_bright
    br_img = cv2.cvtColor(br_img, cv2.COLOR_HSV2RGB)
    return br_img

def generate_shadow(image, min_alpha=0.5, max_alpha=0.75):
    """ generate random shadow in random region """
    top_x, bottom_x = np.random.randint(0, Cols, 2)
    coin = np.random.randint(2)
    rows, cols, _ = image.shape
    shadow_img = image.copy()
    if coin == 0:
        rand = np.random.randint(2)
        vertices = np.array([[(50, 65), (45, 0), (145, 0), (150, 65)]], dtype=np.int32)
        if rand == 0:
            vertices = np.array([[top_x, 0], [0, 0], [0, rows], [bottom_x, rows]], dtype=np.int32)
        elif rand == 1:
            vertices = np.array([[top_x, 0], [cols, 0], [cols, rows], [bottom_x, rows]], dtype=np.int32)
        mask = image.copy()
        channel_count = image.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (0,) * channel_count
        cv2.fillPoly(mask, [vertices], ignore_mask_color)
        rand_alpha = np.random.uniform(min_alpha, max_alpha)
        cv2.addWeighted(mask, rand_alpha, image, 1 - rand_alpha, 0., shadow_img)
    return shadow_img

def flip_img(image, steering):
    """ randomly flip image to gain right turn data (track1 is biaed in left turn) """
    flip_image = image.copy()
    flip_steering = steering
    num = np.random.randint(2)
    if num == 0:
        flip_image, flip_steering = cv2.flip(image, 1), -steering
    return flip_image, flip_steering

def network_model():
    """
    designed with 4 convolutional layer & 3 fully connected layer
    weight init : glorot_uniform
    activation func : relu
    pooling : maxpooling
    used dropout
    """
    model = Sequential()
    model.add(Lambda(lambda x: x / 127.5 - 1.0, input_shape=(Rows, Cols, 3)))
    model.add(Convolution2D(32, 3, 3, border_mode='same', subsample=(2, 2), activation='relu', name='Conv1'))
    # model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='same'))
    model.add(Convolution2D(64, 3, 3, border_mode='same', subsample=(2, 2), activation='relu', name='Conv2'))
    # model.add(BatchNormalization())
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='same'))
    model.add(Convolution2D(128, 3, 3, border_mode='same', subsample=(1, 1), activation='relu', name='Conv3'))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=None, border_mode='same'))
    # model.add(BatchNormalization())
    model.add(Convolution2D(128, 2, 2, border_mode='same', subsample=(1, 1), activation='relu', name='Conv4'))
    # model.add(BatchNormalization())
    model.add(Flatten())
    model.add(Dropout(0.2))
    model.add(Dense(128, activation='relu', name='FC1'))
    model.add(Dropout(0.5))
    model.add(Dense(128, activation='relu', name='FC2'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu', name='FC3'))
    model.add(Dense(1))
    model.summary()
    return model

def generate_train(center, left, right, steer):
    """
    data augmentation
    transformed image & crop
    """
    num = np.random.randint(0, len(steer))
    # to avoid bias in straight angle
    # bal = True
    # while bal:
    #     num = np.random.randint(0, len(steer))
    #     check_steer = steer[num]
    #     if check_steer == 0:
    #         rand = np.random.uniform()
    #         if rand <= 0.25:
    #             bal = False
    #     else:
    #         bal = False
    image, steering = select_img(center, left, right, steer, num, offset)
    image, steering = shift_img(image, steering)
    image, steering = flip_img(image, steering)
    image = brightness_img(image)
    # image = generate_shadow(image)
    image = crop_img(image)
    return image, steering

def generate_valid(img_valid, steer_valid):
    """ generate validation set """
    img_set = np.zeros((len(img_valid), Rows, Cols, 3))
    steer_set = np.zeros(len(steer_valid))
    for i in range(len(img_valid)):
        img, steer = valid_img(img_valid, steer_valid, i)
        img_set[i] = crop_img(img)
        steer_set[i] = steer
    return img_set, steer_set

def generate_train_batch(center, left, right, steering, batch_size):
    """ compose training batch set """
    image_set = np.zeros((batch_size, Rows, Cols, 3))
    steering_set = np.zeros(batch_size)
    while 1:
        for i in range(batch_size):
            img, steer = generate_train(center, left, right, steering)
            image_set[i] = img
            steering_set[i] = steer
        yield image_set, steering_set

batch_size = 256
epoch = 10

train_generator = generate_train_batch(center_db, left_db, right_db, steer_db, batch_size)
image_val, steer_val = generate_valid(img_valid, steer_valid)

model = network_model()
adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(optimizer=adam, loss='mse')

model_json = 'model.json'
model_weights = 'model.h5'

history = model.fit_generator(train_generator, samples_per_epoch=20480, nb_epoch=epoch,
                              validation_data=(image_val, steer_val), verbose=1)

json_string = model.to_json()
try:
    os.remove(model_json)
    os.remove(model_weights)
except OSError:
    pass

with open(model_json, 'w') as jfile:
    json.dump(json_string, jfile)
model.save_weights(model_weights)

# to avoid " 'NoneType' object has no attribute 'TF_DeleteStatus' " error
gc.collect()
K.clear_session()
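For reference, since the network itself is not covered in a separate section below, here is a rough trace of the tensor shapes by hand. This is my own bookkeeping, assuming Keras 1.x semantics ('same' padding with subsample=(2, 2) gives ceil(size/stride), and MaxPooling2D with strides=None uses the pool size as the stride); model.summary() printed inside network_model() shows the exact shapes and parameter counts.

# shape trace through network_model() for a 64x64x3 input (my estimate, not from the post)
# Input              64 x 64 x 3
# Conv1 (3x3, /2)    32 x 32 x 32
# MaxPool (2x2)      16 x 16 x 32
# Conv2 (3x3, /2)     8 x  8 x 64
# MaxPool (2x2)       4 x  4 x 64
# Conv3 (3x3)         4 x  4 x 128
# MaxPool (2x2)       2 x  2 x 128
# Conv4 (2x2)         2 x  2 x 128
# Flatten             512
# FC1 -> FC2 -> FC3 -> output : 128 -> 128 -> 64 -> 1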
2. CSV file reading
- If you clone the repository, there is a data/data/driving_log.csv file. This is Udacity's training data; if you want to use a custom dataset, format your own csv to match Udacity's csv layout and load that instead.
- center_db, left_db, right_db, steer_db hold the camera images from the left/center/right of the vehicle's dashboard and the vehicle's steering angle values.
- Rows, Cols define the size of the image fed into the network input.
- offset is the value used to correct the steering angle during data augmentation.
- Now the csv file is actually read, and every row whose steering value is not 0 is appended in order to the center_db, left_db, right_db, steer_db lists declared above.
- If the steering value is 0, a random number is drawn and the row is kept only when that number is at most 0.15.
- The reason for keeping only a small fraction of the zero-steering rows is that normal driving data is dominated by straight driving and contains relatively few curves, so this prevents the model from over-training on straight driving. (A quick check of the resulting balance is sketched right after the code below.)
csv_path = 'driving_log.csv'        # my data (fantastic graphic mode)
csv_path1 = 'data/driving_log.csv'  # udacity data (fastest graphic mode)

center_db, left_db, right_db, steer_db = [], [], [], []
Rows, Cols = 64, 64
offset = 0.22

# read csv file
with open(csv_path1) as csvfile:
    reader = csv.DictReader(csvfile)
    for row in reader:
        if float(row['steering']) != 0.0:
            center_db.append(row['center'])
            left_db.append(row['left'].strip())
            right_db.append(row['right'].strip())
            steer_db.append(float(row['steering']))
        else:
            prob = np.random.uniform()
            if prob <= 0.15:
                center_db.append(row['center'])
                left_db.append(row['left'].strip())
                right_db.append(row['right'].strip())
                steer_db.append(float(row['steering']))
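To see how much the prob <= 0.15 sampling actually rebalances the data, the counts can be printed right after the loop. This is a small diagnostic sketch I added for illustration; it is not part of the original model.py.

# diagnostic sketch (not in the original script): how many straight-driving rows were kept
num_zero = sum(1 for s in steer_db if s == 0.0)
num_turn = len(steer_db) - num_zero
print('zero-steering samples kept : %d' % num_zero)
print('non-zero steering samples  : %d' % num_turn)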
3. Select_img Method
- The training data prepared above is shuffled randomly.
- The data is then split into a training set and a validation (dev) set.
:: The split is made from the center-camera images and the steering values; test_size is 0.1, so 10% of the data is held out for validation.
- The steering values are plotted as a histogram to check their distribution.
- select_img is a method used for data augmentation.
:: A value in [0, 1, 2] is drawn at random: 0 uses the center image with the original steering value; 1 uses the left image and adds the offset to the steering; 2 uses the right image and subtracts the offset. Because the left/right cameras see a laterally shifted view of the road, correcting the steering by ±offset in this way yields a wider variety of steering values. (A small usage sketch follows the code below.)
# shuffle a dataset
center_db, left_db, right_db, steer_db = shuffle(center_db, left_db, right_db, steer_db)

# split train & valid data
img_train, img_valid, steer_train, steer_valid = train_test_split(center_db, steer_db, test_size=0.1, random_state=42)

plt.hist(steer_db, bins=50, color='orange')
plt.xlabel('steering value')
plt.ylabel('counts')
# plt.show()

def select_img(center, left, right, steer, num, offsets=0.22):
    """
    randomly select among center, left, right images
    add ±0.22 to left, right steering angle.
    couldn't find exact left, right steering angle by using geometric method because we didn't have enough information.
    """
    rand = np.random.randint(3)
    if rand == 0:
        image, steering = cv2.imread(center[num]), steer[num]
    elif rand == 1:
        image, steering = cv2.imread(left[num]), steer[num] + offsets
    elif rand == 2:
        image, steering = cv2.imread(right[num]), steer[num] - offsets
    if abs(steering) > 1:
        steering = -1 if (steering < 0) else 1
    return image, steering
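As a quick sanity check of the ±offset correction, select_img can be called directly on one index. The snippet below is a hypothetical usage example of mine (the index 100 and the printouts are not from the original code), and it assumes the image paths stored in the csv resolve from the current working directory so that cv2.imread returns an actual image.

# hypothetical usage example: draw one sample and inspect the corrected steering
img, steer = select_img(center_db, left_db, right_db, steer_db, 100, offset)
print('image shape       :', img.shape)   # e.g. (160, 320, 3) for the simulator images
print('corrected steering:', steer)       # steer_db[100], or steer_db[100] +/- 0.22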
4. Other data augmentation
- The valid_img method is used to extract the dev images for validating the training (only the center camera is used for validation).
- The crop_img method cuts out only the road region from the full image so it can be used as training input, and resizes the crop to the 64x64 network input size.
- shift_img shifts the image horizontally and adjusts the steering value accordingly.
:: In the figure below, the left image is the original and the right one has been shifted to the left; the console output underneath shows that the steering value has changed as well. (A worked example of the correction follows the code below.)
- The brightness_img method converts the image to the HSV color space and then changes its brightness.
- generate_shadow is a function that overlays a shadow on a random region of the image.
- The flip_img method flips the image horizontally (and negates the steering value).
def valid_img(valid_image, valid_steer, num):
    """ using only center image for validation """
    steering = valid_steer[num]
    image = cv2.imread(valid_image[num])
    return image, steering

def crop_img(image):
    """ crop unnecessary parts """
    cropped_img = image[63:136, 0:319]
    resized_img = cv2.resize(cropped_img, (Cols, Rows), cv2.INTER_AREA)
    img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)
    return resized_img

def shift_img(image, steer):
    """
    randomly shift image horizontally
    add proper steering angle to each image
    """
    max_shift = 55
    max_ang = 0.14  # ang_per_pixel = 0.0025
    rows, cols, _ = image.shape
    random_x = np.random.randint(-max_shift, max_shift + 1)
    dst_steer = steer + (random_x / max_shift) * max_ang
    if abs(dst_steer) > 1:
        dst_steer = -1 if (dst_steer < 0) else 1
    mat = np.float32([[1, 0, random_x], [0, 1, 0]])
    dst_img = cv2.warpAffine(image, mat, (cols, rows))
    return dst_img, dst_steer

def brightness_img(image):
    """ randomly change brightness by converting Y value """
    br_img = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    coin = np.random.randint(2)
    if coin == 0:
        random_bright = 0.2 + np.random.uniform(0.2, 0.6)
        br_img[:, :, 2] = br_img[:, :, 2] * random_bright
    br_img = cv2.cvtColor(br_img, cv2.COLOR_HSV2RGB)
    return br_img

def generate_shadow(image, min_alpha=0.5, max_alpha=0.75):
    """ generate random shadow in random region """
    top_x, bottom_x = np.random.randint(0, Cols, 2)
    coin = np.random.randint(2)
    rows, cols, _ = image.shape
    shadow_img = image.copy()
    if coin == 0:
        rand = np.random.randint(2)
        vertices = np.array([[(50, 65), (45, 0), (145, 0), (150, 65)]], dtype=np.int32)
        if rand == 0:
            vertices = np.array([[top_x, 0], [0, 0], [0, rows], [bottom_x, rows]], dtype=np.int32)
        elif rand == 1:
            vertices = np.array([[top_x, 0], [cols, 0], [cols, rows], [bottom_x, rows]], dtype=np.int32)
        mask = image.copy()
        channel_count = image.shape[2]  # i.e. 3 or 4 depending on your image
        ignore_mask_color = (0,) * channel_count
        cv2.fillPoly(mask, [vertices], ignore_mask_color)
        rand_alpha = np.random.uniform(min_alpha, max_alpha)
        cv2.addWeighted(mask, rand_alpha, image, 1 - rand_alpha, 0., shadow_img)
    return shadow_img

def flip_img(image, steering):
    """ randomly flip image to gain right turn data (track1 is biaed in left turn) """
    flip_image = image.copy()
    flip_steering = steering
    num = np.random.randint(2)
    if num == 0:
        flip_image, flip_steering = cv2.flip(image, 1), -steering
    return flip_image, flip_steering
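To make the shift-to-steering correction in shift_img concrete: a horizontal shift of random_x pixels adds (random_x / 55) * 0.14 to the steering angle, so a 27-pixel shift to the right adds roughly +0.069. The snippet below is only an illustrative calculation of that formula, not part of the original script.

# illustrative calculation of the correction used inside shift_img
max_shift, max_ang = 55, 0.14
for random_x in (-55, -27, 0, 27, 55):
    correction = (random_x / float(max_shift)) * max_ang
    print('shift %+3d px -> steering correction %+.3f' % (random_x, correction))
# e.g. shift +55 px -> +0.140, shift +27 px -> +0.069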
5. Preparing for training
- generate_valid builds the dev set by running the valid_img method and then cropping the road region and resizing.
- The generate_train_batch method builds the training data to feed into the CNN as a 4-dimensional array (batch_size, Rows, Cols, 3).
- The steering labels are assembled to match the batch size as well.
- The training samples themselves are produced by augmenting the data (generate_train) until each batch is filled. (A small generator smoke test follows the code below.)
def generate_train(center, left, right, steer):
    """
    data augmentation
    transformed image & crop
    """
    num = np.random.randint(0, len(steer))
    # to avoid bias in straight angle
    # bal = True
    # while bal:
    #     num = np.random.randint(0, len(steer))
    #     check_steer = steer[num]
    #     if check_steer == 0:
    #         rand = np.random.uniform()
    #         if rand <= 0.25:
    #             bal = False
    #     else:
    #         bal = False
    image, steering = select_img(center, left, right, steer, num, offset)
    image, steering = shift_img(image, steering)
    image, steering = flip_img(image, steering)
    image = brightness_img(image)
    # image = generate_shadow(image)
    image = crop_img(image)
    return image, steering

def generate_valid(img_valid, steer_valid):
    """ generate validation set """
    img_set = np.zeros((len(img_valid), Rows, Cols, 3))
    steer_set = np.zeros(len(steer_valid))
    for i in range(len(img_valid)):
        img, steer = valid_img(img_valid, steer_valid, i)
        img_set[i] = crop_img(img)
        steer_set[i] = steer
    return img_set, steer_set

def generate_train_batch(center, left, right, steering, batch_size):
    """ compose training batch set """
    image_set = np.zeros((batch_size, Rows, Cols, 3))
    steering_set = np.zeros(batch_size)
    while 1:
        for i in range(batch_size):
            img, steer = generate_train(center, left, right, steering)
            image_set[i] = img
            steering_set[i] = steer
        yield image_set, steering_set
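Because generate_train_batch is an infinite generator, it can be smoke-tested by pulling a single batch with next() and checking the array shapes. This is a small sketch I added for illustration, assuming the csv image paths resolve so that cv2.imread actually returns images.

# sketch (not in the original script): pull one batch and verify its shape
gen = generate_train_batch(center_db, left_db, right_db, steer_db, 32)
images, steerings = next(gen)
print(images.shape)     # expected: (32, 64, 64, 3)
print(steerings.shape)  # expected: (32,)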
6. Training
- batch_size is 256.
- The number of epochs is 10.
- The training generator and the dev set are created, and the network model variable is built.
- The optimizer is Adam, configured with the options shown (learning rate 1e-4, etc.).
- The model is compiled, and the file names for saving the model and the weights are defined.
- Training starts by calling the fit_generator method.
- The model is exported to JSON; if files with the same names already exist they are deleted first, and then model.json and the weights file are saved. (A sketch of reloading these files follows the code below.)
batch_size = 256
epoch = 10

train_generator = generate_train_batch(center_db, left_db, right_db, steer_db, batch_size)
image_val, steer_val = generate_valid(img_valid, steer_valid)

model = network_model()
adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(optimizer=adam, loss='mse')

model_json = 'model.json'
model_weights = 'model.h5'

history = model.fit_generator(train_generator, samples_per_epoch=20480, nb_epoch=epoch,
                              validation_data=(image_val, steer_val), verbose=1)

json_string = model.to_json()
try:
    os.remove(model_json)
    os.remove(model_weights)
except OSError:
    pass

with open(model_json, 'w') as jfile:
    json.dump(json_string, jfile)
model.save_weights(model_weights)

# to avoid " 'NoneType' object has no attribute 'TF_DeleteStatus' " error
gc.collect()
K.clear_session()
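Since the architecture is saved as a JSON string and the weights as model.h5, the trained network can typically be restored later (for example, in a script that drives the simulator) with model_from_json plus load_weights. The snippet below is a minimal reload sketch, assuming the two files written by the code above are in the working directory.

# minimal reload sketch, assuming model.json / model.h5 were produced by the code above
import json
from keras.models import model_from_json

with open('model.json') as jfile:
    model = model_from_json(json.load(jfile))
model.compile(optimizer='adam', loss='mse')
model.load_weights('model.h5')
# model.predict(...) can now be used on preprocessed 64x64x3 images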