Batch_size Modification
WuXueQiCai-DaJiangShen commented
It seems that when we use `train_on_batch` to train our Faster RCNN model, we can't change the batch_size directly the way we can in `model.fit()`. So I simply concatenated a mini-batch of `X`, `X2`, `Y`, `Y1`, and `Y2` and fed them into the training process. However, the results turned out pretty bad. What is the reason for that, and how can I modify the batch_size when training the Faster RCNN model?
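(For context on what I tried: `train_on_batch` has no `batch_size` argument; Keras simply reads the batch size off the leading axis of the arrays you pass it. So the idea was to stack several generator samples along that axis. A minimal sketch of the pattern, with made-up shapes purely for illustration:)

```python
import numpy as np

# train_on_batch infers the batch size from the first axis of its inputs,
# so stacking four (1, H, W, C) samples gives an effective batch size of 4
samples = [np.zeros((1, 600, 800, 3)) for _ in range(4)]  # illustrative shapes
X = np.concatenate(samples, axis=0)                       # -> (4, 600, 800, 3)
# loss = model_rpn.train_on_batch(X, Y)                   # Y stacked the same way
```

Here is the relevant part of my modified training loop: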
```python
# (snippet assumes the training script's usual imports: numpy as np, random,
#  roi_helpers, and the Keras backend as K)
...
if len(rpn_accuracy_rpn_monitor) == epoch_length and C.verbose:
    mean_overlapping_bboxes = float(sum(rpn_accuracy_rpn_monitor)) / len(rpn_accuracy_rpn_monitor)
    rpn_accuracy_rpn_monitor = []
    print('Average number of overlapping bounding boxes from RPN = {} for {} previous iterations'.format(mean_overlapping_bboxes, epoch_length))
    if mean_overlapping_bboxes == 0:
        print('RPN is not producing bounding boxes that overlap the ground truth boxes. Check RPN settings or keep training.')

# X, Y, img_data = next(data_gen_train)
# instead of one image per step, draw batch_size images from the generator
batch_size = 4
X = []
Y = [[], []]
img_datas = []
for iter_batch in range(batch_size):
    X_s, Y_s, img_data = next(data_gen_train)  # one image and its RPN targets
    X.append(X_s[0])
    Y[0].append(Y_s[0][0])  # RPN objectness targets
    Y[1].append(Y_s[1][0])  # RPN box-regression targets
    img_datas.append(img_data)
# note: this stacking only yields a proper 4-D batch if every image
# (and therefore every target map) has identical spatial dimensions
X = np.array(X)
Y = [np.array(Y[0]), np.array(Y[1])]
loss_rpn = model_rpn.train_on_batch(X, Y)
P_rpn = model_rpn.predict_on_batch(X)
...
```
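In the elided part, `R` is built per image by converting each slice of the batched RPN prediction to RoIs. Roughly like this, assuming the `rpn_to_roi` call from the stock train_frcnn.py (`K` being the Keras backend):

```python
# sketch of the elided step: one rpn_to_roi call per image in the batch,
# slicing with i:i+1 so each call still sees a leading batch axis of 1
R = []
for i in range(batch_size):
    R.append(roi_helpers.rpn_to_roi(P_rpn[0][i:i + 1], P_rpn[1][i:i + 1], C,
                                    K.image_dim_ordering(), use_regr=True,
                                    overlap_thresh=0.7, max_boxes=300))
```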
```python
...
# run calc_iou per image, then stack the classifier inputs and targets
X2s = []
Y1s = []
Y2s = []
for iter_batch in range(batch_size):
    # note: calc_iou converts from (x1,y1,x2,y2) to (x,y,w,h) format
    X2, Y1, Y2 = roi_helpers.calc_iou(R[iter_batch], img_datas[iter_batch], C, class_mapping)
    if X2 is None:
        rpn_accuracy_rpn_monitor.append(0)
        rpn_accuracy_for_epoch.append(0)
        continue

    # the last column of Y1 is the background flag: 1 = negative RoI
    neg_samples = np.where(Y1[0, :, -1] == 1)[0]
    pos_samples = np.where(Y1[0, :, -1] == 0)[0]

    rpn_accuracy_rpn_monitor.append(len(pos_samples))
    rpn_accuracy_for_epoch.append(len(pos_samples))

    if C.num_rois > 1:
        # aim for a half-positive, half-negative RoI minibatch per image
        if len(pos_samples) < C.num_rois // 2:
            selected_pos_samples = pos_samples.tolist()
        else:
            selected_pos_samples = np.random.choice(pos_samples, C.num_rois // 2, replace=False).tolist()
        try:
            selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=False).tolist()
        except ValueError:
            # not enough negatives to sample without replacement
            selected_neg_samples = np.random.choice(neg_samples, C.num_rois - len(selected_pos_samples), replace=True).tolist()
        sel_samples = selected_pos_samples + selected_neg_samples
    else:
        # in the extreme case where num_rois = 1, we pick a random pos or neg sample
        # (kept as a one-element list so the RoI axis is preserved when slicing)
        selected_pos_samples = pos_samples.tolist()
        selected_neg_samples = neg_samples.tolist()
        if np.random.randint(0, 2):
            sel_samples = [random.choice(selected_neg_samples)]
        else:
            sel_samples = [random.choice(selected_pos_samples)]

    X2s.append(X2[:, sel_samples, :][0])
    Y1s.append(Y1[:, sel_samples, :][0])
    Y2s.append(Y2[:, sel_samples, :][0])

X2s = np.array(X2s)
Y1s = np.array(Y1s)
Y2s = np.array(Y2s)
# loss_class = model_classifier.train_on_batch([X, X2[:, sel_samples, :]], [Y1[:, sel_samples, :], Y2[:, sel_samples, :]])
if X2s.shape[0] != batch_size:
    # at least one image yielded no RoIs; skip this outer-loop iteration
    continue
loss_class = model_classifier.train_on_batch([X, X2s], [Y1s, Y2s])
...
```
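In case it helps anyone reproduce the problem, this is the kind of sanity check I would add right before each `train_on_batch` call (purely illustrative):

```python
# every stacked array must be a homogeneous ndarray with a leading
# batch axis; an object dtype means the per-image shapes did not match
print(X.shape, Y[0].shape, Y[1].shape)      # expect (batch_size, ...)
print(X2s.shape, Y1s.shape, Y2s.shape)      # expect (batch_size, num_rois, ...)
assert X.dtype != object, 'images in this batch have different spatial sizes'
```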