ChunML/ssd-tf2

Why doesn't the loss converge when I rebuild the model this way?

Closed this issue · 0 comments

import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications.vgg16 import VGG16  # imported but not used below

class ssd(tf.keras.Model):
    def __init__(self, num_classes):
        super(ssd, self).__init__()
        self.num_classes = num_classes
        # input: 300*300*3
        self.conv1_1 = keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')
        self.conv1_2 = keras.layers.Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')
        self.pool1 = keras.layers.MaxPooling2D((2, 2), padding='same', strides=(2, 2), name='block1_pool')
        # 150*150*64
        self.conv2_1 = keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')
        self.conv2_2 = keras.layers.Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')
        self.pool2 = keras.layers.MaxPooling2D((2, 2), padding='same', strides=(2, 2), name='block2_pool')
        # 75*75*128
        self.conv3_1 = keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')
        self.conv3_2 = keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')
        self.conv3_3 = keras.layers.Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')
        self.pool3 = keras.layers.MaxPooling2D((2, 2), padding='same', strides=(2, 2), name='block3_pool')
        # 38*38*256
        self.conv4_1 = keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')
        self.conv4_2 = keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')
        self.conv4_3 = keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')
        # todo output_1
        self.pool4 = keras.layers.MaxPooling2D((2, 2), padding='same', strides=(2, 2), name='block4_pool')
        # 19*19*512
        self.conv5_1 = keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')
        self.conv5_2 = keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')
        self.conv5_3 = keras.layers.Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')
        # 19*19*512
        self.pool5 = keras.layers.MaxPooling2D((3, 3), strides=(1, 1), padding='same', name='block5_pool')
        self.conv6_1 = keras.layers.Conv2D(1024, (3, 3), padding='same', activation='relu', dilation_rate=6, name='block6_conv1')
        # todo output_2
        self.conv6_2 = keras.layers.Conv2D(1024, (1, 1), padding='same', activation='relu')
        # 19*19*1024
        self.conv7_1 = keras.layers.Conv2D(256, (1, 1), padding='same', activation='relu', name='conv7_1')
        # todo output_3
        self.conv7_2 = keras.layers.Conv2D(512, (3, 3), padding='same', strides=(2, 2), activation='relu', name='conv7_2')
        # 10*10*512
        self.conv8_1 = keras.layers.Conv2D(128, (1, 1), padding='same', activation='relu', name='conv8_1')
        # 5*5
        self.conv8_2 = keras.layers.Conv2D(256, (3, 3), padding='same', strides=(2, 2), activation='relu', name='conv8_2')

        self.conv9_1 = keras.layers.Conv2D(128, (1, 1), padding='same', activation='relu', name='conv9_1')
        # 3*3
        self.conv9_2 = keras.layers.Conv2D(256, (3, 3), padding='same', strides=(2, 2), activation='relu', name='conv9_2')

        self.conv10_1 = keras.layers.Conv2D(128, (1, 1), activation='relu', name='conv10_1')
        # 1*1
        self.conv10_2 = keras.layers.Conv2D(256, (3, 3), activation='relu', name='conv10_2')
        # 1*1*256
        # self.conv11_1 = keras.layers.Conv2D(128, (1, 1), activation='relu', name='conv11_1')
        # self.conv11_2 = keras.layers.Conv2D(256, (3, 3), strides=(1, 1), activation='relu', name='conv11_2')

        # the 1st feature map has 4 default_box sizes per location
        self.conf1 = keras.layers.Conv2D(4 * num_classes, (3, 3), padding='same', name='conf1')
        # the 2nd, 3rd and 4th feature maps have 6 default_box sizes per location
        self.conf2 = keras.layers.Conv2D(6 * num_classes, (3, 3), padding='same', name='conf2')
        self.conf3 = keras.layers.Conv2D(6 * num_classes, (3, 3), padding='same', name='conf3')
        self.conf4 = keras.layers.Conv2D(6 * num_classes, (3, 3), padding='same', name='conf4')
        # the 5th and 6th feature maps have 4 default_box sizes per location
        self.conf5 = keras.layers.Conv2D(4 * num_classes, (3, 3), padding='same', name='conf5')
        # self.conf6 = keras.layers.Conv2D(4 * num_classes, (3, 3), padding='same', name='conf5')
        # the last feature map is 1*1, so a 1*1 kernel is used
        self.conf6 = keras.layers.Conv2D(4 * num_classes, (1, 1), padding='same', name='conf6')

        # the 1st feature map has 4 default_box sizes per location
        self.loc1 = keras.layers.Conv2D(4 * 4, (3, 3), padding='same', name='loc1')
        # the 2nd, 3rd and 4th feature maps have 6 default_box sizes per location
        self.loc2 = keras.layers.Conv2D(6 * 4, (3, 3), padding='same', name='loc2')
        self.loc3 = keras.layers.Conv2D(6 * 4, (3, 3), padding='same', name='loc3')
        self.loc4 = keras.layers.Conv2D(6 * 4, (3, 3), padding='same', name='loc4')
        # the 5th and 6th feature maps have 4 default_box sizes per location
        self.loc5 = keras.layers.Conv2D(4 * 4, (3, 3), padding='same', name='loc5')
        # self.loc6 = keras.layers.Conv2D(4 * 4, (3, 3), padding='same', name='loc6')
        # the last feature map is 1*1, so a 1*1 kernel is used
        self.loc6 = keras.layers.Conv2D(4 * 4, (1, 1), padding='same', name='loc6')

        self.batch_norm = keras.layers.BatchNormalization(
            beta_initializer='glorot_uniform',
            gamma_initializer='glorot_uniform'
        )

        # self.conv1 = keras.layers.Conv2D(filter_num=64, kernel_size=(3, 3), padding='same', activation='relu')

    def call(self, input, **kwargs):
        conf = []
        loc = []
        # block1
        x1_1 = self.conv1_1(input)
        x1_2 = self.conv1_2(x1_1)
        x1_3 = self.pool1(x1_2)
        # block2
        x2_1 = self.conv2_1(x1_3)
        x2_2 = self.conv2_2(x2_1)
        x2_3 = self.pool2(x2_2)
        # block3
        x3_1 = self.conv3_1(x2_3)
        x3_2 = self.conv3_2(x3_1)
        x3_3 = self.conv3_3(x3_2)
        x3_4 = self.pool3(x3_3)
        # block4
        x4_1 = self.conv4_1(x3_4)
        x4_2 = self.conv4_2(x4_1)
        x4_3 = self.conv4_3(x4_2)  # todo output1
        print("x4_3___" + str(x4_3.shape))
        # conf1 = self.conf1(x4_3)
        x = self.batch_norm(x4_3)
        conf1 = self.conf1(x)
        conf1 = tf.reshape(conf1, [conf1.shape[0], -1, self.num_classes])
        loc1 = self.loc1(x4_3)
        loc1 = tf.reshape(loc1, [loc1.shape[0], -1, 4])
        conf.append(conf1)
        loc.append(loc1)

        x4_4 = self.pool4(x4_3)
        # block5
        x5_1 = self.conv5_1(x4_4)
        x5_2 = self.conv5_2(x5_1)
        x5_3 = self.conv5_3(x5_2)
        x5_4 = self.pool5(x5_3)
        # block6
        x6_1 = self.conv6_1(x5_4)
        x6_2 = self.conv6_2(x6_1)  # todo output2
        print("x6_2___" + str(x6_2.shape))
        conf2 = self.conf2(x6_2)
        # conf2 = self.conf2(self.batch_norm(x6_2))
        conf2 = tf.reshape(conf2, [conf2.shape[0], -1, self.num_classes])
        loc2 = self.loc2(x6_2)
        loc2 = tf.reshape(loc2, [loc2.shape[0], -1, 4])
        conf.append(conf2)
        loc.append(loc2)

        # block7
        x7_1 = self.conv7_1(x6_2)
        x7_2 = self.conv7_2(x7_1)  # todo output3
        print("x7_2___" + str(x7_2.shape))
        # conf3 = self.conf3(self.batch_norm(x7_2))
        conf3 = self.conf3(x7_2)
        conf3 = tf.reshape(conf3, [conf3.shape[0], -1, self.num_classes])
        loc3 = self.loc3(x7_2)
        loc3 = tf.reshape(loc3, [loc3.shape[0], -1, 4])
        conf.append(conf3)
        loc.append(loc3)

        # block8
        x8_1 = self.conv8_1(x7_2)
        x8_2 = self.conv8_2(x8_1)  # todo output4
        print("x8_2___" + str(x8_2.shape))
        # conf4 = self.conf4(self.batch_norm(x8_2))
        conf4 = self.conf4(x8_2)
        conf4 = tf.reshape(conf4, [conf4.shape[0], -1, self.num_classes])
        loc4 = self.loc4(x8_2)
        loc4 = tf.reshape(loc4, [loc4.shape[0], -1, 4])
        conf.append(conf4)
        loc.append(loc4)

        # block9
        x9_1 = self.conv9_1(x8_2)
        x9_2 = self.conv9_2(x9_1)  # todo output5
        print("x9_2___" + str(x9_2.shape))
        conf5 = self.conf5(x9_2)
        # conf5 = self.conf5(self.batch_norm(x9_2))
        conf5 = tf.reshape(conf5, [conf5.shape[0], -1, self.num_classes])
        loc5 = self.loc5(x9_2)
        loc5 = tf.reshape(loc5, [loc5.shape[0], -1, 4])
        conf.append(conf5)
        loc.append(loc5)

        # block10
        x10_1 = self.conv10_1(x9_2)
        x10_2 = self.conv10_2(x10_1)  # todo output6
        print("x10_2___" + str(x10_2.shape))
        conf6 = self.conf6(x10_2)
        # conf6 = self.conf6(self.batch_norm(x10_2))
        conf6 = tf.reshape(conf6, [conf6.shape[0], -1, self.num_classes])
        loc6 = self.loc6(x10_2)
        loc6 = tf.reshape(loc6, [loc6.shape[0], -1, 4])
        conf.append(conf6)
        loc.append(loc6)

        confs = tf.concat(conf, axis=1)
        locs = tf.concat(loc, axis=1)
        print('conv_shape...:' + str(len(conf)))
        return [confs, locs]

if __name__ == '__main__':
    model = ssd(num_classes=21)
    model.build(input_shape=(1, 300, 300, 3))
    model.summary()

I find that the parameter counts are the same as in your model, and the feature-map shapes are the same too, but in the end it still cannot recognize the targets correctly.
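
For reference, this is the kind of shape sanity check I run on the rebuilt model (a minimal sketch, assuming the 300*300*3 input and the standard SSD300 layout of 8732 default boxes implied by the comments above; the variable names are just for illustration):

    import tensorflow as tf

    model = ssd(num_classes=21)
    dummy = tf.random.normal([1, 300, 300, 3])  # fake batch, only used to check output shapes
    confs, locs = model(dummy)

    # 38*38*4 + 19*19*6 + 10*10*6 + 5*5*6 + 3*3*4 + 1*1*4 = 8732 default boxes
    print(confs.shape)  # expected: (1, 8732, 21)
    print(locs.shape)   # expected: (1, 8732, 4)

The shapes match, which is why I suspect the problem is not in the layer layout itself.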