No data provided for "input_1"
Opened this issue · 1 comment
GKalliatakis commented
I've tried to replicate your build_classification_model using the following code:
import tensorflow as tf
from tensorflow.keras import optimizers
from tensorflow.keras.layers import Input, Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2

# IMG_DIM, NUM_BANDS and NUM_CLASSES are module-level constants defined elsewhere in my code

def build_default_classification_model():
    """Instantiates the initial classification model."""
    input_shape = [IMG_DIM, IMG_DIM, NUM_BANDS]
    img_input = Input(shape=input_shape)

    # Block 1
    x = Conv2D(filters=32, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='droughtwatch_block1_conv1')(img_input)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='droughtwatch_block1_pool', padding='valid')(x)

    # Block 2
    x = Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='droughtwatch_block2_conv1')(x)
    x = Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='droughtwatch_block2_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='droughtwatch_block2_pool', padding='valid')(x)
    x = Dropout(0.2, name='droughtwatch_drop_block2')(x)

    # Block 3
    x = Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='droughtwatch_block3_conv1')(x)
    x = Conv2D(filters=64, kernel_size=3, strides=(1, 1), padding='same',
               kernel_regularizer=l2(0.0002),
               activation='relu', name='droughtwatch_block3_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), strides=(2, 2), name='droughtwatch_block3_pool', padding='valid')(x)
    x = Dropout(0.2, name='droughtwatch_drop_block3')(x)

    x = Flatten(name='droughtwatch_flatten')(x)

    # Classification block
    x = Dense(128, activation='relu', name='droughtwatch_fc1')(x)
    x = Dense(50, activation='relu', name='droughtwatch_fc2')(x)
    x = Dense(NUM_CLASSES, activation='softmax', name='droughtwatch_predictions')(x)

    # Create model
    model = Model(img_input, x)

    # Set up optimizer
    model.compile(loss=tf.keras.losses.categorical_crossentropy,
                  optimizer=optimizers.Adam(),
                  metrics=['accuracy'])
    return model
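For reference, the Input layer here is created without an explicit name argument, so Keras gives it the default layer name input_1, which is the key the error further down refers to. A quick way to confirm that name (a minimal sketch, assuming the build_default_classification_model above):

model = build_default_classification_model()
# the first layer of a functional model is the implicit InputLayer;
# with no name passed to Input(), it defaults to 'input_1'
print(model.layers[0].name)   # -> 'input_1'
# on Keras 2.x-era versions, Model also exposes the list of input layer names
print(model.input_names)      # -> ['input_1']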
And then fit the model on the data using the following:
import math

# load_data, parse_tfrecords and class_weights_matrix are helpers defined elsewhere in my code

def train_convnet(data_path,
                  epochs,
                  batch_size=64,
                  num_train=86317,
                  num_val=10778):
    # load training data in TFRecord format
    train_tfrecords, val_tfrecords = load_data(data_path)

    # load images and labels from TFRecords
    train_images, train_labels = parse_tfrecords(train_tfrecords, batch_size, num_train)
    val_images, val_labels = parse_tfrecords(val_tfrecords, batch_size, num_val)

    model = build_default_classification_model()

    # number of steps per epoch is the total data size divided by the batch size
    train_steps_per_epoch = int(math.floor(float(num_train) / float(batch_size)))
    val_steps_per_epoch = int(math.floor(float(num_val) / float(batch_size)))

    model.fit(train_images, train_labels,
              steps_per_epoch=train_steps_per_epoch,
              epochs=epochs,
              class_weight=class_weights_matrix(),
              validation_data=(val_images, val_labels),
              validation_steps=val_steps_per_epoch)
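For completeness, the training entry point is then called along these lines (the data path and epoch count below are placeholders, not the actual values):

# placeholder arguments: point data_path at the directory holding the TFRecord files
train_convnet(data_path='path/to/tfrecords', epochs=10, batch_size=64)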
But I end up with the following error:
in standardize_input_data
'for each key in: ' + str(names))
ValueError: No data provided for "input_1". Need data for each key in: ['input_1']
Any idea what's wrong with this?
GKalliatakis commented
Update on my question above: training works fine when a Sequential model is used instead, but still not with the functional model I shared above.
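For reference, a Sequential counterpart of the same architecture might look roughly like this (a sketch of the structure only, not necessarily the exact code used; the input shape is passed to the first layer instead of a separate Input tensor):

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense
from tensorflow.keras.regularizers import l2

def build_sequential_classification_model():
    model = Sequential([
        # Block 1 (same layers as the functional model above)
        Conv2D(32, 3, padding='same', activation='relu',
               kernel_regularizer=l2(0.0002),
               input_shape=(IMG_DIM, IMG_DIM, NUM_BANDS)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        # Block 2
        Conv2D(64, 3, padding='same', activation='relu', kernel_regularizer=l2(0.0002)),
        Conv2D(64, 3, padding='same', activation='relu', kernel_regularizer=l2(0.0002)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        Dropout(0.2),
        # Block 3
        Conv2D(64, 3, padding='same', activation='relu', kernel_regularizer=l2(0.0002)),
        Conv2D(64, 3, padding='same', activation='relu', kernel_regularizer=l2(0.0002)),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2)),
        Dropout(0.2),
        Flatten(),
        # Classification block
        Dense(128, activation='relu'),
        Dense(50, activation='relu'),
        Dense(NUM_CLASSES, activation='softmax'),
    ])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model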