No gradients provided for any variable: ['cnn_model_2/conv2d_6/kernel:0', 'cnn_model_2/conv2d_6/bias:0', 'cnn_model_2/conv2d_7/kernel:0', 'cnn_model_2/conv2d_7/bias:0', 'cnn_model_2/dense_4/kernel:0', 'cnn_model_2/dense_4/bias:0', 'cnn_model_2/dense_5/kernel:0', 'cnn_model_2/dense_5/bias:0'].
SlowMonk opened this issue · 0 comments
```
ValueError                                Traceback (most recent call last)
in train(train_dataset, train_labels_dataset, epochs)
      9             labels = label_batch
     10             print('logits->', logits.shape, 'labels->', labels.shape)
---> 11             train_step(logits, labels)
     12
     13         if (epoch + 1) % 15 == 0:

in train_step(logits, labels)
     12         #loss = cross_entropy(labels, tf.argmax(logits, axis=1))
     13     gradient_of_cnn = cnn_tape.gradient(loss, model.trainable_variables)
---> 14     cnn_optimizer.apply_gradients(zip(gradient_of_cnn, model.trainable_variables))

~/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py in apply_gradients(self, grads_and_vars, name)
    394       ValueError: If none of the variables have gradients.
    395     """
--> 396     grads_and_vars = _filter_grads(grads_and_vars)
    397     var_list = [v for (_, v) in grads_and_vars]
    398

~/anaconda3/lib/python3.7/site-packages/tensorflow/python/keras/optimizer_v2/optimizer_v2.py in _filter_grads(grads_and_vars)
    922   if not filtered:
    923     raise ValueError("No gradients provided for any variable: %s." %
--> 924                      ([v.name for _, v in grads_and_vars],))
    925   if vars_with_empty_grads:
    926     logging.warning(

ValueError: No gradients provided for any variable: ['cnn_model_2/conv2d_6/kernel:0', 'cnn_model_2/conv2d_6/bias:0', 'cnn_model_2/conv2d_7/kernel:0', 'cnn_model_2/conv2d_7/bias:0', 'cnn_model_2/dense_4/kernel:0', 'cnn_model_2/dense_4/bias:0', 'cnn_model_2/dense_5/kernel:0', 'cnn_model_2/dense_5/bias:0'].
```
The code that produces the error:

```
input_shape = (28,28,1)
class cnn_model(tf.keras.Model):
    def __init__(self, inputs=(28,28,1)):
        super(cnn_model, self).__init__()
        #self.conv1 = layers.Conv2D(32,(3,3),activation='relu',input_shape=input_shape)
        self.conv1 = layers.Conv2D(32, 3, 3, padding='same', activation='relu')
        self.maxpool = layers.MaxPool2D((2,2))
        self.conv2 = layers.Conv2D(64, (3,3), activation='relu')
        self.conv3 = layers.Conv2D(128, (3,3), activation='relu')
        self.flatten = layers.Flatten()
        self.dense64 = layers.Dense(64, activation='relu')
        self.dense10 = layers.Dense(10, activation='relu')
        self.dropout = layers.Dropout(0.25)

    def call(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.maxpool(x)
        x = self.dropout(x)
        x = self.flatten(x)
        x = self.dense64(x)
        x = self.dense10(x)
        return x
#loss = tf.losses.mean_squared_error(labels,logits)
cnn_optimizer = tf.optimizers.Adam(1e-4)
#optimizer = tf.train. (learning_rate=0.001)
#print(loss)
checkpoint_dir = './training_checkpoints'
checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
checkpoint = tf.train.Checkpoint(cnn_optimizer=cnn_optimizer)
# Notice the use of `tf.function`
# This annotation causes the function to be "compiled".
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
#@tf.function
def train_step(logits, labels):
    with tf.GradientTape() as cnn_tape:
        print("train_step")
        loss = tf.losses.mean_squared_error(labels, tf.argmax(logits, axis=1))
        #loss = tf.losses.BinaryCrossentropy(multi_class_labels=labels, logits=logits)
        #loss = cross_entropy(labels, tf.argmax(logits, axis=1))
    gradient_of_cnn = cnn_tape.gradient(loss, model.trainable_variables)
    cnn_optimizer.apply_gradients(zip(gradient_of_cnn, model.trainable_variables))
def train(train_dataset, train_labels_dataset, epochs):
    for epoch in range(epochs):
        start = time.time()
        for train_batch, label_batch in zip(train_dataset, train_labels_dataset):
            print(train_batch.shape, label_batch.shape)
            #logits = tf.argmax(model(train_batch), axis=1)
            logits = model(train_batch)
            labels = label_batch
            print('logits->', logits.shape, 'labels->', labels.shape)
            train_step(logits, labels)

        if (epoch + 1) % 15 == 0:
            checkpoint.save(file_prefix=checkpoint_prefix)

        print('Time for epoch {} is {} sec'.format(epoch + 1, time.time() - start))
%%time
train(train_dataset, train_labels_dataset, EPOCHS)
```
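For what it's worth, two things in the snippet above would each cause this error on their own: the forward pass (`logits = model(train_batch)`) runs outside the `GradientTape`, so the tape never records the ops connecting the loss to `model.trainable_variables`, and `tf.argmax` is non-differentiable, so no gradient can flow through the loss in any case. Below is a minimal corrected sketch; the `SparseCategoricalCrossentropy` loss is an assumption for 10-class integer labels, not part of the original code:

```
# Assumption: integer class labels for the 10-way output; swapped in here
# because the original MSE-on-argmax loss has no gradient path to the model.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

@tf.function
def train_step(images, labels):
    with tf.GradientTape() as cnn_tape:
        # The forward pass must run inside the tape so the ops linking
        # the loss to model.trainable_variables are recorded.
        logits = model(images, training=True)
        loss = loss_fn(labels, logits)
    gradient_of_cnn = cnn_tape.gradient(loss, model.trainable_variables)
    cnn_optimizer.apply_gradients(zip(gradient_of_cnn, model.trainable_variables))
```

With this version, `train` passes the raw image batch into `train_step(train_batch, label_batch)` instead of precomputed logits; the final `Dense(10)` layer should also typically drop its `relu` activation so its outputs are true logits when `from_logits=True`.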