Variable Encoder/enc_h0_lin/weights already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope? Originally defined at:
Rohit-Satyam opened this issue · 1 comment
Rohit-Satyam commented
When I run scDREAMER more than once in the same notebook, I get the following error, and I have to restart my notebook kernel to get around it. Is there another way to prevent this error from occurring?
reading data
Data set to work on:
[[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
...
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]
[0. 0. 0. ... 0. 0. 0.]]
(55510, 2000)
[[1. 0. 0.]
[1. 0. 0.]
[1. 0. 0.]
...
[0. 1. 0.]
[0. 1. 0.]
[0. 1. 0.]]
(55510, 3)
encoder input shape Tensor("concat_4:0", shape=(?, 2003), dtype=float32)
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
Cell In[9], line 10
7 name = "pf_unsupervised_rnanorm"
8 with tf.Session(config = run_config) as sess:
---> 10 dreamer = model.scDREAMER(
11 sess,
12 epoch = 300,
13 dataset_name = 'adata_bi_norm.h5ad',
14 batch = 'batch',
15 cell_type = 'labels.stage',
16 name = name
17 )
19 dreamer.train_cluster()
File ~/Documents/zena_scrnaseq_singleR/extra_analysis_June2023/scDREAMER/scDREAMER/src/model.py:583, in scDREAMER.__init__(self, sess, batch, cell_type, name, epoch, lr, beta1, batch_size, X_dim, z_dim, dataset_name, checkpoint_dir, sample_dir, result_dir, num_layers, g_h_dim, d_h_dim, gen_activation, leak, keep_param, trans, is_bn, g_iter, lam, sampler)
578 self.total_size = self.test_size
580 #print("Shape self.data_train:", shape(self.data_train))
581 #print("Shape self.data_test:", shape(self.data_test))
--> 583 self.build_model()
File ~/Documents/zena_scrnaseq_singleR/extra_analysis_June2023/scDREAMER/scDREAMER/src/model.py:34, in build_model(self)
31 print('encoder input shape ',self.enc_input)
33 # AJ: Encoder output...
---> 34 self.encoder_output, self.z_post_m, self.z_post_v, self.l_post_m, self.l_post_v = self.encoder(self.enc_input) # self.x_input
35 self.encoder_output_, self.z_post_m_, self.z_post_v_, self.l_post_m_, self.l_post_v_ = self.encoder(self.enc_input_, reuse = True)
37 self.expression = self.x_input
File ~/Documents/zena_scrnaseq_singleR/extra_analysis_June2023/scDREAMER/scDREAMER/src/model.py:328, in encoder(self, x, reuse)
323 l_post_v = tf.exp(dense(h, self.z_dim, 1, name='enc_l_post_v' + str(self.num_layers) + '_lin'))
326 else:
--> 328 h = tf.nn.dropout(lrelu(dense(x, self.X_dim + self.N_batch, self.g_h_dim[0], name='enc_h0_lin'), alpha=self.leak),
329 keep_prob=self.keep_prob)
331 for i in range(1, self.num_layers):
333 h = tf.nn.dropout(lrelu(dense(h, self.g_h_dim[i - 1], self.g_h_dim[i], name='enc_h' + str(i) + '_lin'),
334 alpha=self.leak), keep_prob=self.keep_prob)
File ~/Documents/zena_scrnaseq_singleR/extra_analysis_June2023/scDREAMER/scDREAMER/src/utils.py:97, in dense(x, inp_dim, out_dim, name)
94 def dense(x, inp_dim, out_dim, name = 'dense'):
96 with tf.variable_scope(name, reuse=None): # earlier only tf
---> 97 weights = tf.get_variable("weights", shape=[inp_dim, out_dim],
98 initializer = #tf2.keras.initializers.glorot_uniform(seed = 0))
99 tf2.initializers.GlorotUniform()) # contrib: tf.contrib.layers.xavier_initializer()
101 bias = tf.get_variable("bias", shape=[out_dim], initializer = tf.constant_initializer(0.0))
103 # initializer= tf2.initializers.GlorotUniform(); same as Xavier's initializer; tf.contrib.layers.xavier_initializer()
File ~/miniconda3/envs/scdreamer/lib/python3.9/site-packages/tensorflow/python/ops/variable_scope.py:1617, in get_variable(name, shape, dtype, initializer, regularizer, trainable, collections, caching_device, partitioner, validate_shape, use_resource, custom_getter, constraint, synchronization, aggregation)
1601 @tf_export(v1=["get_variable"])
1602 def get_variable(name,
1603 shape=None,
(...)
1615 synchronization=VariableSynchronization.AUTO,
1616 aggregation=VariableAggregation.NONE):
-> 1617 return get_variable_scope().get_variable(
1618 _get_default_variable_store(),
1619 name,
1620 shape=shape,
1621 dtype=dtype,
1622 initializer=initializer,
1623 regularizer=regularizer,
1624 trainable=trainable,
1625 collections=collections,
1626 caching_device=caching_device,
1627 partitioner=partitioner,
1628 validate_shape=validate_shape,
1629 use_resource=use_resource,
1630 custom_getter=custom_getter,
1631 constraint=constraint,
1632 synchronization=synchronization,
1633 aggregation=aggregation)
File ~/miniconda3/envs/scdreamer/lib/python3.9/site-packages/tensorflow/python/ops/variable_scope.py:1327, in VariableScope.get_variable(self, var_store, name, shape, dtype, initializer, regularizer, reuse, trainable, collections, caching_device, partitioner, validate_shape, use_resource, custom_getter, constraint, synchronization, aggregation)
1325 if dtype is None:
1326 dtype = self._dtype
-> 1327 return var_store.get_variable(
1328 full_name,
1329 shape=shape,
1330 dtype=dtype,
1331 initializer=initializer,
1332 regularizer=regularizer,
1333 reuse=reuse,
1334 trainable=trainable,
1335 collections=collections,
1336 caching_device=caching_device,
1337 partitioner=partitioner,
1338 validate_shape=validate_shape,
1339 use_resource=use_resource,
1340 custom_getter=custom_getter,
1341 constraint=constraint,
1342 synchronization=synchronization,
1343 aggregation=aggregation)
File ~/miniconda3/envs/scdreamer/lib/python3.9/site-packages/tensorflow/python/ops/variable_scope.py:583, in _VariableStore.get_variable(self, name, shape, dtype, initializer, regularizer, reuse, trainable, collections, caching_device, partitioner, validate_shape, use_resource, custom_getter, constraint, synchronization, aggregation)
581 return custom_getter(**custom_getter_kwargs)
582 else:
--> 583 return _true_getter(
584 name,
585 shape=shape,
586 dtype=dtype,
587 initializer=initializer,
588 regularizer=regularizer,
589 reuse=reuse,
590 trainable=trainable,
591 collections=collections,
592 caching_device=caching_device,
593 partitioner=partitioner,
594 validate_shape=validate_shape,
595 use_resource=use_resource,
596 constraint=constraint,
597 synchronization=synchronization,
598 aggregation=aggregation)
File ~/miniconda3/envs/scdreamer/lib/python3.9/site-packages/tensorflow/python/ops/variable_scope.py:536, in _VariableStore.get_variable.<locals>._true_getter(name, shape, dtype, initializer, regularizer, reuse, trainable, collections, caching_device, partitioner, validate_shape, use_resource, constraint, synchronization, aggregation)
530 if "%s/part_0" % name in self._vars:
531 raise ValueError(
532 "No partitioner was provided, but a partitioned version of the "
533 "variable was found: %s/part_0. Perhaps a variable of the same "
534 "name was already created with partitioning?" % name)
--> 536 return self._get_single_variable(
537 name=name,
538 shape=shape,
539 dtype=dtype,
540 initializer=initializer,
541 regularizer=regularizer,
542 reuse=reuse,
543 trainable=trainable,
544 collections=collections,
545 caching_device=caching_device,
546 validate_shape=validate_shape,
547 use_resource=use_resource,
548 constraint=constraint,
549 synchronization=synchronization,
550 aggregation=aggregation)
File ~/miniconda3/envs/scdreamer/lib/python3.9/site-packages/tensorflow/python/ops/variable_scope.py:899, in _VariableStore._get_single_variable(self, name, shape, dtype, initializer, regularizer, partition_info, reuse, trainable, collections, caching_device, validate_shape, use_resource, constraint, synchronization, aggregation)
894 # Throw away internal tf entries and only take a few lines. In some
895 # cases the traceback can be longer (e.g. if someone uses factory
896 # functions to create variables) so we take more than needed in the
897 # default case.
898 tb = [x for x in tb if "tensorflow/python" not in x[0]][:5]
--> 899 raise ValueError("%s Originally defined at:\n\n%s" %
900 (err_msg, "".join(traceback.format_list(tb))))
901 found_var = self._vars[name]
902 if not shape.is_compatible_with(found_var.get_shape()):
ValueError: Variable Encoder/enc_h0_lin/weights already exists, disallowed. Did you mean to set reuse=True or reuse=tf.AUTO_REUSE in VarScope? Originally defined at:
File "/home/subudhak/Documents/zena_scrnaseq_singleR/extra_analysis_June2023/scDREAMER/scDREAMER/src/utils.py", line 97, in dense
weights = tf.get_variable("weights", shape=[inp_dim, out_dim],
File "/home/subudhak/Documents/zena_scrnaseq_singleR/extra_analysis_June2023/scDREAMER/scDREAMER/src/model.py", line 328, in encoder
h = tf.nn.dropout(lrelu(dense(x, self.X_dim + self.N_batch, self.g_h_dim[0], name='enc_h0_lin'), alpha=self.leak),
File "/home/subudhak/Documents/zena_scrnaseq_singleR/extra_analysis_June2023/scDREAMER/scDREAMER/src/model.py", line 34, in build_model
self.encoder_output, self.z_post_m, self.z_post_v, self.l_post_m, self.l_post_v = self.encoder(self.enc_input) # self.x_input
File "/home/subudhak/Documents/zena_scrnaseq_singleR/extra_analysis_June2023/scDREAMER/scDREAMER/src/model.py", line 583, in __init__
self.build_model()
File "/tmp/ipykernel_3218285/2234130666.py", line 10, in <module>
dreamer = model.scDREAMER(
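For context, this is standard TF1 graph-mode behaviour rather than anything specific to scDREAMER: the dense() helper in utils.py opens tf.variable_scope(name, reuse=None), and reuse=None means "create new variables", so building the same graph a second time in one process collides with the variables created by the first build. The following minimal sketch (hypothetical, independent of scDREAMER) reproduces the same ValueError:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

def dense_weights(name):
    # reuse=None means "create"; a second call with the same scope name fails
    with tf.variable_scope(name, reuse=None):
        return tf.get_variable("weights", shape=[4, 2])

dense_weights("enc_h0_lin")  # first build: creates enc_h0_lin/weights
dense_weights("enc_h0_lin")  # second build: ValueError: Variable enc_h0_lin/weights already exists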
ajitashree commented
Hi Rohit, it is important to restart the runtime for every run so that new weights can be learned for the Encoder-Decoder networks.
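If restarting the whole kernel is inconvenient, a common graph-mode workaround is to discard the old default graph before re-instantiating the model. This is a sketch, not verified against scDREAMER; it assumes the model builds on the default TF1 graph (run_config, name, and model are taken from the snippet in the issue above). Because resetting the graph also drops all previously created variables, the model still starts from freshly initialized weights, consistent with the advice above:

import tensorflow.compat.v1 as tf

tf.reset_default_graph()  # drop the old graph and all its variables

with tf.Session(config = run_config) as sess:
    dreamer = model.scDREAMER(
        sess,
        epoch = 300,
        dataset_name = 'adata_bi_norm.h5ad',
        batch = 'batch',
        cell_type = 'labels.stage',
        name = name
    )
    dreamer.train_cluster()

Setting reuse=tf.AUTO_REUSE in the dense() helper in utils.py would also silence the error, as the message suggests, but it would silently reuse the previously trained weights instead of learning new ones, which is presumably not what you want between independent runs.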