Discrete() produces floats occasionally
dangpzanco opened this issue · 5 comments
The parameter `sherpa.Discrete('num_units', [32, 128])` can produce floats after some trials.
To reproduce, install sherpa (`!pip3 install parameter-sherpa gpyopt flask`) and run the mnist_mlp.ipynb example on Google Colab.
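For context, here is roughly the setup from the notebook (a sketch from memory; the exact study arguments in the example may differ):

```python
import sherpa
from keras.models import Sequential
from keras.layers import Dense

parameters = [sherpa.Discrete('num_units', [32, 128]),
              sherpa.Choice('activation', ['relu', 'tanh', 'sigmoid'])]
algorithm = sherpa.algorithms.GPyOpt(max_num_trials=50)
study = sherpa.Study(parameters=parameters, algorithm=algorithm,
                     lower_is_better=True)

for trial in study:
    print('Trial {}: {}'.format(trial.id, trial.parameters))
    model = Sequential()
    # Crashes once 'num_units' arrives as e.g. 125.0 instead of 125:
    model.add(Dense(units=trial.parameters['num_units'],
                    activation=trial.parameters['activation'],
                    input_dim=784))
```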
Error output:
```
INFO:GP:initializing Y
INFO:GP:initializing inference method
INFO:GP:adding kernel and likelihood as parameters
WARNING:rbf:reconstraining parameters GP_regression.rbf
WARNING:variance:reconstraining parameters GP_regression.Gaussian_noise.variance
Trial 4: {'num_units': 125.0, 'activation': 'tanh'}
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-13-d3ff5f70567f> in <module>()
5
6 model = Sequential()
----> 7 model.add(Dense(units=trial.parameters['num_units'], activation=trial.parameters['activation'], input_dim=784))
8 model.add(Dense(units=10, activation='softmax'))
9 model.compile(loss='categorical_crossentropy',
10 frames
/usr/local/lib/python3.6/dist-packages/keras/engine/sequential.py in add(self, layer)
163 # and create the node connecting the current layer
164 # to the input layer we just created.
--> 165 layer(x)
166 set_inputs = True
167 else:
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in __call__(self, inputs, **kwargs)
429 'You can build it manually via: '
430 '`layer.build(batch_input_shape)`')
--> 431 self.build(unpack_singleton(input_shapes))
432 self.built = True
433
/usr/local/lib/python3.6/dist-packages/keras/layers/core.py in build(self, input_shape)
864 name='kernel',
865 regularizer=self.kernel_regularizer,
--> 866 constraint=self.kernel_constraint)
867 if self.use_bias:
868 self.bias = self.add_weight(shape=(self.units,),
/usr/local/lib/python3.6/dist-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
89 warnings.warn('Update your `' + object_name + '` call to the ' +
90 'Keras 2 API: ' + signature, stacklevel=2)
---> 91 return func(*args, **kwargs)
92 wrapper._original_function = func
93 return wrapper
/usr/local/lib/python3.6/dist-packages/keras/engine/base_layer.py in add_weight(self, name, shape, dtype, initializer, regularizer, trainable, constraint)
247 if dtype is None:
248 dtype = K.floatx()
--> 249 weight = K.variable(initializer(shape),
250 dtype=dtype,
251 name=name,
/usr/local/lib/python3.6/dist-packages/keras/initializers.py in __call__(self, shape, dtype)
216 limit = np.sqrt(3. * scale)
217 return K.random_uniform(shape, -limit, limit,
--> 218 dtype=dtype, seed=self.seed)
219
220 def get_config(self):
/usr/local/lib/python3.6/dist-packages/keras/backend/tensorflow_backend.py in random_uniform(shape, minval, maxval, dtype, seed)
4137 seed = np.random.randint(10e6)
4138 return tf.random_uniform(shape, minval=minval, maxval=maxval,
-> 4139 dtype=dtype, seed=seed)
4140
4141
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/random_ops.py in random_uniform(shape, minval, maxval, dtype, seed, name)
245 shape, minval, maxval, seed=seed1, seed2=seed2, name=name)
246 else:
--> 247 rnd = gen_random_ops.random_uniform(shape, dtype, seed=seed1, seed2=seed2)
248 return math_ops.add(rnd * (maxval - minval), minval, name=name)
249
/usr/local/lib/python3.6/dist-packages/tensorflow/python/ops/gen_random_ops.py in random_uniform(shape, dtype, seed, seed2, name)
775 _, _, _op = _op_def_lib._apply_op_helper(
776 "RandomUniform", shape=shape, dtype=dtype, seed=seed, seed2=seed2,
--> 777 name=name)
778 _result = _op.outputs[:]
779 _inputs_flat = _op.inputs
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
608 _SatisfiesTypeConstraint(base_type,
609 _Attr(op_def, input_arg.type_attr),
--> 610 param_name=input_name)
611 attrs[input_arg.type_attr] = attr_value
612 inferred_from[input_arg.type_attr] = input_name
/usr/local/lib/python3.6/dist-packages/tensorflow/python/framework/op_def_library.py in _SatisfiesTypeConstraint(dtype, attr_def, param_name)
58 "allowed values: %s" %
59 (param_name, dtypes.as_dtype(dtype).name,
---> 60 ", ".join(dtypes.as_dtype(x).name for x in allowed_list)))
61
62
TypeError: Value passed to parameter 'shape' has DataType float32 not in list of allowed values: int32, int64
```
I have the same problem when using GPyOpt as the algorithm.
This error occurs for me too. As a temporary workaround, you can cast the float to an integer; after that, the code runs.
Example:
```python
model.add(Dense(units=int(trial.parameters['num_units']),
                activation=trial.parameters['activation'],
                input_dim=784))
```
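If you'd rather not sprinkle `int()` casts through the model code, you can coerce once right after receiving the trial; a sketch of the same workaround:

```python
# Read the parameters once, coercing the Discrete one back to int,
# so every later use sees the expected type.
num_units = int(trial.parameters['num_units'])
activation = trial.parameters['activation']

model = Sequential()
model.add(Dense(units=num_units, activation=activation, input_dim=784))
model.add(Dense(units=10, activation='softmax'))
```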
Hi, and apologies for the delay on this. It seems like this problem only occurred in the version on PyPI. I have checked the master branch and added a test, and it seems to work fine. I have accordingly updated the code on PyPI, so if you update using pip the fix should be installed. It would be great if you could check; otherwise I'll close this issue in a week or so.
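If you want a quick sanity check after upgrading, something like the sketch below should do (an ad-hoc check, not the exact test I added; the GPyOpt arguments are assumptions):

```python
import numpy as np
import sherpa

parameters = [sherpa.Discrete('num_units', [32, 128])]
algorithm = sherpa.algorithms.GPyOpt(max_num_trials=20)
study = sherpa.Study(parameters=parameters, algorithm=algorithm,
                     lower_is_better=True, disable_dashboard=True)

for trial in study:
    num_units = trial.parameters['num_units']
    # With the fix, 'num_units' should always be an integer type.
    assert isinstance(num_units, (int, np.integer)), \
        'Trial {} produced {!r}'.format(trial.id, num_units)
    # Dummy objective so the optimizer keeps proposing new trials.
    study.add_observation(trial, iteration=1, objective=0.0)
    study.finalize(trial)
```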
At the moment I can't test it properly because I can't reach the dashboard. See: #59
Yes, it works now!