nilmtk/nilmtk-contrib

API Error - Neural Net 2 input tensors

rpolea opened this issue · 5 comments

Hi,

I've been trying out some of the neural nets from the API documentation, and I get the same error no matter which dataset or example I use. Currently tearing my hair out!

I have converted the REDD dataset to HDF5 using the documentation and then tried to feed it into the API as below. I've also pasted the error below.

from nilmtk_contrib.disaggregate import DAE, Seq2Point, Seq2Seq, RNN, WindowGRU
from nilmtk.disaggregate import Mean  # needed for the 'Mean' method below

redd = {
    'power': {
        'mains': ['apparent', 'active'],
        'appliance': ['apparent', 'active']
    },
    'sample_rate': 60,
    'appliances': ['fridge'],
    'methods': {
        'WindowGRU': WindowGRU({'n_epochs': 50, 'batch_size': 32}),
        'RNN': RNN({'n_epochs': 50, 'batch_size': 32}),
        'DAE': DAE({'n_epochs': 50, 'batch_size': 32}),
        'Seq2Point': Seq2Point({'n_epochs': 50, 'batch_size': 32}),
        'Seq2Seq': Seq2Seq({'n_epochs': 50, 'batch_size': 32}),
        'Mean': Mean({}),
    },
    'train': {
        'datasets': {
            'REDD': {
                'path': '/home/rpolea/redd_test.h5',
                'buildings': {
                    1: {
                        'start_time': '2011-04-18',
                        'end_time': '2011-04-28'
                    },
                }
            }
        }
    },
    'test': {
        'datasets': {
            'REDD': {
                'path': '/home/rpolea/redd_test.h5',
                'buildings': {
                    1: {
                        'start_time': '2011-05-01',
                        'end_time': '2011-05-03'
                    },
                }
            }
        },
        'metrics': ['mae']
    }
}
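
For completeness, the dictionary is passed to nilmtk's API class to launch the experiment; this is the API(redd) call visible at the top of the traceback below. Reading the metrics back via errors and errors_keys follows the nilmtk API user guide (the api_results name is just illustrative):

from nilmtk.api import API

api_results = API(redd)

# Per the nilmtk API user guide, test-set metrics are exposed on the
# result object after the experiment finishes:
for key, err in zip(api_results.errors_keys, api_results.errors):
    print(key)
    print(err)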

Started training for WindowGRU
Joint training for WindowGRU
............... Loading Data for training ...................
Loading data for REDD dataset
Loading building ... 1
Loading data for meter ElecMeterID(instance=2, building=1, dataset='REDD')
Done loading data all meters for this chunk.
Dropping missing values
Training processing
First model training for fridge
Epoch 1/50
358/358 [==============================] - ETA: 0s - loss: 0.0116


ValueError Traceback (most recent call last)
in <module>
----> 1 API(redd)

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/nilmtk/api.py in __init__(self, params)
44 self.DROP_ALL_NANS = params.get("DROP_ALL_NANS", True)
45 self.site_only = params.get('site_only',False)
---> 46 self.experiment()
47
48

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/nilmtk/api.py in experiment(self)
89 else:
90 print ("Joint training for ",clf.MODEL_NAME)
---> 91 self.train_jointly(clf,d)
92
93 print ("Finished training for ",clf.MODEL_NAME)

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/nilmtk/api.py in train_jointly(self, clf, d)
238 self.train_submeters = appliance_readings
239
--> 240 clf.partial_fit(self.train_mains,self.train_submeters)
241
242

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/nilmtk_contrib/disaggregate/WindowGRU.py in partial_fit(self, train_main, train_appliances, do_preprocessing, **load_kwargs)
70 checkpoint = ModelCheckpoint(filepath,monitor='val_loss',verbose=1,save_best_only=True,mode='min')
71 train_x, v_x, train_y, v_y = train_test_split(mains, app_reading, test_size=.15,random_state=10)
---> 72 model.fit(train_x,train_y,validation_data=[v_x,v_y],epochs=self.n_epochs,callbacks=[checkpoint],shuffle=True,batch_size=self.batch_size)
73 model.load_weights(filepath)
74

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
1139 workers=workers,
1140 use_multiprocessing=use_multiprocessing,
-> 1141 return_dict=True)
1142 val_logs = {'val_' + name: val for name, val in val_logs.items()}
1143 epoch_logs.update(val_logs)

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py in evaluate(self, x, y, batch_size, verbose, sample_weight, steps, callbacks, max_queue_size, workers, use_multiprocessing, return_dict)
1387 with trace.Trace('test', step_num=step, _r=1):
1388 callbacks.on_test_batch_begin(step)
-> 1389 tmp_logs = self.test_function(iterator)
1390 if data_handler.should_sync:
1391 context.async_wait()

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in __call__(self, *args, **kwds)
826 tracing_count = self.experimental_get_tracing_count()
827 with trace.Trace(self._name) as tm:
--> 828 result = self._call(*args, **kwds)
829 compiler = "xla" if self._experimental_compile else "nonXla"
830 new_tracing_count = self.experimental_get_tracing_count()

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _call(self, *args, **kwds)
869 # This is the first call of __call__, so we have to initialize.
870 initializers = []
--> 871 self._initialize(args, kwds, add_initializers_to=initializers)
872 finally:
873 # At this point we know that the initialization is complete (or less

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in _initialize(self, args, kwds, add_initializers_to)
724 self._concrete_stateful_fn = (
725 self._stateful_fn._get_concrete_function_internal_garbage_collected( # pylint: disable=protected-access
--> 726 *args, **kwds))
727
728 def invalid_creator_scope(*unused_args, **unused_kwds):

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _get_concrete_function_internal_garbage_collected(self, *args, **kwargs)
2967 args, kwargs = None, None
2968 with self._lock:
-> 2969 graph_function, _ = self._maybe_define_function(args, kwargs)
2970 return graph_function
2971

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _maybe_define_function(self, args, kwargs)
3359
3360 self._function_cache.missed.add(call_context_key)
-> 3361 graph_function = self._create_graph_function(args, kwargs)
3362 self._function_cache.primary[cache_key] = graph_function
3363

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/eager/function.py in _create_graph_function(self, args, kwargs, override_flat_arg_shapes)
3204 arg_names=arg_names,
3205 override_flat_arg_shapes=override_flat_arg_shapes,
-> 3206 capture_by_value=self._capture_by_value),
3207 self._function_attributes,
3208 function_spec=self.function_spec,

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in func_graph_from_py_func(name, python_func, args, kwargs, signature, func_graph, autograph, autograph_options, add_control_dependencies, arg_names, op_return_value, collections, capture_by_value, override_flat_arg_shapes)
988 _, original_func = tf_decorator.unwrap(python_func)
989
--> 990 func_outputs = python_func(*func_args, **func_kwargs)
991
992 # invariant: func_outputs contains only Tensors, CompositeTensors,

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/eager/def_function.py in wrapped_fn(*args, **kwds)
632 xla_context.Exit()
633 else:
--> 634 out = weak_wrapped_fn().wrapped(*args, **kwds)
635 return out
636

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/framework/func_graph.py in wrapper(*args, **kwargs)
975 except Exception as e: # pylint:disable=broad-except
976 if hasattr(e, "ag_error_metadata"):
--> 977 raise e.ag_error_metadata.to_exception(e)
978 else:
979 raise

ValueError: in user code:

/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:1233 test_function  *
    return step_function(self, iterator)
/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:1224 step_function  **
    outputs = model.distribute_strategy.run(run_step, args=(data,))
/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:1259 run
    return self._extended.call_for_each_replica(fn, args=args, kwargs=kwargs)
/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:2730 call_for_each_replica
    return self._call_for_each_replica(fn, args, kwargs)
/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/distribute/distribute_lib.py:3417 _call_for_each_replica
    return fn(*args, **kwargs)
/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:1217 run_step  **
    outputs = model.test_step(data)
/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/keras/engine/training.py:1183 test_step
    y_pred = self(x, training=False)
/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/keras/engine/base_layer.py:998 __call__
    input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
/usr/local/miniconda/envs/nilm/lib/python3.7/site-packages/tensorflow/python/keras/engine/input_spec.py:207 assert_input_compatibility
    ' input tensors. Inputs received: ' + str(inputs))

ValueError: Layer sequential_11 expects 1 input(s), but it received 2 input tensors. Inputs received: [<tf.Tensor 'IteratorGetNext:0' shape=(None, 99, 1) dtype=float32>, <tf.Tensor 'IteratorGetNext:1' shape=(None, 1) dtype=float32>]

@hetvi0609 can you try to replicate this?

@rpolea can you provide the versions for Tensorflow, Keras, etc.?

@rpolea Hi! Can you try the code again after changing the following line in e.g. WindowGRU.py, rnn.py, etc.:

model.fit(train_x,train_y,validation_data=(v_x,v_y),epochs=self.n_epochs,callbacks=[checkpoint],shuffle=True,batch_size=self.batch_size)

You need to pass the validation data as a tuple rather than a list. I think this will solve your problem. I implemented the change and was able to get results.
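
To see why the list form trips the input check, here is a minimal standalone sketch (not nilmtk code; it assumes the TF 2.4-era Keras from the traceback paths and a toy single-input model with the same (None, 99, 1) input shape as the error message):

import numpy as np
import tensorflow as tf

# Toy single-input model with the input shape from the error message.
model = tf.keras.Sequential([
    tf.keras.layers.GRU(8, input_shape=(99, 1)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')

x = np.random.rand(64, 99, 1).astype('float32')
y = np.random.rand(64, 1).astype('float32')
v_x = np.random.rand(16, 99, 1).astype('float32')
v_y = np.random.rand(16, 1).astype('float32')

# With a list, this Keras version treats [v_x, v_y] as a single multi-input
# x, so the one-input model fails at the first validation step with
# "expects 1 input(s), but it received 2 input tensors":
# model.fit(x, y, validation_data=[v_x, v_y], epochs=1)

# With a tuple, it is unpacked as (x_val, y_val) and runs fine:
model.fit(x, y, validation_data=(v_x, v_y), epochs=1)

With that change, the following configuration runs end to end for me: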

redd = {
    'power': {
        'mains': ['apparent', 'active'],
        'appliance': ['apparent', 'active']
    },
    'sample_rate': 60,
    'appliances': ['fridge'],
    'methods': {
        'WindowGRU': WindowGRU({'n_epochs': 50, 'batch_size': 32}),
        'RNN': RNN({'n_epochs': 50, 'batch_size': 32}),
    },
    'train': {
        'datasets': {
            'REDD': {
                'path': '/home/hetvi.shastri/redd.h5',
                'buildings': {
                    1: {
                        'start_time': '2011-04-18',
                        'end_time': '2011-04-28'
                    },
                }
            }
        }
    },
    'test': {
        'datasets': {
            'REDD': {
                'path': '/home/hetvi.shastri/redd.h5',
                'buildings': {
                    1: {
                        'start_time': '2011-05-01',
                        'end_time': '2011-05-03'
                    },
                }
            }
        },
        'metrics': ['mae']
    }
}

Output

Joint Testing for all algorithms
Loading data for  REDD  dataset
Loading data for meter ElecMeterID(instance=2, building=1, dataset='REDD')     
Done loading data all meters for this chunk.
Dropping missing values
Generating predictions for : WindowGRU
Generating predictions for : RNN
............  mae  ..............
        WindowGRU        RNN
fridge  15.912712  16.804665

Hi @hetvi0609 ,

Thank you for getting back to me with a solution. I made the changes you suggested and the model now trains; however, when it finishes I get another error.

---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
in <module>
----> 1 API(redd)

~\Anaconda3\envs\nilm\lib\site-packages\nilmtk\api.py in __init__(self, params)
44 self.DROP_ALL_NANS = params.get("DROP_ALL_NANS", True)
45 self.site_only = params.get('site_only',False)
---> 46 self.experiment()
47
48

~\Anaconda3\envs\nilm\lib\site-packages\nilmtk\api.py in experiment(self)
89 else:
90 print ("Joint training for ",clf.MODEL_NAME)
---> 91 self.train_jointly(clf,d)
92
93 print ("Finished training for ",clf.MODEL_NAME)

~\Anaconda3\envs\nilm\lib\site-packages\nilmtk\api.py in train_jointly(self, clf, d)
238 self.train_submeters = appliance_readings
239
--> 240 clf.partial_fit(self.train_mains,self.train_submeters)
241
242

~\Anaconda3\envs\nilm\lib\site-packages\nilmtk_contrib\disaggregate\WindowGRU.py in partial_fit(self, train_main, train_appliances, do_preprocessing, **load_kwargs)
71 train_x, v_x, train_y, v_y = train_test_split(mains, app_reading, test_size=.15,random_state=10)
72 model.fit(train_x,train_y,validation_data=(v_x,v_y),epochs=self.n_epochs,callbacks=[checkpoint],shuffle=True,batch_size=self.batch_size)
---> 73 model.load_weights(filepath)
74
75

~\Anaconda3\envs\nilm\lib\site-packages\keras\engine\saving.py in load_wrapper(*args, **kwargs)
490 os.remove(tmp_filepath)
491 return res
--> 492 return load_function(*args, **kwargs)
493
494 return load_wrapper

~\Anaconda3\envs\nilm\lib\site-packages\keras\engine\network.py in load_weights(self, filepath, by_name, skip_mismatch, reshape)
1228 else:
1229 saving.load_weights_from_hdf5_group(
-> 1230 f, self.layers, reshape=reshape)
1231 if hasattr(f, 'close'):
1232 f.close()

~\Anaconda3\envs\nilm\lib\site-packages\keras\engine\saving.py in load_weights_from_hdf5_group(f, layers, reshape)
1181 """
1182 if 'keras_version' in f.attrs:
-> 1183 original_keras_version = f.attrs['keras_version'].decode('utf8')
1184 else:
1185 original_keras_version = '1'

AttributeError: 'str' object has no attribute 'decode'

@rpolea Please refer to issue #56.
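
For context, here is a minimal sketch of the failure mode this traceback points at, assuming the well-known h5py >= 3.0 vs. older Keras mismatch (presumably the same root cause discussed in #56; demo.h5 is a hypothetical file used only for illustration):

import h5py

# Keras writes its version marker as bytes, then calls .decode('utf8') on it
# when loading weights (see keras/engine/saving.py in the traceback).
with h5py.File('demo.h5', 'w') as f:
    f.attrs['keras_version'] = b'2.3.1'

with h5py.File('demo.h5', 'r') as f:
    attr = f.attrs['keras_version']
    print(type(attr))      # bytes under h5py 2.x, str under h5py >= 3.0
    # attr.decode('utf8')  # fine on h5py 2.x; AttributeError on 3.x, as above

Pinning h5py below 3.0 (for example, pip install "h5py<3.0") is the usual workaround.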

Hi,

Sorry for the delay, but I wanted to say I've done as you suggested and now all the workbooks are running. Thank you so much for your help!