AttributeError: 'TimeMixer' object has no attribute 'pred_len'
carusyte opened this issue · 2 comments
carusyte commented
What happened + What you expected to happen
Fitting the TimeMixer model with channel_independence=1 throws the captioned error. Error stack:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
Cell In[28], line 1
----> 1 nf.fit(
2 df_train,
3 use_init_models=True,
4 val_size=200,
5 )
File ~/ProgramData/git/neuralforecast/neuralforecast/core.py:544, in NeuralForecast.fit(self, df, static_df, val_size, sort_df, use_init_models, verbose, id_col, time_col, target_col, distributed_config)
541 self._reset_models()
543 for i, model in enumerate(self.models):
--> 544 self.models[i] = model.fit(
545 self.dataset, val_size=val_size, distributed_config=distributed_config
546 )
548 self._fitted = True
File ~/ProgramData/git/neuralforecast/neuralforecast/common/_base_multivariate.py:556, in BaseMultivariate.fit(self, dataset, val_size, test_size, random_seed, distributed_config)
552 if distributed_config is not None:
553 raise ValueError(
554 "multivariate models cannot be trained using distributed data parallel."
555 )
--> 556 return self._fit(
557 dataset=dataset,
558 batch_size=self.n_series,
559 valid_batch_size=self.n_series,
560 val_size=val_size,
561 test_size=test_size,
562 random_seed=random_seed,
563 shuffle_train=False,
564 distributed_config=None,
565 )
File ~/ProgramData/git/neuralforecast/neuralforecast/common/_base_model.py:356, in BaseModel._fit(self, dataset, batch_size, valid_batch_size, val_size, test_size, random_seed, shuffle_train, distributed_config)
354 model = self
355 trainer = pl.Trainer(**model.trainer_kwargs)
--> 356 trainer.fit(model, datamodule=datamodule)
357 model.metrics = trainer.callback_metrics
358 model.__dict__.pop("_trainer", None)
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/trainer/trainer.py:543, in Trainer.fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
541 self.state.status = TrainerStatus.RUNNING
542 self.training = True
--> 543 call._call_and_handle_interrupt(
544 self, self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
545 )
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/trainer/call.py:44, in _call_and_handle_interrupt(trainer, trainer_fn, *args, **kwargs)
42 if trainer.strategy.launcher is not None:
43 return trainer.strategy.launcher.launch(trainer_fn, *args, trainer=trainer, **kwargs)
---> 44 return trainer_fn(*args, **kwargs)
46 except _TunerExitException:
47 _call_teardown_hook(trainer)
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/trainer/trainer.py:579, in Trainer._fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
572 assert self.state.fn is not None
573 ckpt_path = self._checkpoint_connector._select_ckpt_path(
574 self.state.fn,
575 ckpt_path,
576 model_provided=True,
577 model_connected=self.lightning_module is not None,
578 )
--> 579 self._run(model, ckpt_path=ckpt_path)
581 assert self.state.stopped
582 self.training = False
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/trainer/trainer.py:986, in Trainer._run(self, model, ckpt_path)
981 self._signal_connector.register_signal_handlers()
983 # ----------------------------
984 # RUN THE TRAINER
985 # ----------------------------
--> 986 results = self._run_stage()
988 # ----------------------------
989 # POST-Training CLEAN UP
990 # ----------------------------
991 log.debug(f"{self.__class__.__name__}: trainer tearing down")
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/trainer/trainer.py:1028, in Trainer._run_stage(self)
1026 if self.training:
1027 with isolate_rng():
-> 1028 self._run_sanity_check()
1029 with torch.autograd.set_detect_anomaly(self._detect_anomaly):
1030 self.fit_loop.run()
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/trainer/trainer.py:1057, in Trainer._run_sanity_check(self)
1054 call._call_callback_hooks(self, "on_sanity_check_start")
1056 # run eval step
-> 1057 val_loop.run()
1059 call._call_callback_hooks(self, "on_sanity_check_end")
1061 # reset logger connector
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/loops/utilities.py:182, in _no_grad_context.<locals>._decorator(self, *args, **kwargs)
180 context_manager = torch.no_grad
181 with context_manager():
--> 182 return loop_run(self, *args, **kwargs)
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/loops/evaluation_loop.py:135, in _EvaluationLoop.run(self)
133 self.batch_progress.is_last_batch = data_fetcher.done
134 # run step hooks
--> 135 self._evaluation_step(batch, batch_idx, dataloader_idx, dataloader_iter)
136 except StopIteration:
137 # this needs to wrap the `*_step` call too (not just `next`) for `dataloader_iter` support
138 break
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/loops/evaluation_loop.py:396, in _EvaluationLoop._evaluation_step(self, batch, batch_idx, dataloader_idx, dataloader_iter)
390 hook_name = "test_step" if trainer.testing else "validation_step"
391 step_args = (
392 self._build_step_args_from_hook_kwargs(hook_kwargs, hook_name)
393 if not using_dataloader_iter
394 else (dataloader_iter,)
395 )
--> 396 output = call._call_strategy_hook(trainer, hook_name, *step_args)
398 self.batch_progress.increment_processed()
400 if using_dataloader_iter:
401 # update the hook kwargs now that the step method might have consumed the iterator
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/trainer/call.py:311, in _call_strategy_hook(trainer, hook_name, *args, **kwargs)
308 return None
310 with trainer.profiler.profile(f"[Strategy]{trainer.strategy.__class__.__name__}.{hook_name}"):
--> 311 output = fn(*args, **kwargs)
313 # restore current_fx when nested context
314 pl_module._current_fx_name = prev_fx_name
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/pytorch_lightning/strategies/strategy.py:411, in Strategy.validation_step(self, *args, **kwargs)
409 if self.model != self.lightning_module:
410 return self._forward_redirection(self.model, self.lightning_module, "validation_step", *args, **kwargs)
--> 411 return self.lightning_module.validation_step(*args, **kwargs)
File ~/ProgramData/git/neuralforecast/neuralforecast/common/_base_multivariate.py:438, in BaseMultivariate.validation_step(self, batch, batch_idx)
429 windows_batch = dict(
430 insample_y=insample_y, # [Ws, L, n_series]
431 insample_mask=insample_mask, # [Ws, L, n_series]
(...)
434 stat_exog=stat_exog,
435 ) # [n_series, S]
437 # Model Predictions
--> 438 output = self(windows_batch)
439 if self.loss.is_distribution_output:
440 outsample_y, y_loc, y_scale = self._inv_normalization(
441 y_hat=outsample_y, temporal_cols=batch["temporal_cols"], y_idx=y_idx
442 )
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/torch/nn/modules/module.py:1511, in Module._wrapped_call_impl(self, *args, **kwargs)
1509 return self._compiled_call_impl(*args, **kwargs) # type: ignore[misc]
1510 else:
-> 1511 return self._call_impl(*args, **kwargs)
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/torch/nn/modules/module.py:1520, in Module._call_impl(self, *args, **kwargs)
1515 # If we don't have any hooks, we want to skip the rest of the logic in
1516 # this function, and just call forward.
1517 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1518 or _global_backward_pre_hooks or _global_backward_hooks
1519 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1520 return forward_call(*args, **kwargs)
1522 try:
1523 result = None
File ~/ProgramData/git/neuralforecast/neuralforecast/models/timemixer.py:724, in TimeMixer.forward(self, windows_batch)
721 x_mark_enc = None
722 x_mark_dec = None
--> 724 y_pred = self.forecast(insample_y, x_mark_enc, x_mark_dec)
725 y_pred = y_pred[:, -self.h :, :]
726 y_pred = self.loss.domain_map(y_pred)
File ~/ProgramData/git/neuralforecast/neuralforecast/models/timemixer.py:675, in TimeMixer.forecast(self, x_enc, x_mark_enc, x_mark_dec)
672 enc_out_list = self.pdm_blocks[i](enc_out_list)
674 # Future Multipredictor Mixing as decoder for future
--> 675 dec_out_list = self.future_multi_mixing(B, enc_out_list, x_list)
677 dec_out = torch.stack(dec_out_list, dim=-1).sum(-1)
678 dec_out = self.normalize_layers[0](dec_out, "denorm")
File ~/ProgramData/git/neuralforecast/neuralforecast/models/timemixer.py:695, in TimeMixer.future_multi_mixing(self, B, enc_out_list, x_list)
692 else:
693 dec_out = self.projection_layer(dec_out)
694 dec_out = (
--> 695 dec_out.reshape(B, self.c_out, self.pred_len)
696 .permute(0, 2, 1)
697 .contiguous()
698 )
699 dec_out_list.append(dec_out)
701 else:
File ~/.pyenv/versions/3.12.2/envs/venv_3.12.2/lib/python3.12/site-packages/torch/nn/modules/module.py:1688, in Module.__getattr__(self, name)
1686 if name in modules:
1687 return modules[name]
-> 1688 raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
AttributeError: 'TimeMixer' object has no attribute 'pred_len'
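The final frames show TimeMixer.future_multi_mixing reshaping the decoder output with self.pred_len, an attribute the neuralforecast port of TimeMixer never sets; the horizon is stored as self.h (used a few frames up in forward, y_pred[:, -self.h :, :]). Below is a hedged sketch of the kind of one-line change that addresses it, not necessarily the exact patch that was later committed:

# neuralforecast/models/timemixer.py, inside TimeMixer.future_multi_mixing
# Sketch of the presumed change (the committed fix may differ):
dec_out = (
    dec_out.reshape(B, self.c_out, self.h)  # was self.pred_len, which is never defined on the model
    .permute(0, 2, 1)
    .contiguous()
)
dec_out_list.append(dec_out)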
Versions / Dependencies
neuralforecast: main branch
Reproduction script
N/A
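No script was attached; the following is an illustrative sketch of the kind of call that triggers the error. The synthetic data and hyperparameters are assumptions, not the reporter's original setup.

# Minimal sketch (assumed setup): fit TimeMixer with channel_independence=1 on a
# small synthetic panel; on affected versions this raises
# AttributeError: 'TimeMixer' object has no attribute 'pred_len'.
import numpy as np
import pandas as pd
from neuralforecast import NeuralForecast
from neuralforecast.models import TimeMixer

# Two short synthetic series in the long format NeuralForecast expects.
dates = pd.date_range("2024-01-01", periods=300, freq="D")
df_train = pd.concat(
    [
        pd.DataFrame({"unique_id": uid, "ds": dates, "y": np.random.rand(len(dates))})
        for uid in ("series_a", "series_b")
    ],
    ignore_index=True,
)

model = TimeMixer(
    h=12,
    input_size=48,
    n_series=2,               # TimeMixer is multivariate and needs the series count
    channel_independence=1,   # the setting that triggers the error
    max_steps=5,              # keep the run short; value is illustrative
)
nf = NeuralForecast(models=[model], freq="D")
nf.fit(df_train, val_size=50)  # fails in TimeMixer.future_multi_mixing on affected versions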
Issue Severity
High: It blocks me from completing my task.
marcopeix commented
Hello @carusyte, thanks for raising the issue! I saw your fix, and nice catch, that should indeed be fixed. Let me fix it on my end; it will be faster. Thanks again!
marcopeix commented
Fix is pushed. You can install from GitHub to access it immediately: pip install git+https://github.com/Nixtla/neuralforecast.git
Thanks for your help!
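For anyone installing from the main branch ahead of a release, a quick illustrative check of which build ends up on your import path; it does not test the fix itself.

# Confirm which neuralforecast installation and timemixer module are being imported.
import neuralforecast
from neuralforecast.models import timemixer

print(neuralforecast.__version__)
print(timemixer.__file__)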