facebookresearch/multimodal

AttributeError: 'MultiDataLoader' object has no attribute '__code__'

gihanpanapitiya opened this issue · 1 comment

Issue description

When executing trainer.fit (line 87 of train.py),

trainer.fit(model, datamodule=datamodule)

I am getting the following error:

Code example

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:768, in Trainer.fit(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
    749 r"""
    750 Runs the full optimization routine.
    751 
   (...)
    765     datamodule: An instance of :class:`~pytorch_lightning.core.datamodule.LightningDataModule`.
    766 """
    767 self.strategy.model = model
--> 768 self._call_and_handle_interrupt(
    769     self._fit_impl, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path
    770 )

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:721, in Trainer._call_and_handle_interrupt(self, trainer_fn, *args, **kwargs)
    719         return self.strategy.launcher.launch(trainer_fn, *args, trainer=self, **kwargs)
    720     else:
--> 721         return trainer_fn(*args, **kwargs)
    722 # TODO: treat KeyboardInterrupt as BaseException (delete the code below) in v1.7
    723 except KeyboardInterrupt as exception:

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:809, in Trainer._fit_impl(self, model, train_dataloaders, val_dataloaders, datamodule, ckpt_path)
    805 ckpt_path = ckpt_path or self.resume_from_checkpoint
    806 self._ckpt_path = self.__set_ckpt_path(
    807     ckpt_path, model_provided=True, model_connected=self.lightning_module is not None
    808 )
--> 809 results = self._run(model, ckpt_path=self.ckpt_path)
    811 assert self.state.stopped
    812 self.training = False

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:1234, in Trainer._run(self, model, ckpt_path)
   1230 self._checkpoint_connector.restore_training_state()
   1232 self._checkpoint_connector.resume_end()
-> 1234 results = self._run_stage()
   1236 log.detail(f"{self.__class__.__name__}: trainer tearing down")
   1237 self._teardown()

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:1321, in Trainer._run_stage(self)
   1319 if self.predicting:
   1320     return self._run_predict()
-> 1321 return self._run_train()

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:1351, in Trainer._run_train(self)
   1349 self.fit_loop.trainer = self
   1350 with torch.autograd.set_detect_anomaly(self._detect_anomaly):
-> 1351     self.fit_loop.run()

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/loops/base.py:203, in Loop.run(self, *args, **kwargs)
    201 while not self.done:
    202     try:
--> 203         self.on_advance_start(*args, **kwargs)
    204         self.advance(*args, **kwargs)
    205         self.on_advance_end()

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/loops/fit_loop.py:254, in FitLoop.on_advance_start(self)
    251 self.trainer._call_callback_hooks("on_epoch_start")
    252 self.trainer._call_lightning_module_hook("on_epoch_start")
--> 254 self.trainer._call_callback_hooks("on_train_epoch_start")
    255 self.trainer._call_lightning_module_hook("on_train_epoch_start")
    257 self.epoch_progress.increment_started()

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:1634, in Trainer._call_callback_hooks(self, hook_name, *args, **kwargs)
   1632         if callable(fn):
   1633             with self.profiler.profile(f"[Callback]{callback.state_key}.{hook_name}"):
-> 1634                 fn(self, self.lightning_module, *args, **kwargs)
   1636 if pl_module:
   1637     # restore current_fx when nested context
   1638     pl_module._current_fx_name = prev_fx_name

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/callbacks/progress/tqdm_progress.py:259, in TQDMProgressBar.on_train_epoch_start(self, trainer, *_)
    257 def on_train_epoch_start(self, trainer: "pl.Trainer", *_: Any) -> None:
    258     total_train_batches = self.total_train_batches
--> 259     total_val_batches = self.total_val_batches
    260     if total_train_batches != float("inf") and total_val_batches != float("inf"):
    261         # val can be checked multiple times per epoch
    262         val_checks_per_epoch = total_train_batches // trainer.val_check_batch

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/callbacks/progress/base.py:173, in ProgressBarBase.total_val_batches(self)
    167 """The total number of validation batches, which may change from epoch to epoch for all val dataloaders.
    168 
    169 Use this to set the total number of iterations in the progress bar. Can return ``inf`` if the predict dataloader
    170 is of infinite size.
    171 """
    172 assert self._trainer is not None
--> 173 return sum(self.trainer.num_val_batches) if self._trainer.fit_loop.epoch_loop._should_check_val_epoch() else 0

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/loops/epoch/training_epoch_loop.py:505, in TrainingEpochLoop._should_check_val_epoch(self)
    503 def _should_check_val_epoch(self):
    504     return (
--> 505         self.trainer.enable_validation
    506         and (self.trainer.current_epoch + 1) % self.trainer.check_val_every_n_epoch == 0
    507     )

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/trainer/trainer.py:2278, in Trainer.enable_validation(self)
   2274 @property
   2275 def enable_validation(self) -> bool:
   2276     """Check if we should run validation during training."""
   2277     return (
-> 2278         self._data_connector._val_dataloader_source.is_defined()
   2279         and is_overridden("validation_step", self.lightning_module)
   2280         and self.limit_val_batches > 0
   2281     )

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/trainer/connectors/data_connector.py:536, in _DataLoaderSource.is_defined(self)
    531 def is_defined(self) -> bool:
    532     """Returns whether the source dataloader can be retrieved or not.
    533 
    534     If the source is a module it checks that the method with given :attr:`name` is overridden.
    535     """
--> 536     return not self.is_module() or is_overridden(self.name, self.instance)

File ~/miniconda3/envs/torch-multimodal/lib/python3.8/site-packages/pytorch_lightning/utilities/model_helpers.py:56, in is_overridden(method_name, instance, parent)
     53 if parent_attr is None:
     54     raise ValueError("The parent should define the method")
---> 56 return instance_attr.__code__ != parent_attr.__code__

AttributeError: 'MultiDataLoader' object has no attribute '__code__'

Are there any unimplemented methods in torchmultimodal/examples/flava/data/multitask.py?

  • How you installed TorchMultimodal (conda, pip, source): conda
  • Build command you used (if compiling from source): pip install -e .
  • OS: linux (ubuntu)
  • TorchMultimodal version: torchmultimodal-0.1.0a0
  • Python version: 3.8
  • CUDA/cuDNN version: running in cpu
  • GPU models and configuration: N/A
  • Versions of any other relevant libraries:

Hi @gihanpanapitiya, thanks for creating the issue! Some aspects of the FLAVA code are still work in progress and should be fixed within the next couple weeks. We will keep you posted on the status here, in the meantime please let us know if you run into any other issues. cc @apsdehal