Qwen1.5 fine-tuning error
Archerer opened this issue · 3 comments
NotImplementedError Traceback (most recent call last)
Cell In[56], line 1
----> 1 trainer = Trainer(
2 model=model,
3 args=args,
4 train_dataset=tokenized_id,
5 data_collator=DataCollatorForSeq2Seq(tokenizer=tokenizer, padding=True),
6 )
File ~/miniconda3/lib/python3.10/site-packages/transformers/trainer.py:459, in Trainer.__init__(self, model, args, data_collator, train_dataset, eval_dataset, tokenizer, model_init, compute_metrics, callbacks, optimizers, preprocess_logits_for_metrics)
454 # Bnb Quantized models doesn't support `.to` operation.
455 if (
456 self.place_model_on_device
457 and not getattr(model, "quantization_method", None) == QuantizationMethod.BITS_AND_BYTES
458 ):
--> 459 self._move_model_to_device(model, args.device)
461 # Force n_gpu to 1 to avoid DataParallel as MP will manage the GPUs
462 if self.is_model_parallel:
File ~/miniconda3/lib/python3.10/site-packages/transformers/trainer.py:693, in Trainer._move_model_to_device(self, model, device)
692 def _move_model_to_device(self, model, device):
--> 693 model = model.to(device)
694 # Moving a model to an XLA device disconnects the tied weights, so we have to retie them.
695 if self.args.parallel_mode == ParallelMode.TPU and hasattr(model, "tie_weights"):
File ~/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py:1160, in Module.to(self, *args, **kwargs)
1156 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None,
1157 non_blocking, memory_format=convert_to_format)
1158 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
-> 1160 return self._apply(convert)
File ~/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py:810, in Module._apply(self, fn, recurse)
808 if recurse:
809 for module in self.children():
--> 810 module._apply(fn)
812 def compute_should_use_set_data(tensor, tensor_applied):
813 if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
814 # If the new tensor has compatible tensor type as the existing tensor,
815 # the current behavior is to change the tensor in-place using `.data =`,
(...)
820 # global flag to let the user control whether they want the future
821 # behavior of overwriting the existing tensor or not.
File ~/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py:810, in Module._apply(self, fn, recurse)
808 if recurse:
809 for module in self.children():
--> 810 module._apply(fn)
812 def compute_should_use_set_data(tensor, tensor_applied):
813 if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
814 # If the new tensor has compatible tensor type as the existing tensor,
815 # the current behavior is to change the tensor in-place using `.data =`,
(...)
820 # global flag to let the user control whether they want the future
821 # behavior of overwriting the existing tensor or not.
[... skipping similar frames: Module._apply at line 810 (9 times)]
File ~/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py:810, in Module._apply(self, fn, recurse)
808 if recurse:
809 for module in self.children():
--> 810 module._apply(fn)
812 def compute_should_use_set_data(tensor, tensor_applied):
813 if torch._has_compatible_shallow_copy_type(tensor, tensor_applied):
814 # If the new tensor has compatible tensor type as the existing tensor,
815 # the current behavior is to change the tensor in-place using `.data =`,
(...)
820 # global flag to let the user control whether they want the future
821 # behavior of overwriting the existing tensor or not.
File ~/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py:833, in Module._apply(self, fn, recurse)
829 # Tensors stored in modules are graph leaves, and we don't want to
830 # track autograd history of `param_applied`, so we have to use
831 # with torch.no_grad():
832 with torch.no_grad():
--> 833 param_applied = fn(param)
834 should_use_set_data = compute_should_use_set_data(param, param_applied)
835 if should_use_set_data:
File ~/miniconda3/lib/python3.10/site-packages/torch/nn/modules/module.py:1158, in Module.to.<locals>.convert(t)
1155 if convert_to_format is not None and t.dim() in (4, 5):
1156 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None,
1157 non_blocking, memory_format=convert_to_format)
-> 1158 return t.to(device, dtype if t.is_floating_point() or t.is_complex() else None, non_blocking)
NotImplementedError: Cannot copy out of meta tensor; no data!
I tried it on AutoDL and got the same error. GLM4 worked when I tried it, so it's probably a network issue.
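For context, "Cannot copy out of meta tensor; no data!" means some of the model's parameters were never materialized, which is consistent with an interrupted checkpoint download (or with loading via `device_map="auto"` and then letting `Trainer` call `model.to(device)`). Below is a minimal sketch of one way to rule this out; the model id, cache path, and dtype are assumptions, not taken from this issue:

```python
import torch
from modelscope import snapshot_download
from transformers import AutoModelForCausalLM, AutoTokenizer

# Re-download the checkpoint; missing shards are a common cause of the
# meta-tensor error. (Model id and cache_dir below are placeholders.)
model_dir = snapshot_download("qwen/Qwen1.5-7B-Chat", cache_dir="/root/autodl-tmp")

# Load the weights fully onto the host (no device_map="auto"), so that
# Trainer's model.to(device) has real data to move.
tokenizer = AutoTokenizer.from_pretrained(model_dir)
model = AutoModelForCausalLM.from_pretrained(model_dir, torch_dtype=torch.bfloat16)

# Sanity check before building the Trainer: nothing should remain on the meta device.
assert not any(p.is_meta for p in model.parameters()), "some weights were never loaded"
```

If the checkpoint turns out to be complete but the model was loaded with `device_map="auto"`, dropping that argument before fine-tuning also avoids the meta-tensor `.to()` path.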
Thanks, it's working now.