IDEA-CCNL/Fengshenbang-LM

Loading Ziya-BLIP2-14B-Visual-v1 with load_in_8bit and device_map='auto' raises an error

WanBenLe opened this issue · 0 comments

import torch
from PIL import Image
from transformers import LlamaForCausalLM, LlamaTokenizer
from accelerate import load_checkpoint_and_dispatch

# `model` (the ZiyaBlip2ForCausalLM wrapper), `map_cuda`, `image_processor`,
# `generate_config`, and `LM_MODEL_PATH` are defined earlier in the notebook
# and omitted here.
lm_model = LlamaForCausalLM.from_pretrained(
    './Ziya-LLaMA-13B-convert', device_map=map_cuda, load_in_8bit=True)
model = load_checkpoint_and_dispatch(
    model, "./Ziya-BLIP2-14B-Visual-v1/pytorch_model.bin", device_map=map_cuda)

tokenizer = LlamaTokenizer.from_pretrained(LM_MODEL_PATH)
img = Image.open("./somefig.jpg")
output = model.chat(
    tokenizer=tokenizer,
    pixel_values=image_processor(img, return_tensors="pt").pixel_values.to(torch.device('cuda')),
    query="Does this picture related to games? Please answer with Yes or No.",
    previous_querys=[],
    previous_outputs=[],
    **generate_config,
)
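For reference, `map_cuda` is built earlier in the notebook and not shown above; it is roughly an 'auto'-style map spread over the four GPUs, along the lines of the following sketch (illustrative only, the real values differ):

from accelerate import infer_auto_device_map

# Illustrative placeholder for the real map_cuda: spread the already
# constructed ZiyaBlip2ForCausalLM across cuda:0-cuda:3.
# The max_memory values here are made up.
map_cuda = infer_auto_device_map(
    model,
    max_memory={0: "20GiB", 1: "20GiB", 2: "20GiB", 3: "20GiB"},
)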

transformers and the related packages are all on their latest versions, and the model is spread across GPUs cuda:0 through cuda:3.
The error is as follows:

RuntimeError Traceback (most recent call last)
File :3

File ~/.cache/huggingface/modules/transformers_modules/modeling_ziya_blip2.py:272, in ZiyaBlip2ForCausalLM.chat(self, tokenizer, query, pixel_values, previous_querys, previous_outputs, **generate_kwargs)
253 def chat(self,
254 tokenizer,
255 query: str,
(...)
258 previous_outputs: List[str],
259 **generate_kwargs,):
260 """
261 use for generate text by chat-style
262 Args:
(...)
270 text: generate text
271 """
--> 272 input_ids, inputs_embeds = self.prepare_inputs_for_chat(
273 tokenizer, query, pixel_values, previous_querys, previous_outputs, 2048
274 )
275 response = self.language_model.generate(
276 inputs_embeds=inputs_embeds,
277 attention_mask=torch.ones_like(input_ids),
278 **generate_kwargs,
279 )
280 response = tokenizer.decode(response[0], skip_special_tokens=True)

File ~/.cache/huggingface/modules/transformers_modules/modeling_ziya_blip2.py:220, in ZiyaBlip2ForCausalLM.prepare_inputs_for_chat(self, tokenizer, query, pixel_values, previous_querys, previous_outputs, max_length)
218 # 2. Prepare embeddings
219 pixel_values.to(device)
--> 220 image_embeds = self.vision_model(pixel_values, return_dict=True).last_hidden_state
221 image_attention_mask = torch.ones(
222 image_embeds.size()[:-1], dtype=torch.long, device=image_embeds.device)
223 query_tokens = self.query_tokens.expand(image_embeds.shape[0], -1, -1)

File ~/autodl-tmp/conda/envs/mbb/lib/python3.10/site-packages/torch/nn/modules/module.py:1501, in Module._call_impl(self, *args, **kwargs)
1496 # If we don't have any hooks, we want to skip the rest of the logic in
1497 # this function, and just call forward.
1498 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1499 or _global_backward_pre_hooks or _global_backward_hooks
1500 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501 return forward_call(*args, **kwargs)
1502 # Do not call functions when jit is used
1503 full_backward_hooks, non_full_backward_hooks = [], []

File ~/autodl-tmp/conda/envs/mbb/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py:550, in Blip2VisionModel.forward(self, pixel_values, output_attentions, output_hidden_states, return_dict)
546 raise ValueError("You have to specify pixel_values")
548 hidden_states = self.embeddings(pixel_values)
--> 550 encoder_outputs = self.encoder(
551 inputs_embeds=hidden_states,
552 output_attentions=output_attentions,
553 output_hidden_states=output_hidden_states,
554 return_dict=return_dict,
555 )
557 last_hidden_state = encoder_outputs[0]
558 last_hidden_state = self.post_layernorm(last_hidden_state)

File ~/autodl-tmp/conda/envs/mbb/lib/python3.10/site-packages/torch/nn/modules/module.py:1501, in Module._call_impl(self, *args, **kwargs)
1496 # If we don't have any hooks, we want to skip the rest of the logic in
1497 # this function, and just call forward.
1498 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1499 or _global_backward_pre_hooks or _global_backward_hooks
1500 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501 return forward_call(*args, **kwargs)
1502 # Do not call functions when jit is used
1503 full_backward_hooks, non_full_backward_hooks = [], []

File ~/autodl-tmp/conda/envs/mbb/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py:489, in Blip2Encoder.forward(self, inputs_embeds, attention_mask, output_attentions, output_hidden_states, return_dict)
483 layer_outputs = torch.utils.checkpoint.checkpoint(
484 create_custom_forward(encoder_layer),
485 hidden_states,
486 attention_mask,
487 )
488 else:
--> 489 layer_outputs = encoder_layer(
490 hidden_states,
491 attention_mask,
492 output_attentions=output_attentions,
493 )
495 hidden_states = layer_outputs[0]
497 if output_attentions:

File ~/autodl-tmp/conda/envs/mbb/lib/python3.10/site-packages/torch/nn/modules/module.py:1501, in Module._call_impl(self, *args, **kwargs)
1496 # If we don't have any hooks, we want to skip the rest of the logic in
1497 # this function, and just call forward.
1498 if not (self._backward_hooks or self._backward_pre_hooks or self._forward_hooks or self._forward_pre_hooks
1499 or _global_backward_pre_hooks or _global_backward_hooks
1500 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1501 return forward_call(*args, **kwargs)
1502 # Do not call functions when jit is used
1503 full_backward_hooks, non_full_backward_hooks = [], []

File ~/autodl-tmp/conda/envs/mbb/lib/python3.10/site-packages/transformers/models/blip_2/modeling_blip_2.py:257, in Blip2EncoderLayer.forward(self, hidden_states, attention_mask, output_attentions)
254 hidden_states = self.layer_norm2(hidden_states)
255 hidden_states = self.mlp(hidden_states)
--> 257 hidden_states = hidden_states + residual
259 outputs = (hidden_states,)
261 if output_attentions:

RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cuda:1 and cuda:0!

How can this be resolved? Thanks.
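For reference, the failing frame is the residual add inside Blip2EncoderLayer, so it looks like the device map split the BLIP-2 vision encoder itself across cuda:0 and cuda:1. A minimal sketch of one possible workaround, under that assumption (not a confirmed fix), is to pin every vision_model submodule to a single GPU before dispatching:

# Sketch: keep the whole vision tower on cuda:0 so the residual add inside
# Blip2EncoderLayer never mixes devices. "vision_model" is the attribute name
# visible in the traceback; everything else stays where map_cuda put it.
pinned_map = dict(map_cuda)
for name in list(pinned_map):
    if name == "vision_model" or name.startswith("vision_model."):
        pinned_map[name] = 0

model = load_checkpoint_and_dispatch(
    model,
    "./Ziya-BLIP2-14B-Visual-v1/pytorch_model.bin",
    device_map=pinned_map,
)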