run sam_lora_image_encoder.py
Closed this issue · 5 comments
Which script raised the error, test.py or train.py?
It's the model file sam_lora_image_encoder.py; I just want to test how the model performs.
> Which script raised the error, test.py or train.py?

Is it not possible to call it directly like this?
There is currently no entry point set up for that kind of call. Could you describe your task in more detail? If you only want to test the image encoder, you could try calling it the way H-SAM/segment_anything/modeling/sam.py does.
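For instance, a minimal sketch of exercising only the image encoder, assuming the H-SAM registry returns `(model, img_embedding_size)` as in the snippet later in this thread, and that the `Sam` module keeps the upstream segment-anything `image_encoder` attribute:

```python
import torch
from HSAM.segment_anything import sam_model_registry

# Assumption: the H-SAM registry returns (model, embedding_size),
# matching how it is unpacked elsewhere in this thread.
net, img_embedding_size = sam_model_registry["vit_b"](
    checkpoint="HSAM/sam_vit_b_01ec64.pth", image_size=512, num_classes=1,
)
net = net.cuda().eval()

with torch.no_grad():
    x = torch.randn(1, 3, 512, 512).cuda()
    # Run only the ViT backbone, skipping the prompt and mask decoders.
    embeddings = net.image_encoder(x)
print(embeddings.shape)
```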
```python
import torch
from HSAM.segment_anything import sam_model_registry
from HSAM.sam_lora_image_encoder import LoRA_Sam

net, img_embedding_size = sam_model_registry["vit_b"](
    checkpoint="HSAM/sam_vit_b_01ec64.pth", image_size=512, num_classes=1,
)
# print(net)
model = LoRA_Sam(net, 4).cuda()

# input data
x = torch.randn(32, 3, 512, 512).cuda()
outputs1, outputs2, attn1, attn2 = model(x, False, 512, gt=None)
print(outputs1.shape)
# print(outputs2.shape)
print(attn1.shape)
# print(attn2.shape)
```
This is how I originally wanted to call it, but my arguments don't seem right. The output is as follows:
```
torch.Size([32, 3, 512, 512])
RuntimeError Traceback (most recent call last)
Cell In[1], line 13
10 # 输入数据
11 x = torch.randn(32, 3, 512, 512).cuda()
---> 13 outputs1,_, _, _ = model(x, False, 512, gt=None)
14 print(outputs1.shape)
15 #print(outputs2.shape)
File ~/anaconda3/envs/vmunet/lib/python3.10/site-packages/torch/nn/modules/module.py:1190, in Module._call_impl(self, *input, **kwargs)
1186 # If we don't have any hooks, we want to skip the rest of the logic in
1187 # this function, and just call forward.
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190 return forward_call(*input, **kwargs)
1191 # Do not call functions when jit is used
1192 full_backward_hooks, non_full_backward_hooks = [], []
File ~/1U/VM-UNet-main/models/HSAM/sam_lora_image_encoder.py:207, in LoRA_Sam.forward(self, batched_input, multimask_output, image_size, gt, mode)
206 def forward(self, batched_input, multimask_output, image_size, gt=None, mode='train'):
--> 207 return self.sam(batched_input, multimask_output, image_size, gt=gt, mode=mode)
File ~/anaconda3/envs/vmunet/lib/python3.10/site-packages/torch/nn/modules/module.py:1190, in Module._call_impl(self, *input, **kwargs)
1186 # If we don't have any hooks, we want to skip the rest of the logic in
1187 # this function, and just call forward.
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190 return forward_call(*input, **kwargs)
1191 # Do not call functions when jit is used
1192 full_backward_hooks, non_full_backward_hooks = [], []
File ~/1U/VM-UNet-main/models/HSAM/segment_anything/modeling/sam.py:85, in Sam.forward(self, batched_input, multimask_output, image_size, gt, mode)
79 def forward(self, batched_input, multimask_output, image_size, gt=None, mode='train'):
80 # if isinstance(batched_input, list):
81 # outputs = self.forward_test(batched_input, multimask_output)
82
83 # else:
84 # outputs = self.forward_train(batched_input, multimask_output, image_size
---> 85 outputs = self.forward_train(batched_input, multimask_output, image_size, gt=gt, mode=mode)
87 return outputs
File ~/1U/VM-UNet-main/models/HSAM/segment_anything/modeling/sam.py:121, in Sam.forward_train(self, batched_input, multimask_output, image_size, input_points, gt, mode)
118 img_noise_gaussian = torch.randn((image_embeddings.size())).cuda() * 0.2 *(image_embeddings.max()-image_embeddings.min())
119 image_embeddings = (image_embeddings + img_noise_gaussian.cuda())
--> 121 low_res_masks2, iou_predictions2, attn1 = self.mask_decoder2(
122 image_embeddings=image_embeddings,
123 image_pe=self.prompt_encoder.get_dense_pe(),
124 sparse_prompt_embeddings=sparse_embeddings,
125 dense_prompt_embeddings=dense_embeddings,
126 multimask_output=multimask_output,
127 mask_feat = ps_mask,
128 gt=gt,
129 mode=mode,
130 msk_feat=msk_feat,
131 up_embed=up_embed
132 )
133 masks2 = self.postprocess_masks(
134 low_res_masks2,
135 input_size=(image_size, image_size),
136 original_size=(image_size, image_size)
137 )
138 outputs2 = {
139 'masks': masks2,
140 'iou_predictions': iou_predictions2,
141 'low_res_logits': low_res_masks2
142 }
File ~/anaconda3/envs/vmunet/lib/python3.10/site-packages/torch/nn/modules/module.py:1190, in Module._call_impl(self, *input, **kwargs)
1186 # If we don't have any hooks, we want to skip the rest of the logic in
1187 # this function, and just call forward.
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190 return forward_call(*input, **kwargs)
1191 # Do not call functions when jit is used
1192 full_backward_hooks, non_full_backward_hooks = [], []
File ~/1U/VM-UNet-main/models/HSAM/segment_anything/modeling/mask_decoder_512.py:298, in MaskDecoder2_512.forward(self, image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, multimask_output, mask_feat, gt, mode, msk_feat, up_embed)
270 def forward(
271 self,
272 image_embeddings: torch.Tensor,
(...)
281 up_embed=None
282 ) -> Tuple[torch.Tensor, torch.Tensor]:
283 """
284 Predict masks given image and prompt embeddings.
285
(...)
296 torch.Tensor: batched predictions of mask quality
297 """
--> 298 masks, iou_pred, attn_out = self.predict_masks(
299 image_embeddings=image_embeddings,
300 image_pe=image_pe,
301 sparse_prompt_embeddings=sparse_prompt_embeddings,
302 dense_prompt_embeddings=dense_prompt_embeddings,
303 mask_feat=mask_feat,
304 gt = gt,
305 mode = mode,
306 msk_feat=msk_feat,
307 up_embed=up_embed
308 )
310 # Prepare output
311 return masks, iou_pred, attn_out
File ~/1U/VM-UNet-main/models/HSAM/segment_anything/modeling/mask_decoder_512.py:375, in MaskDecoder2_512.predict_masks(self, image_embeddings, image_pe, sparse_prompt_embeddings, dense_prompt_embeddings, mask_feat, gt, mode, msk_feat, up_embed)
373 msk_feat = msk_feat.clone()+self.mlp(msk_feat)
374 msk_feat = self.norm2(msk_feat)
--> 375 msk_feat = self.med_sel(msk_feat.transpose( -2, -1))
377 # if flag_resize == 1:
378 # msk_feat = msk_feat.resize_(msk_feat.shape[-3], 1024, msk_feat.shape[-1],)
379 # flag_resize = 0
380 msk_feat = msk_feat.transpose(-1, -2).view(b, -1, h, w)
File ~/anaconda3/envs/vmunet/lib/python3.10/site-packages/torch/nn/modules/module.py:1190, in Module._call_impl(self, *input, **kwargs)
1186 # If we don't have any hooks, we want to skip the rest of the logic in
1187 # this function, and just call forward.
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190 return forward_call(*input, **kwargs)
1191 # Do not call functions when jit is used
1192 full_backward_hooks, non_full_backward_hooks = [], []
File ~/anaconda3/envs/vmunet/lib/python3.10/site-packages/torch/nn/modules/container.py:204, in Sequential.forward(self, input)
202 def forward(self, input):
203 for module in self:
--> 204 input = module(input)
205 return input
File ~/anaconda3/envs/vmunet/lib/python3.10/site-packages/torch/nn/modules/module.py:1190, in Module._call_impl(self, *input, **kwargs)
1186 # If we don't have any hooks, we want to skip the rest of the logic in
1187 # this function, and just call forward.
1188 if not (self._backward_hooks or self._forward_hooks or self._forward_pre_hooks or _global_backward_hooks
1189 or _global_forward_hooks or _global_forward_pre_hooks):
-> 1190 return forward_call(*input, **kwargs)
1191 # Do not call functions when jit is used
1192 full_backward_hooks, non_full_backward_hooks = [], []
File ~/anaconda3/envs/vmunet/lib/python3.10/site-packages/torch/nn/modules/linear.py:114, in Linear.forward(self, input)
113 def forward(self, input: Tensor) -> Tensor:
--> 114 return F.linear(input, self.weight, self.bias)
RuntimeError: Expected size for first two dimensions of batch2 tensor to be: [32, 2] but got: [32, 9].
```
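For later readers, a hedged reading of the failure: the matmul inside `med_sel` sees a token dimension of 9 where it expects 2. Since 2 equals the `num_classes=1` plus one used to build the model above, and 9 equals 8 plus one (the Synapse multi-organ class count plus background), one plausible cause is a class-count mismatch between how the model was constructed and what parts of `MaskDecoder2_512` assume. A smoke-test sketch along those lines; `num_classes=8` and `multimask_output=True` are assumptions from that reading, not values verified against the repo's train.py:

```python
import torch
from HSAM.segment_anything import sam_model_registry
from HSAM.sam_lora_image_encoder import LoRA_Sam

# Assumption: num_classes=8 (the Synapse multi-organ setting), so layers
# sized for 8 + 1 mask channels line up. Not verified against train.py.
net, img_embedding_size = sam_model_registry["vit_b"](
    checkpoint="HSAM/sam_vit_b_01ec64.pth", image_size=512, num_classes=8,
)
model = LoRA_Sam(net, 4).cuda().eval()

with torch.no_grad():
    x = torch.randn(2, 3, 512, 512).cuda()  # small batch for a quick smoke test
    # multimask_output=True mirrors the multi-class setting; gt/mode left at defaults.
    outputs1, outputs2, attn1, attn2 = model(x, True, 512, gt=None)

# forward_train appears to pack results into dicts (see the outputs2 construction
# in the traceback above), so index 'masks' rather than calling .shape directly.
print(outputs1['masks'].shape)
```

If that still fails, comparing against how the repo's test.py instantiates and calls the model is probably the fastest way to recover the exact argument set.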