ExponentialML/ComfyUI_Native_DynamiCrafter

Sizes of tensors must match except in dimension 1. Expected size 1280 but got size 1024 for tensor number 1 in the list.

Closed this issue · 1 comments

I used your workflow and got an error:
Sizes of tensors must match except in dimension 1. Expected size 1280 but got size 1024 for tensor number 1 in the list.

Error occurred when executing KSampler:

Sizes of tensors must match except in dimension 1. Expected size 1280 but got size 1024 for tensor number 1 in the list.

File "D:\ai\ComfyUI-aki-v1.1\execution.py", line 151, in recursive_execute
output_data, output_ui = get_output_data(obj, input_data_all)
File "D:\ai\ComfyUI-aki-v1.1\execution.py", line 81, in get_output_data
return_values = map_node_over_list(obj, input_data_all, obj.FUNCTION, allow_interrupt=True)
File "D:\ai\ComfyUI-aki-v1.1\execution.py", line 74, in map_node_over_list
results.append(getattr(obj, func)(**slice_dict(input_data_all, i)))
File "D:\ai\ComfyUI-aki-v1.1\nodes.py", line 1369, in sample
return common_ksampler(model, seed, steps, cfg, sampler_name, scheduler, positive, negative, latent_image, denoise=denoise)
File "D:\ai\ComfyUI-aki-v1.1\nodes.py", line 1339, in common_ksampler
samples = comfy.sample.sample(model, noise, steps, cfg, sampler_name, scheduler, positive, negative, latent_image,
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 22, in informative_sample
raise e
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI-Impact-Pack\modules\impact\sample_error_enhancer.py", line 9, in informative_sample
return original_sample(*args, **kwargs) # This code helps interpret error messages that occur within exceptions but does not have any impact on other operations.
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI-AnimateDiff-Evolved\animatediff\sampling.py", line 267, in motion_sample
return orig_comfy_sample(model, noise, *args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\comfy\sample.py", line 100, in sample
samples = sampler.sample(noise, positive_copy, negative_copy, cfg=cfg, latent_image=latent_image, start_step=start_step, last_step=last_step, force_full_denoise=force_full_denoise, denoise_mask=noise_mask, sigmas=sigmas, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 1380, in KSampler_sample
return _KSampler_sample(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\comfy\samplers.py", line 705, in sample
return sample(self.model, noise, positive, negative, cfg, self.device, sampler, sigmas, self.model_options, latent_image=latent_image, denoise_mask=denoise_mask, callback=callback, disable_pbar=disable_pbar, seed=seed)
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 1399, in sample
return _sample(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\comfy\samplers.py", line 610, in sample
samples = sampler.sample(model_wrap, sigmas, extra_args, callback, noise, latent_image, denoise_mask, disable_pbar)
File "D:\ai\ComfyUI-aki-v1.1\comfy\samplers.py", line 548, in sample
samples = self.sampler_function(model_k, noise, sigmas, extra_args=extra_args, callback=k_callback, disable=disable_pbar, **self.extra_options)
File "D:\ai\ComfyUI-aki-v1.1\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
return func(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\comfy\k_diffusion\sampling.py", line 154, in sample_euler_ancestral
denoised = model(x, sigmas[i] * s_in, **extra_args)
File "D:\ai\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\comfy\samplers.py", line 286, in forward
out = self.inner_model(x, sigma, cond=cond, uncond=uncond, cond_scale=cond_scale, model_options=model_options, seed=seed)
File "D:\ai\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\comfy\samplers.py", line 273, in forward
return self.apply_model(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_smZNodes\smZNodes.py", line 1012, in apply_model
out = super().apply_model(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\comfy\samplers.py", line 270, in apply_model
out = sampling_function(self.inner_model, x, timestep, uncond, cond, cond_scale, model_options=model_options, seed=seed)
File "D:\ai\ComfyUI-aki-v1.1\comfy\samplers.py", line 250, in sampling_function
cond_pred, uncond_pred = calc_cond_uncond_batch(model, cond, uncond
, x, timestep, model_options)
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI-TiledDiffusion.patches.py", line 4, in calc_cond_uncond_batch
return calc_cond_uncond_batch_original_tiled_diffusion_39266ca6(model, cond, uncond, x_in, timestep, model_options)
File "D:\ai\ComfyUI-aki-v1.1\comfy\samplers.py", line 222, in calc_cond_uncond_batch
output = model_options['model_function_wrapper'](model.apply_model, {"input": input_x, "timestep": timestep
, "c": c, "cond_or_uncond": cond_or_uncond}).chunk(batch_chunks)
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_Native_DynamiCrafter\nodes.py", line 128, in _forward
x_out = apply_model(
File "D:\ai\ComfyUI-aki-v1.1\comfy\model_base.py", line 96, in apply_model
model_output = self.diffusion_model(xc, t, context=context, control=control, transformer_options=transformer_options, **extra_conds).float()
File "D:\ai\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1511, in _wrapped_call_impl
return self._call_impl(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\python\lib\site-packages\torch\nn\modules\module.py", line 1520, in _call_impl
return forward_call(*args, **kwargs)
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_Native_DynamiCrafter\lvdm\modules\networks\openaimodel3d.py", line 723, in forward
context = context_processor(context, num_video_frames, img_emb=img_emb)
File "D:\ai\ComfyUI-aki-v1.1\custom_nodes\ComfyUI_Native_DynamiCrafter\lvdm\modules\networks\openaimodel3d.py", line 318, in context_processor
context = torch.cat([context, img_emb.to(context.device, context.dtype)], dim=1)

Sorry, I used the wrong CLIP model.