AUTOMATIC1111 / stable-diffusion-webui-tensorrt


RuntimeError: cpu gpu

kalle07 opened this issue

(Win10, RTX 4060 16 GB)
Everything runs fine without TensorRT (I did not change any of the default settings).

If I select my compiled TensorRT model in the settings, I get:
*** Error completing request
*** Arguments: ('task(nxynh2sm66uhnwh)', 'a photo of a woman', '', [], 35, 'DPM++ 2M Karras', 1, 1, 7, 1024, 768, False, 0.7, 2, 'Latent', 0, 0, 0, 'Use same checkpoint', 'Use same sampler', '', '', [], <gradio.routes.Request object at 0x000001E7D082E890>, 0, False, '', 0.8, -1, False, -1, 0, 0, 0, False, False, 'positive', 'comma', 0, False, False, '', 1, '', [], 0, '', [], 0, '', [], True, False, False, False, 0, False) {}
Traceback (most recent call last):
  File "D:\stable-diffusion\webui\modules\call_queue.py", line 57, in f
    res = list(func(*args, **kwargs))
  File "D:\stable-diffusion\webui\modules\call_queue.py", line 36, in f
    res = func(*args, **kwargs)
  File "D:\stable-diffusion\webui\modules\txt2img.py", line 55, in txt2img
    processed = processing.process_images(p)
  File "D:\stable-diffusion\webui\modules\processing.py", line 732, in process_images
    res = process_images_inner(p)
  File "D:\stable-diffusion\webui\modules\processing.py", line 867, in process_images_inner
    samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts)
  File "D:\stable-diffusion\webui\modules\processing.py", line 1140, in sample
    samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x))
  File "D:\stable-diffusion\webui\modules\sd_samplers_kdiffusion.py", line 235, in sample
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "D:\stable-diffusion\webui\modules\sd_samplers_common.py", line 261, in launch_sampling
    return func()
  File "D:\stable-diffusion\webui\modules\sd_samplers_kdiffusion.py", line 235, in <lambda>
    samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs))
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "D:\stable-diffusion\webui\repositories\k-diffusion\k_diffusion\sampling.py", line 594, in sample_dpmpp_2m
    denoised = model(x, sigmas[i] * s_in, **extra_args)
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\stable-diffusion\webui\modules\sd_samplers_cfg_denoiser.py", line 169, in forward
    x_out = self.inner_model(x_in, sigma_in, cond=make_condition_dict(cond_in, image_cond_in))
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\stable-diffusion\webui\repositories\k-diffusion\k_diffusion\external.py", line 112, in forward
    eps = self.get_eps(input * c_in, self.sigma_to_t(sigma), **kwargs)
  File "D:\stable-diffusion\webui\repositories\k-diffusion\k_diffusion\external.py", line 138, in get_eps
    return self.inner_model.apply_model(*args, **kwargs)
  File "D:\stable-diffusion\webui\modules\sd_models_xl.py", line 37, in apply_model
    return self.model(x, t, cond)
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\stable-diffusion\webui\modules\sd_hijack_utils.py", line 17, in <lambda>
    setattr(resolved_obj, func_path[-1], lambda *args, **kwargs: self(*args, **kwargs))
  File "D:\stable-diffusion\webui\modules\sd_hijack_utils.py", line 28, in __call__
    return self.__orig_func(*args, **kwargs)
  File "D:\stable-diffusion\webui\repositories\generative-models\sgm\modules\diffusionmodules\wrappers.py", line 28, in forward
    return self.diffusion_model(
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\stable-diffusion\webui\repositories\generative-models\sgm\modules\diffusionmodules\openaimodel.py", line 984, in forward
    emb = self.time_embed(t_emb)
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\container.py", line 217, in forward
    input = module(input)
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "D:\stable-diffusion\webui\extensions-builtin\Lora\networks.py", line 429, in network_Linear_forward
    return originals.Linear_forward(self, input)
  File "d:\stable-diffusion\system\python\lib\site-packages\torch\nn\modules\linear.py", line 114, in forward
    return F.linear(input, self.weight, self.bias)
RuntimeError: Expected all tensors to be on the same device, but found at least two devices, cpu and cuda:0! (when checking argument for argument mat1 in method wrapper_CUDA_addmm)
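For context, the final RuntimeError is PyTorch's generic device-mismatch error: F.linear is being called with an input tensor on one device (cpu) and a weight on another (cuda:0). The traceback shows it happening inside the UNet's time_embed Sequential, reached through the Lora extension's network_Linear_forward hook, which suggests part of the model stays on the CPU once the TensorRT engine is selected; that reading is an assumption drawn from the trace, not a confirmed root cause. A minimal sketch, independent of the webui code, reproduces the same message and shows that moving every tensor onto the same device makes the call succeed:

import torch
import torch.nn.functional as F

if torch.cuda.is_available():
    # A linear layer whose weights live on the GPU, as the UNet's time_embed does here.
    linear = torch.nn.Linear(320, 1280).to("cuda:0")

    # A time-embedding input that was (incorrectly) left on the CPU.
    t_emb_cpu = torch.randn(1, 320)  # device: cpu

    try:
        F.linear(t_emb_cpu, linear.weight, linear.bias)
    except RuntimeError as e:
        # Prints: "Expected all tensors to be on the same device,
        # but found at least two devices, cpu and cuda:0! ..."
        print(e)

    # Moving the input to the weight's device resolves the mismatch.
    out = F.linear(t_emb_cpu.to(linear.weight.device), linear.weight, linear.bias)
    print(out.shape)  # torch.Size([1, 1280])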