SCUTlihaoyu / open-chat-video-editor

An open-source tool for automatic short video generation

AssertionError: Torch not compiled with CUDA enabled

1893945 opened this issue · comments

████████████████████████████████████████| 3.46G/3.46G [07:25<00:00, 11.1MB/s]
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ E:\voice2face\open-chat-video-editor\app\app.py:27 in │
│ │
│ 24 │ │ # cfg_path = "configs/video_by_retrieval_text_by_chatgpt_zh.yaml" │
│ 25 │ │ cfg.merge_from_file(cfg_path) │
│ 26 │ │ print(cfg) │
│ ❱ 27 │ │ editor = build_editor(cfg) │
│ 28 │ │ def run_Text2VideoEditor_logit(input_text, style_text): │
│ 29 │ │ │ out_video = "test.mp4" │
│ 30 │ │ │ out_text,video_out = editor.run(input_text,style_text,out_video) │
│ │
│ E:\voice2face\open-chat-video-editor\editor\build.py:14 in build_editor │
│ │
│ 11 │ logger.info('visual_gen_type: {}'.format(visual_gen_type)) │
│ 12 │ # image_by_diffusion video_by_retrieval image_by_retrieval_then_diffusion video_by_ │
│ 13 │ if visual_gen_type in ["image_by_retrieval","image_by_diffusion","image_by_retrieval │
│ ❱ 14 │ │ vision_generator = build_image_generator(cfg) │
│ 15 │ else: │
│ 16 │ │ vision_generator = build_video_generator(cfg) │
│ 17 │
│ │
│ E:\voice2face\open-chat-video-editor\generator\image\build.py:33 in build_image_generator │
│ │
│ 30 │ │ image_generator = ImageGenbyRetrieval(cfg,query_model,index_server,meta_server) │
│ 31 │ elif visual_gen_type == "image_by_diffusion": │
│ 32 │ │ logger.info("start build_img_gen_model") │
│ ❱ 33 │ │ img_gen_model = build_img_gen_model(cfg) │
│ 34 │ │ image_generator = ImageGenByDiffusion(cfg,img_gen_model) │
│ 35 │ elif visual_gen_type == "image_by_retrieval_then_diffusion": │
│ 36 │ │ # build img retrieval generator │
│ │
│ E:\voice2face\open-chat-video-editor\generator\image\generation\build.py:6 in │
│ build_img_gen_model │
│ │
│ 3 def build_img_gen_model(cfg): │
│ 4 │ │
│ 5 │ model_id = cfg.video_editor.visual_gen.image_by_diffusion.model_id │
│ ❱ 6 │ model = StableDiffusionImgModel(model_id) │
│ 7 │ return model │
│ 8 │
│ 9 def build_img2img_gen_model(cfg): │
│ │
│ E:\voice2face\open-chat-video-editor\generator\image\generation\stable_diffusion.py:10 in │
│ __init__ │
│ │
│ 7 │ │ self.model_id = model_id │
│ 8 │ │ self.pipe = StableDiffusionPipeline.from_pretrained(self.model_id, torch_dtype=t │
│ 9 │ │ self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.schedule │
│ ❱ 10 │ │ self.pipe = self.pipe.to("cuda") │
│ 11 │ │
│ 12 │ def run(self,prompt): │
│ 13 │ │ image = self.pipe(prompt).images[0] │
│ │
│ E:\voice2face\open-chat-video-editor\enve\lib\site-packages\diffusers\pipelines\pipeline_utils.p │
│ y:643 in to │
│ │
│ 640 │ │ │
│ 641 │ │ is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded │
│ 642 │ │ for module in modules: │
│ ❱ 643 │ │ │ module.to(torch_device, torch_dtype) │
│ 644 │ │ │ if ( │
│ 645 │ │ │ │ module.dtype == torch.float16 │
│ 646 │ │ │ │ and str(torch_device) in ["cpu"] │
│ │
│ E:\voice2face\open-chat-video-editor\enve\lib\site-packages\torch\nn\modules\module.py:1145 in │
│ to │
│ │
│ 1142 │ │ │ │ │ │ │ non_blocking, memory_format=convert_to_format) │
│ 1143 │ │ │ return t.to(device, dtype if t.is_floating_point() or t.is_complex() else No │
│ 1144 │ │ │
│ ❱ 1145 │ │ return self._apply(convert) │
│ 1146 │ │
│ 1147 │ def register_full_backward_pre_hook( │
│ 1148 │ │ self, │
│ │
│ E:\voice2face\open-chat-video-editor\enve\lib\site-packages\torch\nn\modules\module.py:797 in │
│ _apply │
│ │
│ 794 │ │
│ 795 │ def _apply(self, fn): │
│ 796 │ │ for module in self.children(): │
│ ❱ 797 │ │ │ module._apply(fn) │
│ 798 │ │ │
│ 799 │ │ def compute_should_use_set_data(tensor, tensor_applied): │
│ 800 │ │ │ if torch._has_compatible_shallow_copy_type(tensor, tensor_applied): │
│ │
│ E:\voice2face\open-chat-video-editor\enve\lib\site-packages\torch\nn\modules\module.py:820 in │
│ _apply │
│ │
│ 817 │ │ │ # track autograd history of param_applied, so we have to use │
│ 818 │ │ │ # with torch.no_grad(): │
│ 819 │ │ │ with torch.no_grad(): │
│ ❱ 820 │ │ │ │ param_applied = fn(param) │
│ 821 │ │ │ should_use_set_data = compute_should_use_set_data(param, param_applied) │
│ 822 │ │ │ if should_use_set_data: │
│ 823 │ │ │ │ param.data = param_applied │
│ │
│ E:\voice2face\open-chat-video-editor\enve\lib\site-packages\torch\nn\modules\module.py:1143 in │
│ convert │
│ │
│ 1140 │ │ │ if convert_to_format is not None and t.dim() in (4, 5): │
│ 1141 │ │ │ │ return t.to(device, dtype if t.is_floating_point() or t.is_complex() els │
│ 1142 │ │ │ │ │ │ │ non_blocking, memory_format=convert_to_format) │
│ ❱ 1143 │ │ │ return t.to(device, dtype if t.is_floating_point() or t.is_complex() else No │
│ 1144 │ │ │
│ 1145 │ │ return self._apply(convert) │
│ 1146 │
│ │
│ E:\voice2face\open-chat-video-editor\enve\lib\site-packages\torch\cuda\__init__.py:239 in │
│ _lazy_init │
│ │
│ 236 │ │ │ │ "Cannot re-initialize CUDA in forked subprocess. To use CUDA with " │
│ 237 │ │ │ │ "multiprocessing, you must use the 'spawn' start method") │
│ 238 │ │ if not hasattr(torch._C, '_cuda_getDeviceCount'): │
│ ❱ 239 │ │ │ raise AssertionError("Torch not compiled with CUDA enabled") │
│ 240 │ │ if _cudart is None: │
│ 241 │ │ │ raise AssertionError( │
│ 242 │ │ │ │ "libcudart functions unavailable. It looks like you have a broken build? │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
AssertionError: Torch not compiled with CUDA enabled
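
The traceback shows where the failure happens: stable_diffusion.py hard-codes self.pipe = self.pipe.to("cuda"), so on a CPU-only PyTorch build (or a machine without an NVIDIA GPU) building the pipeline always hits this assertion. A minimal workaround sketch, assuming only this class needs to change: pick the device at runtime and fall back to fp32 on CPU, since fp16 is only useful on GPU.

import torch
from diffusers import StableDiffusionPipeline, DPMSolverMultistepScheduler

class StableDiffusionImgModel:
    def __init__(self, model_id):
        self.model_id = model_id
        # use CUDA only when it is actually available; otherwise run on CPU
        device = "cuda" if torch.cuda.is_available() else "cpu"
        # fp16 weights only make sense on GPU; CPU inference needs fp32
        dtype = torch.float16 if device == "cuda" else torch.float32
        self.pipe = StableDiffusionPipeline.from_pretrained(self.model_id, torch_dtype=dtype)
        self.pipe.scheduler = DPMSolverMultistepScheduler.from_config(self.pipe.scheduler.config)
        self.pipe = self.pipe.to(device)

    def run(self, prompt):
        # diffusion on CPU works but is far slower than on a GPU
        image = self.pipe(prompt).images[0]
        return image

This avoids the assertion; generation will still be very slow without a GPU.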

Python version 3.9.2
no GPU

CPU-only build of PyTorch:

pip3 install torch torchvision torchaudio

toolz 0.12.0
torch 2.0.1
torchaudio 2.0.2
torchvision 0.15.2

Command used to run: python app/app.py --func Text2VideoEditor --cfg configs\text2video\image_by_diffusion_text_by_chatgpt_zh.yaml
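
Note: on Windows, a plain pip3 install torch torchvision torchaudio pulls the CPU-only wheels from PyPI, which is exactly the build that raises "Torch not compiled with CUDA enabled". On a machine with an NVIDIA GPU, the CUDA wheels would be needed instead, e.g. for torch 2.0.x:

pip3 install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cu118

With no GPU at all, reinstalling will not help; the code has to target the CPU, as in the sketch above.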

I ran into this problem too. Have you managed to fix it?

It's a dependency conflict between packages; you need to change some package versions, or avoid pinning exact versions.

I forget the details. I don't have a GPU, so I stopped messing with it after that.

Fair enough. I'll wait and hope the author fixes it.