bryandlee / animegan2-pytorch

PyTorch implementation of AnimeGANv2

Video anime

fengwang opened this issue

I managed to convert a normal video to a cartoon with the Face Portrait v2 model, and the result looks good to me. I therefore suggest adding a function/script that converts videos to their cartoon versions. The video-to-frames and frames-to-video steps can easily be implemented with imageio or ffmpeg.
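For the video I/O side, here is a minimal imageio sketch (assuming the imageio and imageio-ffmpeg packages are installed; the file names and the stylize placeholder are hypothetical):

import imageio

def stylize(frame):
    # hypothetical placeholder for the per-frame model call
    return frame

reader = imageio.get_reader('input.mp4')
fps = reader.get_meta_data()['fps']  # preserve the source frame rate
writer = imageio.get_writer('stylized.mp4', fps=fps)
for frame in reader:  # each frame is an RGB uint8 ndarray
    writer.append_data(stylize(frame))
writer.close()
reader.close()

The complete OpenCV-based script: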

# -*- coding: utf-8 -*-
# @Time    : 2021/11/9 19:51
# @Author  :  Xin  Chen (Asher Chan)
# @File    : AnimeGANv2_pytorch_Video2Anime.py
# @Software: PyCharm

import argparse
import os

import cv2
import numpy as np
import torch
from tqdm import tqdm

from model import Generator

def check_folder(path):
    if not os.path.exists(path):
        os.makedirs(path)
    return path

def process_image(img, x32=True):
    h, w = img.shape[:2]
    if x32:  # resize so both sides are multiples of 32
        def to_32s(x):
            return 256 if x < 256 else x - x % 32
        img = cv2.resize(img, (to_32s(w), to_32s(h)))
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB).astype(np.float32) / 127.5 - 1.0  # BGR -> RGB, scale to [-1, 1]
    return img


def animeganv2_face2anime(checkpoint_dir, video_path, out_path, dev):
    out_path = check_folder(out_path)
    video_in = cv2.VideoCapture(video_path)
    total = int(video_in.get(cv2.CAP_PROP_FRAME_COUNT))
    fps = video_in.get(cv2.CAP_PROP_FPS)  # keep fractional frame rates (e.g. 29.97) so the output duration matches the input
    width = int(video_in.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(video_in.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fourcc = cv2.VideoWriter_fourcc(*"mp4v")
    out_name = os.path.basename(video_path).rsplit('.', 1)[0] + "_AnimeGANv2.mp4"
    video_out = cv2.VideoWriter(os.path.join(out_path, out_name), fourcc, fps, (width, height))

    device = 'cuda:0' if dev == 'gpu' else 'cpu'
    net = Generator()
    net.load_state_dict(torch.load(checkpoint_dir, map_location=device))
    net.to(device).eval()

    pbar = tqdm(total=total, ncols=80)
    pbar.set_description(f"Making: {out_name}")
    while True:
        ret, frame = video_in.read()
        if not ret:
            break
        # preprocess to a 1x3xHxW float tensor in [-1, 1] on the target device
        frame = torch.from_numpy(process_image(frame)).to(device)
        frame = frame.permute(2, 0, 1).unsqueeze(0)
        with torch.no_grad():
            out = net(frame, False)
        # back to HxWxC uint8 RGB, resized to the writer's frame size
        out = out.squeeze(0).permute(1, 2, 0).cpu()
        out = ((out + 1.) * 127.5).numpy().clip(0, 255).astype(np.uint8)
        out = cv2.resize(out, (width, height))
        video_out.write(cv2.cvtColor(out, cv2.COLOR_RGB2BGR))
        pbar.update(1)
    pbar.close()
    video_in.release()
    video_out.release()

def parse_args():
    desc = "AnimeGANv2"
    parser = argparse.ArgumentParser(description=desc)
    parser.add_argument('--checkpoint_dir', type=str, default='weights/face_paint_512_v2.pt', help='Path to the generator checkpoint')
    parser.add_argument('--input_video_dir', type=str, default='input_video', help='Directory containing the input videos')
    parser.add_argument('--output_video_dir', type=str, default='output_video', help='Directory for the output videos')
    parser.add_argument('--device', type=str.lower, default='gpu', choices=['cpu', 'gpu'], help='Running device of AnimeGANv2')
    return parser.parse_args()


if __name__ == '__main__':
    arg = parse_args()
    print(f"AnimeGANv2 model:  {arg.checkpoint_dir}")
    for v in os.listdir(arg.input_video_dir):
        if v.rsplit('.',1)[-1].lower() in ['mp4', 'avi']:
            print(f" video name: {v}")
            animeganv2_face2anime(arg.checkpoint_dir, os.path.join(arg.input_video_dir, v), arg.output_video_dir, arg.device)

You can run the script with the command:
python AnimeGANv2_pytorch_Video2Anime.py --checkpoint_dir weights/face_paint_512_v2.pt --input_video_dir input_video --output_video_dir output_video --device gpu
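
Note that cv2.VideoWriter writes a video-only file, so the script drops any audio track. If the source has audio, it can be copied back afterwards with ffmpeg (file names here are illustrative):

ffmpeg -i output_video/demo_AnimeGANv2.mp4 -i input_video/demo.mp4 -map 0:v -map 1:a -c copy demo_with_audio.mp4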