sail-sg / lorahub

The official repository of paper "LoraHub: Efficient Cross-Task Generalization via Dynamic LoRA Composition".

AttributeError: 'GenerationConfig' object has no attribute 'cache_implementation'

qxpBlog opened this issue · comments

@mavenlin @SivilTaram @P2333 @chenxwh @Boyu-Mi I want to evaluate my pruned LLaMA-7B model (saved with torch.save), but the following error happened:
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮
│ /home/iotsc01/xinpengq/LLM-Pruner-main/lorahub-main/reproduce_bbh.py:181 in │
│ │
│ 178 │ │ # unzip │
│ 179 │ │ os.system("unzip data_bbh.zip") │
│ 180 │ # evaluate the model │
│ ❱ 181 │ evaluate_flan_results_zero_shot("data_bbh", args.ckpt) │
│ 182 │ # # five shot for flan models │
│ 183 │ # evaluate_flan_results_few_shot("data_bbh", "google/flan-t5-large") │
│ 184 │ # # five shot for lorahub models │
│ │
│ /home/iotsc01/xinpengq/LLM-Pruner-main/lorahub-main/reproduce_bbh.py:63 in │
│ evaluate_flan_results_zero_shot │
│ │
│ 60 │ │ │ │ return_tensors="pt", │
│ 61 │ │ │ │ padding=True, │
│ 62 │ │ │ ).to(model.device) │
│ ❱ 63 │ │ │ outputs = model.generate( │
│ 64 │ │ │ │ input_ids=inputs["input_ids"], max_new_tokens=5 │
│ 65 │ │ │ ) │
│ 66 │ │ │ outputs = tokenizer.batch_decode( │
│ │
│ /home/iotsc01/anaconda3/envs/xinpengq_env/lib/python3.10/site-packages/torch/utils/_contextlib.p │
│ y:115 in decorate_context │
│ │
│ 112 │ @functools.wraps(func) │
│ 113 │ def decorate_context(*args, **kwargs): │
│ 114 │ │ with ctx_factory(): │
│ ❱ 115 │ │ │ return func(*args, **kwargs) │
│ 116 │ │
│ 117 │ return decorate_context │
│ 118 │
│ │
│ /home/iotsc01/anaconda3/envs/xinpengq_env/lib/python3.10/site-packages/transformers/generation/u │
│ tils.py:1456 in generate │
│ │
│ 1453 │ │ │
│ 1454 │ │ # if we don't pass past_key_values and a cache_implementation is specified │
│ 1455 │ │ │
│ ❱ 1456 │ │ if generation_config.cache_implementation in NEED_SETUP_CACHE_CLASSES_MAPPING an │
│ 1457 │ │ │ "past_key_values", False │
│ 1458 │ │ ): │
│ 1459 │ │ │ cache_cls = NEED_SETUP_CACHE_CLASSES_MAPPING[generation_config.cache_impleme │
╰──────────────────────────────────────────────────────────────────────────────────────────────────╯
AttributeError: 'GenerationConfig' object has no attribute 'cache_implementation'

How can I solve it? The following is my code:

import argparse
import sys
sys.path.append("../LLaMA-Factory-main/src")

import torch
from LLMPruner.peft import PeftModel
from lorahub.algorithm import load_dataset, lorahub_inference
import os
import json
from lorahub.algorithm import lorahub_learning, lorahub_inference
from lorahub.constant import LORA_MODULE_NAMES
import random
from random import shuffle
from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer, AutoModel, AutoTokenizer


def evaluate_flan_results_zero_shot(folder, model_path, **kwargs):
    sub_dirs = os.listdir(folder)
    pruned_dict = torch.load(model_path, map_location='cpu')
    tokenizer, model = pruned_dict['tokenizer'], pruned_dict['model']
    model.config.pad_token_id = tokenizer.pad_token_id = 0
    model.config.bos_token_id = 1
    model.config.eos_token_id = 2
    res = [0, 0]
    for sub_dir in sub_dirs:
        test_file_path = os.path.join(folder, sub_dir, "zero_shot.jsonl")
        task_inputs, task_outputs = [], []
        for line in open(test_file_path, "r", encoding="utf-8"):
            example = json.loads(line)
            task_inputs.append(example["context"])
            task_outputs.append(example["completion"])
        print("Evaluating on task (zero shot): ", sub_dir)
        # _,task_perf = lorahub_inference(task_inputs,
        #                   model,
        #                   tokenizer,
        #                   16,
        #                   task_outputs)
        
        def accuracy_score(outputs, ground_truths):
            correct = 0
            total = 0
            for output, truth in zip(outputs, ground_truths):
                # if output.strip().lower().replace(".", "") == truth.strip().lower().replace(".", ""):
                if truth.strip().replace(".", "") in output.strip().replace(".", ""):
                    correct += 1
                total += 1
            return correct / total * 100

        example_predictions = []
           
        # process dataset
        dataset = load_dataset(task_inputs, task_outputs, tokenizer)
        # use gpu if available
        device = "cuda" if torch.cuda.is_available() else "cpu"
        model = model.to(device)
        for i in range(0, len(dataset["input"]), 4):
            inputs = tokenizer(
                dataset["input"][i : i + 4],
                max_length=2048,
                return_tensors="pt",
                padding=True,
            ).to(model.device)
            outputs = model.generate(
                input_ids=inputs["input_ids"], max_new_tokens=5
            )
            outputs = tokenizer.batch_decode(
                outputs.to("cpu"), skip_special_tokens=True
            )
            example_predictions.extend(outputs)
        for i in range(len(example_predictions)):
            index = example_predictions[i].find("A:")
            if index != -1:
                example_predictions[i] = example_predictions[i][index+2:]
            
        if task_outputs is not None:
            task_perf = accuracy_score(example_predictions, task_outputs)
        else:
            task_perf = None
        print("***",task_perf,"***")
        print(example_predictions)
        res[0] += 1
        res[1] += task_perf
        torch.cuda.empty_cache()
    print("result avg:",res[1]/res[0])

def evaluate_flan_results_few_shot(folder, flan_model_name):
    sub_dirs = os.listdir(folder)

    for sub_dir in sub_dirs:
        test_file_path = os.path.join(folder, sub_dir, "few_shot.jsonl")
        task_inputs, task_outputs = [], []
        for line in open(test_file_path, "r", encoding="utf-8"):
            example = json.loads(line)
            task_inputs.append(example["context"])
            task_outputs.append(example["completion"])
        print("Evaluating on task (five shot): ", sub_dir)
        lorahub_inference(task_inputs,
                          flan_model_name,
                          flan_model_name,
                          16,
                          task_outputs)


def evaluate_lorahub_results_few_shot(folder, flan_model_name):
    sub_dirs = os.listdir(folder)

    # 5 seeds used in our experiments
    for sub_dir in sub_dirs:
        # construct the few-shot examples for lorahub learning
        example_inputs, examples_outputs = [], []
        example_file_path = os.path.join(folder, sub_dir, "example.jsonl")
        for line in open(example_file_path, "r", encoding="utf-8"):
            example = json.loads(line)
            example_inputs.append(example["context"])
            examples_outputs.append(example["completion"])
            
        # random select 5 examples for each task
        random.seed(42)
        shuffled_set = list(zip(example_inputs, examples_outputs))
        random.shuffle(shuffled_set)
        example_inputs, examples_outputs = zip(*shuffled_set)
        # take the first 5 examples
        example_inputs, examples_outputs = example_inputs[:5], examples_outputs[:5]

        # load the zero-shot examples for evaluation
        test_file_path = os.path.join(folder, sub_dir, "zero_shot.jsonl")
        task_inputs, task_outputs = [], []
        for line in open(test_file_path, "r", encoding="utf-8"):
            example = json.loads(line)
            task_inputs.append(example["context"])
            task_outputs.append(example["completion"])

        task_perf_list = []
        for seed in range(1, 6):
            random.seed(seed)

            def get_lora_module_list():
                return random.sample(LORA_MODULE_NAMES, 20)
            # get a list of modules to be used in the composition
            modules = get_lora_module_list()

            # perform LoRAHub learning
            module_weights, model, tokenizer = lorahub_learning(lora_module_list=modules,
                                                                example_inputs=example_inputs,
                                                                example_outputs=examples_outputs,
                                                                max_inference_step=40,
                                                                batch_size=5)

            print("module_weights:", module_weights)

            """
            Perform inference to get predictions
            """
            _, task_acc = lorahub_inference(example_inputs=task_inputs,
                                            model_or_name_path=model,
                                            tokenizer_or_tokenizer_path=tokenizer,
                                            batch_size=10,
                                            # can set as None if you do not have the ground truth
                                            example_outputs=task_outputs)
            task_perf_list.append(task_acc)
        avg_perf, max_perf = sum(task_perf_list) / len(task_perf_list), max(task_perf_list)
        print("average perf:", avg_perf, "best perf:", max_perf)


if __name__ == "__main__":
    
    parser = argparse.ArgumentParser(description='Tuning Pruned LLaMA (huggingface version)')
    parser.add_argument('--base_model', type=str, default="/home/iotsc01/LLM-Pruner-main/llama-7b-hf", help='base model name')
    parser.add_argument('--ckpt', type=str, default=None, help='pruned model path')
    parser.add_argument('--lora_ckpt', type=str, default=None)
    parser.add_argument('--max_seq_len', type=int, default=128, help='max sequence length')
    parser.add_argument('--test_mod', type=str, default="pruned", help='choose from [pruned, tuned, base]')
    args = parser.parse_args()
    
    if not os.path.exists("data_bbh"):
        # download dataset
        os.system("wget https://github.com/sail-sg/lorahub/releases/download/0.1/data_bbh.zip")
        # unzip
        os.system("unzip data_bbh.zip")
    # evaluate the model
    evaluate_flan_results_zero_shot("data_bbh", args.ckpt)
    # # five shot for flan models
    # evaluate_flan_results_few_shot("data_bbh", "google/flan-t5-large")
    # # five shot for lorahub models
    # evaluate_lorahub_results_few_shot("data_bbh", "google/flan-t5-large")
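
A minimal sketch of one possible workaround, assuming the root cause is that torch.save pickled the whole model object, including a GenerationConfig created by an older transformers release that predates the cache_implementation field, while the current environment runs a newer transformers whose generate() reads that field. Rebuilding the generation config from the model config after loading restores the current schema; the checkpoint path below is a placeholder for the value passed as --ckpt.

# Sketch: refresh the pickled GenerationConfig after torch.load so that newer
# attributes such as `cache_implementation` exist before calling generate().
import torch
from transformers import GenerationConfig

pruned_dict = torch.load("prune_log/llama_prune/pytorch_model.bin", map_location="cpu")  # placeholder path
tokenizer, model = pruned_dict["tokenizer"], pruned_dict["model"]

# Recreate the generation config with the currently installed transformers version.
model.generation_config = GenerationConfig.from_model_config(model.config)

inputs = tokenizer("Q: What is 1 + 1?\nA:", return_tensors="pt")
outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=5)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))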

commented

@qxpBlog Thanks for your interest in our work! It seems that you have modified the original code to use a custom model from another library, LLMPruner. Can you first confirm that the forward pass (or the model.generate function) works well for the model itself?

Yes, my LLMPruner module can use the model.generate() function:

    tokenizer = LlamaTokenizer.from_pretrained(args.base_model)
    model = LlamaForCausalLM.from_pretrained(
        args.base_model,
        low_cpu_mem_usage=True if args.torch_version >=1.9 else False
    )

    if args.device != "cpu":
        model.half()
    model.to(args.device)

    if args.test_before_train:
        logger.log("\n==================Generation Results before Pruning================\n")
        model.eval()
        with torch.no_grad():
            for prompt in prompts:
                input_ids = tokenizer(prompt, return_tensors="pt")['input_ids'].to(args.device)

                generation_output = model.generate(
                    input_ids=input_ids,
                    do_sample=True,
                    top_k=50,
                    max_length=args.max_seq_len,
                    top_p=args.top_p,
                    temperature=args.temperature,
                )
                
                result = tokenizer.decode(generation_output[0])
                logger.log(result)
    
        ppl = PPLMetric(model, tokenizer, ['wikitext2', 'ptb'], args.max_seq_len, device=args.device)
        logger.log("PPL before pruning: {}".format(ppl))
commented

The example generate call does not share the hyper-parameter settings your code uses for the flan zero-shot inference. Also, since you have commented out the LoRAHub part of your code, I do not think it is what breaks your code. You may experiment more with the model's generate function to see whether those exact settings work well.
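
To isolate the failure outside the evaluation loop, a minimal sketch that reloads the torch.save checkpoint and calls generate() with exactly the settings used in evaluate_flan_results_zero_shot (padding=True, max_new_tokens=5); the checkpoint path is a placeholder for the value passed as --ckpt:

# Sketch: reproduce the failing generate() call on the reloaded model with the
# same settings as the zero-shot evaluation loop.
import torch

pruned_dict = torch.load("prune_log/llama_prune/pytorch_model.bin", map_location="cpu")  # placeholder path
tokenizer, model = pruned_dict["tokenizer"], pruned_dict["model"]
model.config.pad_token_id = tokenizer.pad_token_id = 0
model.config.bos_token_id = 1
model.config.eos_token_id = 2
model = model.to("cuda" if torch.cuda.is_available() else "cpu")
model.eval()

inputs = tokenizer(
    ["Q: Is the sky blue?\nA:", "Q: What is 2 + 2?\nA:"],
    max_length=2048,
    return_tensors="pt",
    padding=True,
).to(model.device)
with torch.no_grad():
    outputs = model.generate(input_ids=inputs["input_ids"], max_new_tokens=5)
print(tokenizer.batch_decode(outputs.to("cpu"), skip_special_tokens=True))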

Thanks for the answer, but I do not think the generate configuration is the key problem. When I finish pruning my model and directly call evaluate_flan_results_zero_shot, instead of saving the model with `torch.save` and then loading it back, there is no error:

import os
import gc
import sys
import time
import json
import copy
import random
import argparse
from typing import Tuple

import torch
import numpy as np
from transformers import LlamaTokenizer, GenerationConfig, LlamaConfig, AutoConfig
from LLMPruner.models.hf_llama.modeling_llama import LlamaForCausalLM, LlamaRMSNorm, LlamaAttention, LlamaMLP

import LLMPruner.torch_pruning as tp 
from LLMPruner.pruner import hf_llama_pruner as llama_pruner
from LLMPruner.utils.logger import LoggerWithDepth
from LLMPruner.evaluator.ppl import PPLMetric
from LLMPruner.datasets.example_samples import get_examples
from LLMPruner.templates.prompts import prompts
import lorahub1.bbh

def set_random_seed(seed):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    
def main(args):
    set_random_seed(args.seed)

    logger = LoggerWithDepth(
        env_name="{}".format(args.save_ckpt_log_name), 
        config=args.__dict__,
        root_dir='prune_log',
        setup_sublogger=True
    )

    tokenizer = LlamaTokenizer.from_pretrained(args.base_model)
    model = LlamaForCausalLM.from_pretrained(
        args.base_model,
        low_cpu_mem_usage=True if args.torch_version >=1.9 else False
    )

    if args.device != "cpu":
        model.half()
    model.to(args.device)

    pruner_type = args.pruner_type.lower()
    assert pruner_type in ['random', 'l2', 'l1', 'taylor']

    for param in model.parameters():
        param.requires_grad_(True)
    before_pruning_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    
    forward_prompts = torch.tensor([
        [    1,   306,  4658,   278,  6593,   310,  2834,   338],
        [    1,  3439, 17632,  1925, 29892,   278,  6368,   310],
    ]).to(args.device) # Only for building the dependency graph. Any input will be fine since the computation results are not taken into consideration.

    if pruner_type == 'random':
        imp = tp.importance.RandomImportance()
    elif pruner_type == 'l1':
        imp = llama_pruner.MagnitudeImportance(p=1)
    elif pruner_type == 'l2':
        imp = llama_pruner.MagnitudeImportance(p=2)
    elif pruner_type == 'taylor':
        imp = llama_pruner.TaylorImportance(group_reduction=args.grouping_strategy, taylor=args.taylor)
    else:
        raise NotImplementedError

    logger.log("Use {} pruner...".format(pruner_type))
    
    # block-wise pruning
    if args.block_wise:
        logger.log("Start Pruning")
        if len(args.pruning_ratio) == 1 and args.pruning_ratio[0] != 0:
            args.pruning_ratio *= 32
        for i, ratio in enumerate(args.pruning_ratio):
            kwargs = {
                "importance": imp,
                "global_pruning": args.global_pruning,
                "iterative_steps": args.iterative_steps,
                "ch_sparsity": ratio, 
                "ignored_layers":[],
                "channel_groups": {
                },
                "consecutive_groups": {
                    layer.self_attn.q_proj: layer.self_attn.head_dim for layer in model.model.layers
                },
                "customized_pruners": {
                    LlamaRMSNorm: llama_pruner.hf_rmsnorm_pruner,
                },
                "root_module_types": None, 
                "root_instances": [model.model.layers[i].self_attn.q_proj] +
                                [model.model.layers[i].mlp.gate_proj]
            }
            logger.log("Pruning Attention Layer = {}".format(i))
            logger.log("Pruning MLP Layer = {}".format(i))

            pruner = tp.pruner.MetaPruner(
                model,
                forward_prompts,
                **kwargs
            )
            model.zero_grad()
            logger.log("Pruning_ratio:{}".format(ratio))
            for i in range(args.iterative_steps):

                if pruner_type in ['taylor']:
                    example_prompts = get_examples('bookcorpus', tokenizer, args.num_examples, seq_len = 64).to(args.device)
                    logger.log("Start Backwarding in iterative steps = {}...".format(i))
                    if args.taylor in ['param_mix', 'param_second']:
                        for j in range(args.num_examples):
                            batch_input = example_prompts[j].unsqueeze(0)
                            loss = model(batch_input, labels=batch_input).loss
                            logger.log("Loss = {}".format(loss))
                            loss.backward()

                            for module_param in model.parameters():
                                module_param.grad = module_param.grad * module_param.grad / args.num_examples
                                if hasattr(module_param, 'acc_grad'):
                                    module_param.acc_grad += module_param.grad
                                else:
                                    module_param.acc_grad = copy.deepcopy(module_param.grad)
                            model.zero_grad()
                            del loss.grad
                        
                    loss = model(example_prompts, labels=example_prompts).loss
                    logger.log("Loss = {}".format(loss))
                    loss.backward()
                # pruning entry point
                pruner.step()

                after_pruning_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
                logger.log("After Iter {}/{}, #parameters: {}".format(i+1, args.iterative_steps, after_pruning_parameters))
            
                # modify inference-related attributes
                for layer in model.model.layers:
                    layer.self_attn.num_heads = layer.self_attn.q_proj.weight.data.shape[0] // layer.self_attn.head_dim

            # Clean the gradient in the model
            model.zero_grad()
            for name, module in model.named_parameters():
                if 'weight' in name:
                    module.grad = None

            del pruner

    elif args.channel_wise:
        kwargs = {
            "importance": imp,
            "global_pruning": args.global_pruning,
            "iterative_steps": args.iterative_steps,
            "ch_sparsity": args.pruning_ratio[0], # remove 50% channels, ResNet18 = {64, 128, 256, 512} => ResNet18_Half = {32, 64, 128, 256}
            "ignored_layers":[],
            #"round_to": model.config.num_attention_heads * 2,
            "channel_groups": {
                #layer.self_attn: layer.self_attn.num_heads for layer in model.model.layers
            },
            "customized_pruners": {
                LlamaRMSNorm: llama_pruner.hf_rmsnorm_pruner,
                #LlamaAttention: llama_pruner.hf_attention_pruner,
            },
            "root_module_types": [LlamaRMSNorm, LlamaAttention],
        }

        pruner = tp.pruner.MetaPruner(
            model,
            forward_prompts,
            **kwargs
        )
        model.zero_grad()
        
        logger.log("Start Pruning")
        for i in range(args.iterative_steps):

            if pruner_type in ['taylor']:
                example_prompts = get_examples('bookcorpus', tokenizer, 10, seq_len = 64)
                logger.log("Start Backwarding in iterative steps = {}...".format(i))
                loss = model(example_prompts, labels=example_prompts).loss
                logger.log("Loss = {}".format(loss))
                loss.backward()

            pruner.step()

            after_pruning_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
            logger.log("After Iter {}/{}, #parameters: {}".format(i+1, args.iterative_steps, after_pruning_parameters))

        # Clean the gradient in the model
        model.zero_grad()
        for name, module in model.named_parameters():
            if 'weight' in name:
                module.grad = None

        # modify inference-related attributes
        model.config.hidden_size = model.model.embed_tokens.weight.shape[1]
        model.zero_grad()
        
        del pruner
            
    elif args.layer_wise:
        model.model.layers = model.model.layers[:args.layer]
        after_pruning_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)

    else:
        raise NotImplementedError
    logger.log("#Param before: {}, #Param after: {}, Ratio = {:.4f}%".format(before_pruning_parameters, after_pruning_parameters,  100.0*after_pruning_parameters/before_pruning_parameters))
    
    gc.collect()
    torch.cuda.empty_cache()
    if args.save_model:
        model.half()
        torch.save({
            'model': model, 
            'tokenizer': tokenizer,
        }, logger.best_checkpoint_path)
    
    if args.eval_device != "cuda":
        model.half()
    model.to(args.eval_device)

    model.config.pad_token_id = tokenizer.pad_token_id = 0 
    model.config.bos_token_id = 1
    model.config.eos_token_id = 2

    if args.test_after_train:
        logger.log("\n==================Generation Results After Pruning================\n")
        
        model.eval()
        with torch.no_grad():
            lorahub1.bbh.evaluate_flan_results_zero_shot("data_bbh", model, tokenizer)
        
        logger.log("\n==================Finish================\n")
    

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Pruning LLaMA (huggingface version)')

    # argument for parsing
    parser.add_argument('--base_model', type=str, default="/home/iotsc01/xinpengq/LLM-Pruner-main/llama-7b-hf", help='base model name')
    parser.add_argument('--save_ckpt_log_name', type=str, default="llama_prune", help='the path for save the checkpoint and the log. The final path would be log/{your_name_here}_{pruner_type}_{pruning_ratio}')
    # argument for parsing
    parser.add_argument('--pruning_ratio', nargs='+', type=float, default=[], help='pruning ratio list')
    parser.add_argument('--pruner_type', type=str, default='l2', help='pruner type')

    # argument for generation
    parser.add_argument('--temperature', type=float, default=1.0, help='temperature')
    parser.add_argument('--top_p', type=float, default=0.95, help='top p')
    parser.add_argument('--max_seq_len', type=int, default=128, help='max sequence length')

    # argument for layer-wise pruning/column-wise pruning
    parser.add_argument('--channel_wise', action='store_true', help='channel wise')
    parser.add_argument('--block_wise', action='store_true', help='block wise')
    parser.add_argument('--layer_wise', action='store_true', help='layer wise')
    parser.add_argument('--layer', type=int, default=12, help='remain the previous n layers')

    parser.add_argument('--block_attention_layer_start', type=int, help='start layer of block attention layers', default=3)
    parser.add_argument('--block_attention_layer_end', type=int, help='end layer of block attention layers', default=31)
    parser.add_argument('--block_mlp_layer_start', type=int, help='start layer of block mlp layers', default=3)
    parser.add_argument('--block_mlp_layer_end', type=int, help='end layer of block mlp layers', default=31)

    parser.add_argument('--iterative_steps', type=int, default=1, help="Iteration step for pruning. Default=1")
    parser.add_argument('--grouping_strategy', type=str, default='sum', help='Reduce method for grouping')
    parser.add_argument('--global_pruning', action='store_true', help='whether global pruning')
    parser.add_argument('--taylor', type=str, default='param_first', help='choose from [vectorize, param_second, param_first, param_mix]')
    parser.add_argument('--num_examples', type=int, default=10)

    # general argument
    parser.add_argument('--device', type=str, default="cuda", help='device')
    parser.add_argument('--test_before_train', action='store_true', help='whether test before train')
    parser.add_argument('--eval_device', type=str, default="cuda", help='eval device')
    parser.add_argument('--test_after_train', action='store_true', help='whether test after train')

    parser.add_argument('--seed', type=int, default=42, help='seed')
    parser.add_argument('--save_model', action='store_true', help='if save model')
    args = parser.parse_args()

    torch_version = float('.'.join(torch.__version__.split('.')[:2]))
    args.torch_version = torch_version
    main(args)
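
One way to narrow down the difference between the two paths (direct in-memory evaluation versus torch.save followed by torch.load) is to check whether the GenerationConfig attribute survives the pickle round trip and which transformers version is installed; a rough diagnostic sketch, with the checkpoint path again a placeholder:

# Sketch: compare a freshly constructed GenerationConfig (built by the installed
# transformers) with the one that came back from the pickled checkpoint.
import torch
import transformers
from transformers import GenerationConfig

print("transformers version:", transformers.__version__)
print("fresh config has cache_implementation:",
      hasattr(GenerationConfig(), "cache_implementation"))

pruned_dict = torch.load("prune_log/llama_prune/pytorch_model.bin", map_location="cpu")  # placeholder path
model = pruned_dict["model"]
print("reloaded config has cache_implementation:",
      hasattr(model.generation_config, "cache_implementation"))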