From 5701af6f333524858fdcab339f3e80c891ea610d Mon Sep 17 00:00:00 2001
From: "hongliang.yuan"
Date: Fri, 13 Dec 2024 05:00:25 +0000
Subject: [PATCH] sync deepspeed llm

---
 .../deepspeed/inference/chatbot.py            |   9 +-
 .../deepspeed/requirements.txt                |   7 +-
 .../step2_reward_model_finetuning/main.py     | 161 +++++-------------
 .../step2_reward_model_finetuning/rw_eval.py  |  69 +++-----
 .../training_scripts/llama2/run_llama2_7b.sh  |  12 +-
 5 files changed, 77 insertions(+), 181 deletions(-)

diff --git a/nlp/llm/llama2-7b_reward_sft/deepspeed/inference/chatbot.py b/nlp/llm/llama2-7b_reward_sft/deepspeed/inference/chatbot.py
index 5a4e36895..38b900d7d 100644
--- a/nlp/llm/llama2-7b_reward_sft/deepspeed/inference/chatbot.py
+++ b/nlp/llm/llama2-7b_reward_sft/deepspeed/inference/chatbot.py
@@ -10,7 +10,7 @@ import transformers  # noqa: F401
 import os
 import json
 from transformers import pipeline, set_seed
-from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM
+from transformers import AutoConfig, OPTForCausalLM, AutoTokenizer
 
 
 def parse_args():
@@ -43,10 +43,9 @@ def get_generator(path):
     tokenizer.pad_token = tokenizer.eos_token
 
     model_config = AutoConfig.from_pretrained(path)
-    model_class = AutoModelForCausalLM.from_config(model_config)
-    model = model_class.from_pretrained(path,
-                                        from_tf=bool(".ckpt" in path),
-                                        config=model_config).half()
+    model = OPTForCausalLM.from_pretrained(path,
+                                           from_tf=bool(".ckpt" in path),
+                                           config=model_config).half()
 
     model.config.end_token_id = tokenizer.eos_token_id
     model.config.pad_token_id = model.config.eos_token_id
diff --git a/nlp/llm/llama2-7b_reward_sft/deepspeed/requirements.txt b/nlp/llm/llama2-7b_reward_sft/deepspeed/requirements.txt
index be1589b11..b6774b87f 100644
--- a/nlp/llm/llama2-7b_reward_sft/deepspeed/requirements.txt
+++ b/nlp/llm/llama2-7b_reward_sft/deepspeed/requirements.txt
@@ -2,9 +2,6 @@ datasets>=2.8.0
 sentencepiece>=0.1.97
 protobuf==3.20.3
 accelerate>=0.15.0
-torch>=1.12.0
-deepspeed>=0.9.0
-transformers>=4.31.0,!=4.33.2
+transformers>=4.30.2
 tensorboard
-pandas>=1.5.2
-numpy>=1.22.2
\ No newline at end of file
+numpy==1.22.4
\ No newline at end of file
diff --git a/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/main.py b/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/main.py
index 28e1860db..ec0e2573e 100644
--- a/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/main.py
+++ b/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/main.py
@@ -1,12 +1,12 @@
 #!/usr/bin/env python
 # Copyright (c) Microsoft Corporation.
-# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
-# All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
 # DeepSpeed Team
 import argparse
+import os
 import math
+import sys
 
 import torch
 from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
@@ -19,13 +19,14 @@ from transformers import (
 
 import deepspeed
 from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam
-from deepspeed.accelerator import get_accelerator
 
-from dschat.utils.model.model_utils import create_critic_model
-from dschat.utils.data.data_utils import create_prompt_dataset, DataCollatorReward
-from dschat.utils.utils import print_rank_0, to_device, save_hf_format, set_random_seed, get_all_reduce_mean, get_optimizer_grouped_parameters, save_zero_three_model, load_hf_tokenizer
-from dschat.utils.ds_utils import get_train_ds_config
-from dschat.utils.module.lora import convert_linear_layer_to_lora, convert_lora_to_linear_layer, only_optimize_lora_parameters, make_model_gradient_checkpointing_compatible
+sys.path.append(
+    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
+from utils.model.model_utils import create_critic_model
+from utils.data.data_utils import create_prompt_dataset, DataCollatorReward
+from utils.utils import print_rank_0, to_device, save_hf_format, set_random_seed, get_all_reduce_mean, get_optimizer_grouped_parameters, save_zero_three_model, load_hf_tokenizer
+from utils.ds_utils import get_train_ds_config
+from utils.module.lora import convert_linear_layer_to_lora, convert_lora_to_linear_layer, only_optimize_lora_parameters, make_model_gradient_checkpointing_compatible
 
 
 def parse_args():
@@ -136,21 +137,13 @@ def parse_args():
         '--gradient_checkpointing',
         action='store_true',
         help='Enable HF gradient checkpointing for Actor model.')
-    parser.add_argument(
-        "--dropout",
-        type=float,
-        default=None,
-        help="If dropout configured, use it. "
-        "Otherwise, keep the default dropout configuration of the model.")
+    parser.add_argument('--disable_dropout',
+                        action='store_true',
+                        help='Disable the dropout of the model.')
     # deepspeed features
     parser.add_argument('--offload',
                         action='store_true',
                         help='Enable ZeRO Offload techniques.')
-    parser.add_argument('--dtype',
-                        type=str,
-                        default='fp16',
-                        choices=['fp16', 'bf16'],
-                        help='Training data type')
     parser.add_argument(
         '--zero_stage',
         type=int,
@@ -175,23 +168,6 @@ def parse_args():
         help=
         "Initial LoRA learning rate (after the potential warmup period) to use."
     )
-
-    # Evaluation
-    parser.add_argument("--eval_interval",
-                        type=int,
-                        default=0,
-                        help="If > 0, perform evaluation at this interval")
-    parser.add_argument("--eval_iters",
-                        type=int,
-                        default=100,
-                        help="Maximum evaluation iterations")
-    ## low precision
-    parser.add_argument(
-        '--compute_fp32_loss',
-        action='store_true',
-        help='Relevant for low precision dtypes (fp16, bf16, etc.). '
-        'If specified, loss is calculated in fp32.')
-
     ## Tensorboard logging
     parser.add_argument('--enable_tensorboard',
                         action='store_true',
@@ -199,11 +175,6 @@ def parse_args():
     parser.add_argument('--tensorboard_path',
                         type=str,
                         default="step2_tensorboard")
-    ## Tokenizer
-    parser.add_argument(
-        "--add_eot_token",
-        action='store_true',
-        help="Add <|endoftext|> as additional special token to tokenizer")
 
     parser = deepspeed.add_config_arguments(parser)
     args = parser.parse_args()
@@ -214,10 +185,10 @@ def main():
     args = parse_args()
 
     if args.local_rank == -1:
-        device = torch.device(get_accelerator().device_name())
+        device = torch.device("cuda")
     else:
-        get_accelerator().set_device(args.local_rank)
-        device = torch.device(get_accelerator().device_name(), args.local_rank)
+        torch.cuda.set_device(args.local_rank)
+        device = torch.device("cuda", args.local_rank)
         # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         # torch.distributed.init_process_group(backend='nccl')
         deepspeed.init_distributed()
@@ -225,7 +196,6 @@ def main():
     args.global_rank = torch.distributed.get_rank()
 
     ds_config = get_train_ds_config(offload=args.offload,
-                                    dtype=args.dtype,
                                     stage=args.zero_stage,
                                     enable_tensorboard=args.enable_tensorboard,
                                     tb_path=args.tensorboard_path,
@@ -241,47 +211,19 @@ def main():
     torch.distributed.barrier()
 
     # load_hf_tokenizer will get the correct tokenizer and set padding tokens based on the model family
-    args.end_of_conversation_token = "<|endoftext|>"
-    additional_special_tokens = args.end_of_conversation_token if args.add_eot_token else None
-    tokenizer = load_hf_tokenizer(args.model_name_or_path,
-                                  fast_tokenizer=True,
-                                  add_special_tokens=additional_special_tokens)
+    tokenizer = load_hf_tokenizer(args.model_name_or_path, fast_tokenizer=True)
     rm_model = create_critic_model(args.model_name_or_path,
                                    tokenizer,
                                    ds_config,
                                    args.num_padding_at_beginning,
-                                   dropout=args.dropout,
-                                   zero_stage=args.zero_stage,
-                                   compute_fp32_loss=args.compute_fp32_loss)
-
-    # Model bigscience/bloom-560m has large variance at ln_f.weight parameter
-    # This makes bf16 finetuning hard.
-    # In general, since we are replacing the model head, it makes sense to reset
-    # the LN that precedes it.
-    force_optimize_params = []
-    if "bigscience/bloom-" in args.model_name_or_path:
-        zero_init_enabled = (args.zero_stage == 3)
-        params = [
-            rm_model.rwtranrsformer.ln_f.weight,
-            rm_model.rwtranrsformer.ln_f.bias
-        ]
-        with deepspeed.zero.GatheredParameters(params,
-                                               modifier_rank=0,
-                                               enabled=zero_init_enabled):
-            if deepspeed.comm.get_rank() == 0 or not zero_init_enabled:
-                torch.nn.init.ones_(rm_model.rwtransformer.ln_f.weight)
-                torch.nn.init.zeros_(rm_model.rwtransformer.ln_f.bias)
-        force_optimize_params.extend(
-            ['rwtransformer.ln_f.weight', 'rwtransformer.ln_f.bias'])
+                                   disable_dropout=args.disable_dropout)
 
     if args.lora_dim > 0:
         rm_model = convert_linear_layer_to_lora(rm_model,
                                                 args.lora_module_name,
                                                 args.lora_dim)
         if args.only_optimize_lora:
-            force_optimize_params.append('v_head.weight')
-            rm_model = only_optimize_lora_parameters(rm_model,
-                                                     force_optimize_params)
+            rm_model = only_optimize_lora_parameters(rm_model)
             rm_model = make_model_gradient_checkpointing_compatible(rm_model)
 
     train_phase = 2
@@ -302,40 +244,37 @@ def main():
                                   collate_fn=data_collator,
                                   sampler=train_sampler,
                                   batch_size=args.per_device_train_batch_size)
+    eval_sampler = SequentialSampler(eval_dataset)
     eval_dataloader = DataLoader(eval_dataset,
                                  collate_fn=data_collator,
                                  sampler=eval_sampler,
                                  batch_size=args.per_device_eval_batch_size)
 
-    def evaluation_reward(model, dataloader, eval_iters):
+    def evaluation_reward(model, eval_dataloader):
         model.eval()
         correct_predictions = 0
         total_predictions = 0
-        chosen_scores = 0.
-        rejected_scores = 0.
-        for _step, _batch in enumerate(dataloader):
-            _batch = to_device(_batch, device)
+        scores = 0
+        for step, batch in enumerate(eval_dataloader):
+            batch = to_device(batch, device)
             with torch.no_grad():
-                _outputs = model(**_batch)
+                outputs = model(**batch)
 
-            chosen = _outputs["chosen_mean_scores"]
-            rejected = _outputs["rejected_mean_scores"]
+            chosen = outputs["chosen_mean_scores"]
+            rejected = outputs["rejected_mean_scores"]
             correct_predictions += (chosen > rejected).sum()
             total_predictions += chosen.shape[0]
-            chosen_scores += _outputs["chosen_mean_scores"].mean().float()
-            rejected_scores += _outputs["rejected_mean_scores"].mean().float()
-            if (_step + 1) == eval_iters:
+            scores += outputs["chosen_mean_scores"].mean().float()
+            if step == 99:  # For faster evaluation and debugging
                 break
-        _acc = correct_predictions / total_predictions
-        chosen_scores = chosen_scores / (_step + 1)
-        rejected_scores = rejected_scores / (_step + 1)
+        acc = correct_predictions / total_predictions
+        scores = scores / (step + 1)
         try:
-            _acc = get_all_reduce_mean(_acc).item()
-            chosen_scores = get_all_reduce_mean(chosen_scores).item()
-            rejected_scores = get_all_reduce_mean(rejected_scores).item()
+            acc = get_all_reduce_mean(acc).item()
+            scores = get_all_reduce_mean(scores).item()
         except:
             pass
-        return chosen_scores, rejected_scores, _acc
+        return scores, acc
 
     # Split weights in two groups, one with weight decay and the other not.
     optimizer_grouped_parameters = get_optimizer_grouped_parameters(
@@ -373,14 +312,11 @@ def main():
     print_rank_0(
         f"***** Evaluating reward, Epoch {0}/{args.num_train_epochs} *****",
         args.global_rank)
-    reward_score, reject_score, acc = evaluation_reward(
-        rm_model, eval_dataloader, args.eval_iters)
+    reward_score, acc = evaluation_reward(rm_model, eval_dataloader)
     print_rank_0(
-        f"chosen_last_scores (higher is better) : {reward_score}, "
-        f"rejected_last_scores (lower is better) : {reject_score}, "
-        f"acc (higher is better) : {acc}", args.global_rank)
+        f"chosen_last_scores (higher is better) : {reward_score}, acc (higher is better) : {acc}",
+        args.global_rank)
 
-    total_micro_steps = 0
     for epoch in range(args.num_train_epochs):
         print_rank_0(
             f"Beginning of Epoch {epoch+1}/{args.num_train_epochs}, Total Micro Batches {len(train_dataloader)}",
@@ -394,23 +330,6 @@ def main():
             rm_model.backward(loss)
             rm_model.step()
             mean_loss += loss.item()
-            total_micro_steps += 1
-            gas_boundary = (total_micro_steps %
-                            args.gradient_accumulation_steps == 0)
-            total_steps = total_micro_steps // args.gradient_accumulation_steps
-            print_rank_0(f"step {step}: loss:{loss.item():.4f}", args.global_rank)
-            if args.eval_interval and gas_boundary and (
-                    total_steps % args.eval_interval == 0):
-                print_rank_0(f"Iter {total_steps}: Evaluating reward",
-                             args.global_rank)
-                reward_score, reject_score, acc = evaluation_reward(
-                    rm_model, eval_dataloader, args.eval_iters)
-                print_rank_0(
-                    f"Iter {total_steps}: c_scores: {reward_score}, r_scores: {reject_score}, "
-                    f"diff: {reward_score - reject_score}, acc: {acc}, loss:{loss.item():.4f}",
-                    args.global_rank)
-                rm_model.train()
-
         print_rank_0(
             f"Epoch {epoch+1}/{args.num_train_epochs} with loss {mean_loss/(step+1)}",
             args.global_rank)
@@ -418,12 +337,10 @@ def main():
         print_rank_0(
             f"***** Evaluating reward, Epoch {epoch+1}/{args.num_train_epochs} *****",
             args.global_rank)
-        reward_score, reject_score, acc = evaluation_reward(
-            rm_model, eval_dataloader, args.eval_iters)
+        reward_score, acc = evaluation_reward(rm_model, eval_dataloader)
         print_rank_0(
-            f"chosen_last_scores (higher is better) : {reward_score}, "
-            f"rejected_last_scores (lower is better) : {reject_score}, "
-            f"acc (higher is better) : {acc}", args.global_rank)
+            f"chosen_last_scores (higher is better) : {reward_score}, acc (higher is better) : {acc}",
+            args.global_rank)
         rm_model.tput_timer.update_epoch_count()
 
     if args.output_dir is not None:
diff --git a/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/rw_eval.py b/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/rw_eval.py
index 23f9a66af..0ab7ecfbc 100644
--- a/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/rw_eval.py
+++ b/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/rw_eval.py
@@ -4,11 +4,16 @@
 # DeepSpeed Team
 import argparse
+import os
 
 import torch
 
-from dschat.utils.model.model_utils import create_critic_model
-from dschat.utils.utils import to_device, load_hf_tokenizer
-from deepspeed import get_accelerator
+import sys
+
+sys.path.append(
+    os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
+from utils.model.model_utils import create_critic_model
+from utils.utils import to_device
+from utils.utils import load_hf_tokenizer
 
 
 def parse_args():
@@ -29,26 +34,16 @@ def parse_args():
         "OPT model has a fixed number (1) of padding tokens at the beginning of the input. "
         "We did not see this in other models but keep it as an option for now.",
     )
-    parser.add_argument(
-        "--add_eot_token",
-        action='store_true',
-        help="Add <|endoftext|> as additional special token to tokenizer")
     args = parser.parse_args()
     return args
 
 
-def load_stuff(model_name_or_path, num_padding_at_beginning,
-               additional_special_tokens):
+def load_stuff(model_name_or_path, num_padding_at_beginning):
 
-    tokenizer = load_hf_tokenizer(model_name_or_path,
-                                  fast_tokenizer=True,
-                                  add_special_tokens=additional_special_tokens)
+    tokenizer = load_hf_tokenizer(model_name_or_path, fast_tokenizer=True)
     tokenizer.pad_token = tokenizer.eos_token
-    model = create_critic_model(model_name_or_path,
-                                tokenizer,
-                                None,
-                                num_padding_at_beginning,
-                                dropout=0.)
+    model = create_critic_model(model_name_or_path, tokenizer, None,
+                                num_padding_at_beginning, True)
     return model, tokenizer
 
 
@@ -105,14 +100,10 @@ def prepare_singlesample(prompt,
 
 def run_pair_comparison():
     args = parse_args()
-    device = torch.device(get_accelerator().device_name(0))
-
-    args.end_of_conversation_token = "<|endoftext|>"
-    additional_special_tokens = args.end_of_conversation_token if args.add_eot_token else None
+    device = torch.device("cuda:0")
 
     rm_model, tokenizer = load_stuff(args.model_name_or_path,
-                                     args.num_padding_at_beginning,
-                                     additional_special_tokens)
+                                     args.num_padding_at_beginning)
     rm_model.to(device)
     rm_model.eval()
 
@@ -131,13 +122,12 @@ def run_pair_comparison():
 
     for prompt, good_ans, bad_ans in zip(prompt_list, good_ans_list,
                                          bad_ans_list):
-        batch = prepare_datapair(
-            prompt,
-            good_ans,
-            bad_ans,
-            tokenizer,
-            max_seq_len=512,
-            end_of_conversation_token=args.end_of_conversation_token)
+        batch = prepare_datapair(prompt,
+                                 good_ans,
+                                 bad_ans,
+                                 tokenizer,
+                                 max_seq_len=512,
+                                 end_of_conversation_token="<|endoftext|>")
         batch = to_device(batch, device)
         # Run inference
         with torch.no_grad():
@@ -154,25 +144,20 @@ def run_pair_comparison():
 
 def run_single_sample():
     args = parse_args()
-    device = torch.device(get_accelerator().device_name())
-
-    args.end_of_conversation_token = "<|endoftext|>"
-    additional_special_tokens = args.end_of_conversation_token if args.add_eot_token else None
+    device = torch.device("cuda")
 
     rm_model, tokenizer = load_stuff(args.model_name_or_path,
-                                     args.num_padding_at_beginning,
-                                     additional_special_tokens)
+                                     args.num_padding_at_beginning)
    rm_model.to(device)
 
     prompt = "Human: Explain the moon landing to a 6 year old in a few sentences."
     my_ans = "Assistant: The moon landing was a major milestone in the history of human exploration of the solar system. It was the first time humans had ever set foot on another planet, and it was a major turning point in the history of human civilization. The astronauts, Neil Armstrong, Buzz Aldrin, and Michael Collins, successfully landed the Apollo 11 spacecraft on the moon, marking the first time humans had ever set foot on another"
 
-    batch = prepare_singlesample(
-        prompt,
-        my_ans,
-        tokenizer,
-        max_seq_len=512,
-        end_of_conversation_token=args.end_of_conversation_token)
+    batch = prepare_singlesample(prompt,
+                                 my_ans,
+                                 tokenizer,
+                                 max_seq_len=512,
+                                 end_of_conversation_token="<|endoftext|>")
     batch = to_device(batch, device)
 
     rm_model.eval()
diff --git a/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/training_scripts/llama2/run_llama2_7b.sh b/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/training_scripts/llama2/run_llama2_7b.sh
index dc98e2832..5ee57171d 100644
--- a/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/training_scripts/llama2/run_llama2_7b.sh
+++ b/nlp/llm/llama2-7b_reward_sft/deepspeed/training/step2_reward_model_finetuning/training_scripts/llama2/run_llama2_7b.sh
@@ -1,7 +1,5 @@
 #!/bin/bash
 # Copyright (c) Microsoft Corporation.
-# Copyright (c) 2024, Shanghai Iluvatar CoreX Semiconductor Co., Ltd.
-# All Rights Reserved.
 # SPDX-License-Identifier: Apache-2.0
 
 # DeepSpeed Team
@@ -15,10 +13,10 @@ if [ "$ZERO_STAGE" == "" ]; then
 fi
 mkdir -p $OUTPUT
 
-deepspeed ../../main.py \
-   --data_path "../../../../datasets/Dahoas/rm-static/" \
+deepspeed main.py \
+   --data_path Dahoas/rm-static \
    --data_split 2,4,4 \
-   --model_name_or_path "../../../../datasets/Llama-2-7b-hf/" \
+   --model_name_or_path meta-llama/Llama-2-7b-hf \
    --per_device_train_batch_size 8 \
    --per_device_eval_batch_size 8 \
    --max_seq_len 512 \
@@ -33,6 +31,6 @@ deepspeed ../../main.py \
    --gradient_checkpointing \
    --zero_stage $ZERO_STAGE \
    --deepspeed \
-   --output_dir $OUTPUT \
    --offload \
-   |& tee $OUTPUT/training.log
+   --output_dir $OUTPUT \
+   &> $OUTPUT/training.log
-- 
Gitee