train.py
import os
from transformers import AutoModelForCausalLM, AutoTokenizer, Trainer, TrainingArguments
from datasets import load_dataset
import torch
from peft import get_peft_model, LoraConfig, TaskType
import numpy as np
from transformers import default_data_collator
# Load the base model
model_name = 'meta-llama/Llama-3.1-8B-Instruct'
model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
# Load the tokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
# Set the padding token (the Llama tokenizer does not define one)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
    model.config.pad_token_id = model.config.eos_token_id
# Configure LoRA
peft_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=8,
    lora_alpha=32,
    lora_dropout=0.1,
    target_modules=["q_proj", "v_proj"],
)
model = get_peft_model(model, peft_config)
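# Optional (not in the original script): report how many parameters remain
# trainable after wrapping with LoRA -- a quick check that only the adapter
# weights will be updated.
model.print_trainable_parameters()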
dataset = load_dataset("json", data_files="./list.jsonl")
train_dataset = dataset["train"]
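# Note (assumed format, not taken from the repo): the preprocessing below reads
# msg["content"][0]["content"], so each line of list.jsonl is expected to look
# roughly like:
# {"messages": [
#   {"role": "user", "content": [{"type": "text", "content": "..."}]},
#   {"role": "assistant", "content": [{"type": "text", "content": "..."}]}
# ]}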
max_token_length = 0
longest_instance = None
# Step 1: Calculate token lengths across the dataset
def calculate_token_lengths(batch):
    lengths = []
    for messages in batch["messages"]:
        # Gather the user-turn texts; merge multiple user turns into one string
        inputs = [
            msg["content"][0]["content"]
            for msg in messages
            if msg["role"] == "user"
        ]
        if len(inputs) > 1:
            inputs = [" ".join(inputs)]
        model_inputs = tokenizer(inputs, truncation=False, padding=False)
        lengths.extend([len(seq) for seq in model_inputs["input_ids"]])
    return {"lengths": lengths}
# Step 2: Apply the function to the dataset and collect lengths
lengths_dataset = train_dataset.map(calculate_token_lengths, batched=True)
all_lengths = lengths_dataset["lengths"]
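# Optional (not part of the original script): summarize the measured prompt
# lengths so the max_length=2048 used below can be sanity-checked. Uses the
# all_lengths list collected above; numpy is already imported as np.
max_token_length = max(all_lengths)
print(f"Prompt tokens -- max: {max_token_length}, mean: {np.mean(all_lengths):.1f}, "
      f"95th percentile: {np.percentile(all_lengths, 95):.0f}")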
def preprocess_function(examples):
    all_inputs = []
    all_targets = []
    for messages in examples["messages"]:  # Loop through each "messages" entry in the batch
        # Extract user message content
        inputs = [
            msg["content"][0]["content"]
            for msg in messages
            if msg["role"] == "user"
        ]
        # Extract assistant message content (target responses)
        targets = [
            msg["content"][0]["content"]
            for msg in messages
            if msg["role"] == "assistant"
        ]
        # Add processed inputs and targets to the batch lists
        all_inputs.extend(inputs)
        all_targets.extend(targets)
    # Tokenize inputs and targets
    model_inputs = tokenizer(all_inputs, max_length=2048, truncation=True, padding="max_length")
    with tokenizer.as_target_tokenizer():
        labels = tokenizer(all_targets, max_length=2048, truncation=True, padding="max_length")
    # Assign labels, replacing padding tokens with -100 so they are ignored by the loss
    model_inputs["labels"] = [
        [-100 if token == tokenizer.pad_token_id else token for token in label]
        for label in labels["input_ids"]
    ]
    return model_inputs
# Apply the preprocessing function to the dataset
tokenized_dataset = train_dataset.map(preprocess_function, batched=True)
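# Optional sanity check (not in the original script): one processed example
# should have input_ids and labels padded to the same 2048-token length.
sample = tokenized_dataset[0]
assert len(sample["input_ids"]) == len(sample["labels"]) == 2048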
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=1,
    per_device_train_batch_size=2,
    gradient_accumulation_steps=16,
    fp16=True,
    learning_rate=2e-5,
    warmup_steps=50,
    lr_scheduler_type="cosine",
    max_steps=6000,  # when max_steps is set, it takes precedence over num_train_epochs
    save_strategy="steps",
    save_steps=1000,
    save_total_limit=6,
    report_to="none",
)
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=default_data_collator,
)
trainer.train()
trainer.save_model("trained_model")
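# --- Optional inference sketch (not part of the original script) ---
# A minimal, hedged example of reloading the saved LoRA adapter for generation.
# Assumes the "trained_model" directory written by trainer.save_model() above;
# set RUN_INFERENCE_DEMO = True to try it.
RUN_INFERENCE_DEMO = False
if RUN_INFERENCE_DEMO:
    from peft import PeftModel
    base_model = AutoModelForCausalLM.from_pretrained(model_name, torch_dtype=torch.float16)
    inference_model = PeftModel.from_pretrained(base_model, "trained_model")
    inference_model.eval()
    prompt = "Hello!"  # hypothetical prompt, replace with a real user query
    inputs = tokenizer(prompt, return_tensors="pt").to(inference_model.device)
    with torch.no_grad():
        output_ids = inference_model.generate(**inputs, max_new_tokens=64)
    print(tokenizer.decode(output_ids[0], skip_special_tokens=True))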