-
Notifications
You must be signed in to change notification settings - Fork 1
/
distill.py
73 lines (61 loc) · 2.29 KB
/
distill.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
# Colab:
# Turn ON GPU
# !git clone https://github.com/nikitakapitan/nlphub.git
# !mv nlphub/distill.yaml .
# !mkdir logs
# !pip install datasets transformers evaluate accelerate
# python distill.py --config distill.yaml
import os
import yaml
import logging
import argparse
import time
from nlphub import DistillationTrainingArguments, DistillationTrainer
from nlphub import Distiller
# Initialize logging: write a timestamped log file under ./logs.
# BUGFIX: the original checked the Colab-specific absolute path
# '/content/logs/' but basicConfig writes to the *relative* path
# 'logs/...' — outside Colab (or with any other cwd) the directory was
# never created and logging setup failed. Create the relative directory
# that is actually used; exist_ok avoids a race/re-run error.
os.makedirs('logs', exist_ok=True)
logging.basicConfig(
    filename=f"logs/train_{time.strftime('%Y-%m-%d_%H-%M-%S')}.log",
    level=logging.INFO,
)
def main(config_path):
    """Distill a teacher model into a student model per a YAML config.

    Loads the config, tokenizes the dataset with the student tokenizer,
    builds distillation training arguments and a DistillationTrainer,
    then trains, evaluates, and pushes the result to the Hugging Face Hub.

    Args:
        config_path: Path to the YAML config file (e.g. distill.yaml).
            Expected keys include TEACHER, DATASET_NAME, NUM_EPOCHS,
            LEARNING_RATE, BATCH_SIZE, ALPHA, EVAL_DATASET, LOG_LEVEL.
    """
    # Explicit encoding so the config parses identically on every platform.
    with open(config_path, 'r', encoding='utf-8') as f:
        config = yaml.safe_load(f)

    distiller = Distiller(config)

    # Tokenize the raw 'text' column with the *student's* tokenizer.
    # NOTE(review): assumes the dataset has a 'text' column — confirm
    # against the Distiller's dataset loading.
    tokenize = lambda batch: distiller.student_tokenizer(batch['text'], truncation=True)
    dataset_encoded = distiller.dataset.map(tokenize, batched=True)

    # Distillation training arguments; ALPHA weights the distillation
    # loss against the student's own task loss.
    student_training_args = DistillationTrainingArguments(
        output_dir=f'{config["TEACHER"]}-distilled-{config["DATASET_NAME"]}',
        num_train_epochs=config['NUM_EPOCHS'],
        learning_rate=config['LEARNING_RATE'],
        per_device_train_batch_size=config['BATCH_SIZE'],
        per_device_eval_batch_size=config['BATCH_SIZE'],
        alpha=config['ALPHA'],
        weight_decay=0.01,
        evaluation_strategy='epoch',
        disable_tqdm=False,
        logging_dir='./logs',
        push_to_hub=True,
        log_level=config['LOG_LEVEL'],
    )

    # model_init (rather than model=) lets the trainer re-instantiate a
    # fresh student, e.g. for hyperparameter search.
    trainer = DistillationTrainer(
        model_init=distiller.student_init,
        teacher_model=distiller.teacher,
        args=student_training_args,
        train_dataset=dataset_encoded['train'],
        eval_dataset=dataset_encoded[config['EVAL_DATASET']],
        tokenizer=distiller.student_tokenizer,
        compute_metrics=distiller.compute_metrics_func,
    )

    # Train and evaluate.
    logging.info("Start TRAINING")
    trainer.train()
    trainer.evaluate()

    # Push the distilled model to the Hub.
    trainer.push_to_hub()
    logging.info("Model pushed to Hugging Face Hub.")
if __name__ == '__main__':
    # CLI entry point: require a --config path and hand it to main().
    cli = argparse.ArgumentParser(description='Distilling models with Hugging Face Transformers')
    cli.add_argument('--config', type=str, required=True, help='Path to the YAML config file')
    parsed = cli.parse_args()
    main(parsed.config)