mirror of https://github.com/aladdinpersson/Machine-Learning-Collection.git (synced 2026-02-21 11:18:01 +00:00)
{
"cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "fc8e5ea0",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"print(torch.cuda.is_available())"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d8a1e039",
"metadata": {},
"outputs": [],
"source": [
"from transformers import pipeline\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6ad73024",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"classifier = pipeline(\"zero-shot-classification\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "04f7e02c",
"metadata": {},
"outputs": [],
"source": [
"classifier(\n",
"    \"This is a course about the Transformers library\",\n",
"    candidate_labels=[\"machine learning\", \"gym\", \"food\"],\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6fb246c2",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"from transformers import pipeline\n",
"generator = pipeline(task=\"text-generation\", model=\"bigscience/bloom-1b7\", device=0)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c4e174f0",
"metadata": {},
"outputs": [],
"source": [
"from transformers import AutoModelForTokenClassification, AutoModel, AutoTokenizer\n",
"import torch\n",
"\n",
"# Define input text and pre-trained model checkpoint\n",
"text = \"My name is wolfgang and I live in berlin\"\n",
"checkpoint = \"Jean-Baptiste/roberta-large-ner-english\"\n",
"\n",
"# Instantiate tokenizer and encode input text\n",
"tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n",
"inputs = tokenizer(text, padding=True, truncation=True, return_tensors=\"pt\")\n",
"\n",
"# Instantiate model and generate output\n",
"model = AutoModel.from_pretrained(checkpoint)\n",
"outputs = model(**inputs)\n",
"print(outputs[0].shape)\n",
"\n",
"# Instantiate token classification model and generate predictions\n",
"model = AutoModelForTokenClassification.from_pretrained(checkpoint)\n",
"outputs = model(**inputs)\n",
"predictions = torch.nn.functional.softmax(outputs.logits, dim=-1)\n",
"print(predictions)\n",
"print(model.config.id2label)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "8212bbaa",
"metadata": {},
"outputs": [],
"source": [
"from transformers import AutoTokenizer, AutoModelForMaskedLM\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-large')\n",
"model = AutoModelForMaskedLM.from_pretrained(\"xlm-roberta-large\")\n",
"\n",
"# prepare input\n",
"text = \"Replace me by any text you'd like.\"\n",
"encoded_input = tokenizer(text, return_tensors='pt')\n",
"\n",
"# forward pass\n",
"output = model(**encoded_input)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "314cba41",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from transformers import AutoTokenizer, AutoModelForMaskedLM\n",
"\n",
"# Load the pre-trained tokenizer and model\n",
"tokenizer = AutoTokenizer.from_pretrained('xlm-roberta-large')\n",
"model = AutoModelForMaskedLM.from_pretrained(\"xlm-roberta-large\")\n",
"\n",
"# Define the input sentence with a masked token\n",
"text = \"I want to <mask> a new car tomorrow.\"\n",
"\n",
"# Tokenize the input sentence (the tokenizer maps <mask> to its mask token id)\n",
"encoded_input = tokenizer(text, padding=True, truncation=True, return_tensors='pt')\n",
"\n",
"# Forward pass to get the logits for every position\n",
"output = model(**encoded_input)\n",
"\n",
"print(output.logits.shape)\n",
"print(encoded_input['input_ids'][0].tolist().index(tokenizer.mask_token_id))\n",
"\n",
"# Extract the predicted probabilities for the masked token\n",
"predicted_probabilities = output.logits[0, encoded_input['input_ids'][0].tolist().index(tokenizer.mask_token_id)]\n",
"predicted_probabilities = torch.nn.functional.softmax(predicted_probabilities, dim=-1)\n",
"\n",
"# Get the top-k most probable predictions for the masked token\n",
"k = 5\n",
"top_k = torch.topk(predicted_probabilities, k)\n",
"for i in range(k):\n",
"    token = tokenizer.convert_ids_to_tokens(top_k.indices[i].item())\n",
"    score = top_k.values[i].item()\n",
"    print(f\"Prediction {i+1}: '{token}' with probability {score:.5f}\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "6187e77e",
"metadata": {},
"outputs": [],
"source": [
"%%time\n",
"tokenizer = AutoTokenizer.from_pretrained(\"bert-base-cased\")\n",
"\n",
"sequences = [\n",
"    \"Using a Transformer network is simple\",\n",
"    \"The quick brown fox jumps over the lazy dog\",\n",
"    \"To be or not to be, that is the question\"\n",
"]\n",
"\n",
"# Tokenize the input sequences and convert them to padded and truncated integer token IDs\n",
"inputs = tokenizer(\n",
"    sequences,\n",
"    padding=True,\n",
"    truncation=True,\n",
"    return_tensors=\"pt\"\n",
")\n",
"\n",
"# Print the resulting input IDs and attention masks\n",
"print(inputs['input_ids'])\n",
"print(inputs['attention_mask'])"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fc259c5a",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "43466db6",
"metadata": {},
"source": [
"Hugging Face:\n",
"\n",
"1. Understanding how to use the pipeline API (probably the most useful part) for various tasks: it is easy to use and covers many subtasks such as translation, question answering, zero-shot classification, sentiment analysis, token classification, etc.\n",
"2. Understanding how the pipeline works in more detail by using the AutoModel classes for various tasks, together with AutoTokenizer.\n",
"3. How to load a dataset.\n",
"4. How to fine-tune.\n",
"5. How to evaluate (a minimal metric example follows in the next cell).\n",
"6. "
]
},
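{
"cell_type": "markdown",
"id": "added-eval-note",
"metadata": {},
"source": [
"A minimal sketch of point 5: loading a metric with the `evaluate` library and scoring a few predictions. The label values below are made up purely for illustration."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "added-eval-example",
"metadata": {},
"outputs": [],
"source": [
"import evaluate\n",
"\n",
"# Load the GLUE MRPC metric (reports accuracy and F1)\n",
"metric = evaluate.load(\"glue\", \"mrpc\")\n",
"\n",
"# Score a few hypothetical predictions against hypothetical references\n",
"print(metric.compute(predictions=[0, 1, 1], references=[0, 1, 0]))"
]
},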
{
"cell_type": "code",
"execution_count": null,
"id": "97c474f2",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "3ed5d8c2",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from transformers import AdamW, AutoTokenizer, AutoModelForSequenceClassification\n",
"\n",
"# Same as before\n",
"checkpoint = \"bert-base-uncased\"\n",
"tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n",
"model = AutoModelForSequenceClassification.from_pretrained(checkpoint)\n",
"sequences = [\n",
"    \"I've been waiting for a HuggingFace course my whole life.\",\n",
"    \"This course is amazing!\",\n",
"]\n",
"batch = tokenizer(sequences, padding=True, truncation=True, return_tensors=\"pt\")\n",
"\n",
"# This is new\n",
"batch[\"labels\"] = torch.tensor([1, 1])\n",
"\n",
"optimizer = AdamW(model.parameters())\n",
"loss = model(**batch).loss\n",
"loss.backward()\n",
"optimizer.step()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c598624f",
"metadata": {},
"outputs": [],
"source": [
"from datasets import load_dataset\n",
"raw_datasets = load_dataset(\"glue\", \"mrpc\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "cd296227",
"metadata": {},
"outputs": [],
"source": [
"raw_train_dataset = raw_datasets[\"train\"]\n",
"raw_train_dataset[0]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e462947a",
"metadata": {},
"outputs": [],
"source": [
"from datasets import load_dataset\n",
"from transformers import AutoTokenizer, DataCollatorWithPadding\n",
"raw_datasets = load_dataset(\"glue\", \"mrpc\")\n",
"\n",
"checkpoint = \"bert-base-uncased\"\n",
"tokenizer = AutoTokenizer.from_pretrained(checkpoint)\n",
"\n",
"def tokenize_function(example):\n",
"    return tokenizer(example[\"sentence1\"], example[\"sentence2\"], truncation=True)\n",
"\n",
"\n",
"tokenized_datasets = raw_datasets.map(tokenize_function, batched=True)\n",
"data_collator = DataCollatorWithPadding(tokenizer=tokenizer)\n",
"\n",
"\n",
"from transformers import Trainer, TrainingArguments\n",
"training_args = TrainingArguments(\"test-trainer\")\n",
"\n",
"from transformers import AutoModelForSequenceClassification\n",
"model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)\n",
"\n",
"import numpy as np\n",
"import evaluate\n",
"\n",
"def compute_metrics(eval_preds):\n",
"    metric = evaluate.load(\"glue\", \"mrpc\")\n",
"    logits, labels = eval_preds\n",
"    predictions = np.argmax(logits, axis=-1)\n",
"    return metric.compute(predictions=predictions, references=labels)\n",
"\n",
"training_args = TrainingArguments(\"test-trainer\", evaluation_strategy=\"epoch\")\n",
"model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)\n",
"\n",
"trainer = Trainer(\n",
"    model,\n",
"    training_args,\n",
"    train_dataset=tokenized_datasets[\"train\"],\n",
"    eval_dataset=tokenized_datasets[\"validation\"],\n",
"    data_collator=data_collator,\n",
"    tokenizer=tokenizer,\n",
"    compute_metrics=compute_metrics,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0e2795dc",
"metadata": {},
"outputs": [],
"source": [
"from transformers import TrainingArguments\n",
"training_args = TrainingArguments(\"test-trainer\")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3af29cd5",
"metadata": {},
"outputs": [],
"source": [
"from transformers import AutoModelForSequenceClassification\n",
"model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "817f644e",
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import evaluate"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "42819a6c",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"def compute_metrics(eval_preds):\n",
"    metric = evaluate.load(\"glue\", \"mrpc\")\n",
"    logits, labels = eval_preds\n",
"    predictions = np.argmax(logits, axis=-1)\n",
"    return metric.compute(predictions=predictions, references=labels)\n",
"\n",
"training_args = TrainingArguments(\"test-trainer\", evaluation_strategy=\"epoch\")\n",
"model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)\n",
"\n",
"trainer = Trainer(\n",
"    model,\n",
"    training_args,\n",
"    train_dataset=tokenized_datasets[\"train\"],\n",
"    eval_dataset=tokenized_datasets[\"validation\"],\n",
"    data_collator=data_collator,\n",
"    tokenizer=tokenizer,\n",
"    compute_metrics=compute_metrics,\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eb5986b0",
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, DataCollatorForSeq2Seq, Seq2SeqTrainingArguments, Seq2SeqTrainer\n",
"from datasets import load_dataset\n",
"batch_size = 32\n",
"\n",
"# Load the pre-trained model and tokenizer first, since preprocessing needs the tokenizer\n",
"model_name = \"t5-small\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
"model = AutoModelForSeq2SeqLM.from_pretrained(model_name)\n",
"\n",
"# Generator variant that preprocesses the data in batches (kept as an alternative, unused below)\n",
"def preprocess_generator(examples):\n",
"    for i in range(0, len(examples[\"article\"]), batch_size):\n",
"        batch = examples[\"article\"][i:i+batch_size]\n",
"        targets = examples[\"highlights\"][i:i+batch_size]\n",
"        model_inputs = tokenizer(batch, max_length=512, padding=\"max_length\", truncation=True)\n",
"        with tokenizer.as_target_tokenizer():\n",
"            model_targets = tokenizer(targets, max_length=128, padding=\"max_length\", truncation=True)\n",
"        model_inputs[\"labels\"] = model_targets[\"input_ids\"]\n",
"        yield model_inputs\n",
"\n",
"def preprocess_function(examples):\n",
"    articles = [ex for ex in examples[\"article\"]]\n",
"    summaries = [ex for ex in examples[\"highlights\"]]\n",
"\n",
"    model_inputs = tokenizer(articles, max_length=512, padding=\"max_length\", truncation=True)\n",
"    with tokenizer.as_target_tokenizer():\n",
"        model_targets = tokenizer(summaries, max_length=128, padding=\"max_length\", truncation=True)\n",
"\n",
"    model_inputs[\"labels\"] = model_targets[\"input_ids\"]\n",
"    return model_inputs\n",
"\n",
"# Load and preprocess the dataset\n",
"raw_datasets = load_dataset(\"cnn_dailymail\", \"3.0.0\")\n",
"preprocessed_datasets = raw_datasets.map(preprocess_function, batched=True, num_proc=4)\n",
"\n",
"# Define the data collator\n",
"data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)\n",
"\n",
"# Initialize the trainer arguments\n",
"training_args = Seq2SeqTrainingArguments(\n",
"    output_dir=\"./results\",\n",
"    evaluation_strategy=\"epoch\",\n",
"    learning_rate=2e-5,\n",
"    per_device_train_batch_size=batch_size,\n",
"    max_steps=1000,\n",
"    weight_decay=0.01,\n",
"    push_to_hub=False,\n",
")\n",
"\n",
"# Initialize the trainer\n",
"trainer = Seq2SeqTrainer(\n",
"    model=model,\n",
"    args=training_args,\n",
"    train_dataset=preprocessed_datasets[\"train\"],\n",
"    eval_dataset=preprocessed_datasets[\"validation\"],\n",
"    data_collator=data_collator,\n",
"    tokenizer=tokenizer,\n",
")\n",
"\n",
"# Start the training\n",
"trainer.train()\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "7d62583e",
"metadata": {},
"outputs": [],
"source": [
"from datasets import load_metric"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "d310a7b3",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"preprocessed_datasets"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "99d422cc",
"metadata": {
"scrolled": false
},
"outputs": [],
"source": [
"# Load the pre-trained model and tokenizer\n",
"model_name = \"t5-small\"\n",
"tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
"model = AutoModelForSeq2SeqLM.from_pretrained(model_name)\n",
"\n",
"# Define the data collator\n",
"data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)\n",
"\n",
"# Initialize the trainer arguments\n",
"training_args = Seq2SeqTrainingArguments(\n",
"    output_dir=\"./results\",\n",
"    learning_rate=2e-5,\n",
"    per_device_train_batch_size=batch_size,\n",
"    max_steps=5000,\n",
"    weight_decay=0.01,\n",
"    push_to_hub=False,\n",
"    evaluation_strategy=\"steps\",\n",
"    eval_steps=50,\n",
"    predict_with_generate=True,  # generate summaries during evaluation so compute_metrics gets token ids\n",
")\n",
"\n",
"# Load the ROUGE metric\n",
"metric = load_metric(\"rouge\")\n",
"\n",
"# Define the evaluation function\n",
"def compute_metrics(pred):\n",
"    labels = pred.label_ids\n",
"    preds = pred.predictions\n",
"\n",
"    decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)\n",
"    decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)\n",
"\n",
"    scores = metric.compute(predictions=decoded_preds, references=decoded_labels, rouge_types=[\"rouge1\"])[\"rouge1\"].mid\n",
"\n",
"    return {\"rouge1_precision\": scores.precision, \"rouge1_recall\": scores.recall, \"rouge1_fmeasure\": scores.fmeasure}\n",
"\n",
"\n",
"# Initialize the trainer\n",
"trainer = Seq2SeqTrainer(\n",
"    model=model,\n",
"    args=training_args,\n",
"    train_dataset=preprocessed_datasets[\"train\"],\n",
"    eval_dataset=preprocessed_datasets[\"validation\"],\n",
"    data_collator=data_collator,\n",
"    tokenizer=tokenizer,\n",
"    compute_metrics=compute_metrics,\n",
")\n",
"\n",
"# Start the training\n",
"trainer.train()"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "a5e97b57",
"metadata": {},
"outputs": [],
"source": [
"!pip install nltk\n",
"!pip install rouge_score"
]
},
{
"cell_type": "markdown",
"id": "558c3e66",
"metadata": {},
"source": [
"# Goal:\n",
"\n",
"1. Implement full training with HF, from data loading (the CNN/DailyMail dataset) to model training, evaluation, etc.\n",
"* Right now: stuck on on-the-fly dataset loading; we don't want to cache the preprocessed data because that would take a lot of disk space (see the streaming sketch in the cell below).\n",
"\n",
"2. Once step 1 works, go deeper on every step: download the dataset and load it as a custom dataset rather than through the simple Hugging Face API, to make the setup more general. Compare loading it as a custom HF dataset versus using a PyTorch Dataset class together with Lightning. Speed difference? Convenience? We also want to use the Lightning Trainer and see how to integrate it, and then compare the pure HF approach with the Lightning + HF model approach and decide which we like most."
]
},
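{
"cell_type": "markdown",
"id": "added-streaming-note",
"metadata": {},
"source": [
"A minimal sketch of the on-the-fly loading mentioned above, assuming a `datasets` version that supports `streaming=True`: the split is returned as an `IterableDataset` that is read lazily, so nothing is cached to disk. Tokenization could then also be applied on the fly with `.map` on the streamed dataset."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "added-streaming-example",
"metadata": {},
"outputs": [],
"source": [
"from datasets import load_dataset\n",
"\n",
"# Stream the train split instead of downloading and caching the whole dataset\n",
"streamed_train = load_dataset(\"cnn_dailymail\", \"3.0.0\", split=\"train\", streaming=True)\n",
"\n",
"# Grab a single example lazily to sanity-check the fields\n",
"example = next(iter(streamed_train))\n",
"print(example[\"article\"][:200])\n",
"print(example[\"highlights\"])"
]
},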
{
"cell_type": "code",
"execution_count": null,
"id": "624d49ca",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.9"
}
},
"nbformat": 4,
"nbformat_minor": 5
}