{ "cells": [ { "cell_type": "markdown", "id": "80517dbc", "metadata": { "id": "80517dbc" }, "source": [ "# Teach an LLM to do additions" ] }, { "cell_type": "markdown", "id": "0aaca18f", "metadata": {}, "source": [ "The goal of this project is to teach an LLM to do additions, playing only with two parts:\n", "* the tokenizer\n", "* the positional embedding\n", "\n", "Both the model and the dataset are fixed.\n", "\n", "You are allowed to tune the hyperparameters, but this is not the main goal. Depending on the quality of your tokenizer and positional embedding, you may change the number of bits. The initial value of 3 is very small." ] }, { "cell_type": "code", "execution_count": 2, "id": "ae993bb9", "metadata": { "id": "ae993bb9" }, "outputs": [], "source": [ "import torch\n", "from torch import nn\n", "from torch.nn import functional as F\n", "\n", "import random\n", "import math\n", "import re\n", "import time" ] }, { "cell_type": "code", "execution_count": 3, "id": "OzGh9ahKF17h", "metadata": { "id": "OzGh9ahKF17h" }, "outputs": [], "source": [ "number_bits = 3\n", "\n", "dataset_size = 64_000\n", "train_proportion = 0.9\n", "\n", "log_interval = 200\n", "batch_size = 64\n", "epochs = 4\n", "learning_rate = 8e-4" ] }, { "cell_type": "markdown", "id": "6c054bed", "metadata": { "id": "6c054bed" }, "source": [ "## Step 1: Construct a tokenizer" ] }, { "cell_type": "code", "execution_count": 4, "id": "t6aC9uNeIR6C", "metadata": { "id": "t6aC9uNeIR6C" }, "outputs": [], "source": [ "pad_token=\"[PAD]\"\n", "eos_token=\"[EOS]\"" ] }, { "cell_type": "markdown", "id": "BMvT0B-MGBnY", "metadata": { "id": "BMvT0B-MGBnY" }, "source": [ "### Baseline: character-level tokenizer" ] }, { "cell_type": "code", "execution_count": 5, "id": "g2QiF-otFur3", "metadata": { "id": "g2QiF-otFur3" }, "outputs": [], "source": [ "class character_level_tokenizer:\n", " \"\"\"\n", " character-level\n", " \"\"\"\n", " def __init__(self):\n", " self.vocab = [str(x) for x in range(10)] + [\"+\", \"=\"] + [pad_token, eos_token]\n", " self.token_to_id = {v : k for k, v in enumerate(self.vocab)}\n", " self.id_to_token = {k : v for k, v in enumerate(self.vocab)}\n", " self.ntokens = len(self.vocab)\n", " self.pattern = f\"[^{re.escape(''.join(self.vocab))}]\"\n", " \n", " def clean(self, text):\n", " \"\"\"\n", " removes all characters not in the vocabulary\n", " \"\"\"\n", " out = re.sub(self.pattern, \"\", text)\n", " return out\n", "\n", " def pre_tokenization(self, text):\n", " \"\"\"\n", " character-level\n", " \"\"\"\n", " return [c for c in text]\n", "\n", " def encode(self, text):\n", " text_list = self.pre_tokenization(self.clean(text))\n", " return [self.token_to_id[c] for c in text_list]\n", "\n", " def decode(self, token_list):\n", " return \"\".join([self.id_to_token[x] for x in token_list])" ] }, { "cell_type": "code", "execution_count": 6, "id": "QuCc6jF5F8hK", "metadata": { "id": "QuCc6jF5F8hK" }, "outputs": [ { "data": { "text/plain": [ "14" ] }, "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tokenizer = character_level_tokenizer()\n", "ntokens = tokenizer.ntokens\n", "ntokens" ] }, { "cell_type": "code", "execution_count": 7, "id": "8FXW2K-1Jd-P", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "8FXW2K-1Jd-P", "outputId": "349a4033-9fce-462b-f0d5-1bb3a7ffd340" }, "outputs": [ { "data": { "text/plain": [ "([1, 2, 10, 4, 2, 11], '12+42=')" ] }, "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ "prompt = \"12 
+ 42 =\"\n", "inputs = tokenizer.encode(prompt)\n", "inputs, tokenizer.decode(inputs)" ] }, { "cell_type": "markdown", "id": "j3gckvebGGYt", "metadata": { "id": "j3gckvebGGYt" }, "source": [ "# Implement your tokenizer here!\n", "\n", "You can do anything (as long as you do not compute the addition!).\n", "Some ideas:\n", "* reversing numbers left to right\n", "* arranging by groups (of, 2, 3,...)\n", "* aligning numbers" ] }, { "cell_type": "markdown", "id": "491af297", "metadata": { "id": "491af297" }, "source": [ "## Step 2: Create a dataset for arithmetic operations" ] }, { "cell_type": "code", "execution_count": 8, "id": "daa90f31", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "daa90f31", "outputId": "3e8719ee-d8fa-4984-8b51-4db3457f7dbc" }, "outputs": [ { "data": { "text/plain": [ "('148+114=', '262')" ] }, "execution_count": 8, "metadata": {}, "output_type": "execute_result" } ], "source": [ "def sample_datapoint(number_bits = 3):\n", " \"\"\"\n", " returns a string containing two random numbers on `number_bits` many bits and their sum.\n", " \"\"\"\n", " a_list = [random.randint(0, 9) for _ in range(number_bits)]\n", " b_list = [random.randint(0, 9) for _ in range(number_bits)]\n", " a_int = int(\"\".join([str(x) for x in a_list]))\n", " b_int = int(\"\".join([str(x) for x in b_list]))\n", " sum_int = a_int + b_int\n", " return (str(a_int) + \"+\" + str(b_int) + \"=\", str(sum_int))\n", "\n", "sample_datapoint(3)" ] }, { "cell_type": "code", "execution_count": 9, "id": "b6e861d2", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "b6e861d2", "outputId": "c88c2226-0546-473c-c296-88a52823886b" }, "outputs": [ { "data": { "text/plain": [ "[('34+52=', '86'),\n", " ('382+258=', '640'),\n", " ('926+692=', '1618'),\n", " ('876+405=', '1281')]" ] }, "execution_count": 9, "metadata": {}, "output_type": "execute_result" } ], "source": [ "data = []\n", "for _ in range(dataset_size):\n", " data.append(sample_datapoint(number_bits))\n", "data[:4]" ] }, { "cell_type": "code", "execution_count": 10, "id": "fee85050", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "fee85050", "outputId": "f080f4b0-fd76-48d8-d59f-7c118b6e6fe9" }, "outputs": [ { "data": { "text/plain": [ "(57600, 6400)" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "data_train = data[: int(train_proportion * dataset_size)]\n", "data_test = data[int(train_proportion * dataset_size):]\n", "\n", "len(data_train),len(data_test)" ] }, { "cell_type": "markdown", "id": "37200598", "metadata": { "id": "37200598" }, "source": [ "## Step 3: Construct a model" ] }, { "cell_type": "markdown", "id": "0fd7d2eb", "metadata": {}, "source": [ "### Basline: the classical Positional Embedding" ] }, { "cell_type": "code", "execution_count": 14, "id": "91674239", "metadata": { "id": "91674239" }, "outputs": [], "source": [ "class PositionalEmbedding(nn.Module):\n", " r\"\"\"Inject some information about the relative or absolute position of the tokens in the sequence.\n", " The positional encodings have the same dimension as the embeddings, so that the two can be summed.\n", " Here, we use sine and cosine functions of different frequencies.\n", " .. 
math:\n", " \\text{PosEmbedder}(pos, 2i) = sin(pos/10000^(2i/d_model))\n", " \\text{PosEmbedder}(pos, 2i+1) = cos(pos/10000^(2i/d_model))\n", " \\text{where pos is the word position and i is the embed idx)\n", " Args:\n", " d_model: the embed dim (required).\n", " dropout: the dropout value (default=0.1).\n", " max_len: the max. length of the incoming sequence (default=5000).\n", " \"\"\"\n", "\n", " def __init__(self, d_model, dropout=0.1, max_len=5000):\n", " super(PositionalEmbedding, self).__init__()\n", " self.dropout = nn.Dropout(p=dropout)\n", "\n", " pe = torch.zeros(max_len, d_model)\n", " position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)\n", " div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))\n", " pe[:, 0::2] = torch.sin(position * div_term)\n", " pe[:, 1::2] = torch.cos(position * div_term)\n", " pe = pe.unsqueeze(0).transpose(0, 1)\n", " self.register_buffer('pe', pe)\n", "\n", " def forward(self, x):\n", " r\"\"\"Inputs of forward function\n", " Args:\n", " x: the sequence fed to the positional encoder model (required).\n", " Shape:\n", " x: [sequence length, batch size, embed dim]\n", " output: [sequence length, batch size, embed dim]\n", " \"\"\"\n", "\n", " x = x + self.pe[:x.size(0), :]\n", " return self.dropout(x)" ] }, { "cell_type": "markdown", "id": "8296ceb2", "metadata": {}, "source": [ "# Implement your positional embedding here!\n", "\n", "You can do anything. Some ideas:\n", "* RoPE\n", "* (randomised) FIRE\n", "* Abacus\n", "\n", "**!!! IMPORTANT !!!** This model of Transformers is \"input first\", meaning that an input is a tensor with shape\n", "(length_prompts, batch_size)" ] }, { "cell_type": "code", "execution_count": 15, "id": "4eb278ab", "metadata": { "id": "4eb278ab" }, "outputs": [], "source": [ "class TransformerModel(nn.Transformer):\n", " def __init__(self, ntoken, ninp, nhead, nhid, nlayers, dropout=0.5):\n", " super(TransformerModel, self).__init__(d_model=ninp,\n", " nhead=nhead,\n", " dim_feedforward=nhid,\n", " num_encoder_layers=nlayers)\n", " self.input_emb = nn.Embedding(ntoken, ninp)\n", " self.pos_encoder = PositionalEmbedding(ninp, dropout)\n", " self.decoder = nn.Linear(ninp, ntoken)\n", "\n", " self.ninp = ninp\n", " self.init_weights()\n", "\n", " def init_weights(self):\n", " initrange = 0.1\n", " nn.init.uniform_(self.input_emb.weight, -initrange, initrange)\n", " nn.init.zeros_(self.decoder.bias)\n", " nn.init.uniform_(self.decoder.weight, -initrange, initrange)\n", "\n", " def _generate_square_subsequent_mask(self, sz):\n", " return torch.log(torch.tril(torch.ones(sz,sz)))\n", "\n", " def forward(self, src):\n", " mask = self._generate_square_subsequent_mask(len(src)).to(device)\n", " self.src_mask = mask\n", "\n", " src = self.input_emb(src) * math.sqrt(self.ninp)\n", " src = self.pos_encoder(src)\n", " output_enc = self.encoder(src, mask=self.src_mask)\n", " output_dec = self.decoder(output_enc)\n", " return F.log_softmax(output_dec, dim=-1), output_enc" ] }, { "cell_type": "code", "execution_count": 16, "id": "42f9d1ee", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "cpu\n" ] } ], "source": [ "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n", "print(device)" ] }, { "cell_type": "markdown", "id": "a30e093a", "metadata": {}, "source": [ "Please do not change these parameters!" 
] }, { "cell_type": "code", "execution_count": 17, "id": "1d568cc4", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "1d568cc4", "outputId": "f7f78975-2bdf-4c36-de35-3e140636d476" }, "outputs": [ { "data": { "text/plain": [ "TransformerModel(\n", " (encoder): TransformerEncoder(\n", " (layers): ModuleList(\n", " (0-7): 8 x TransformerEncoderLayer(\n", " (self_attn): MultiheadAttention(\n", " (out_proj): NonDynamicallyQuantizableLinear(in_features=128, out_features=128, bias=True)\n", " )\n", " (linear1): Linear(in_features=128, out_features=64, bias=True)\n", " (dropout): Dropout(p=0.1, inplace=False)\n", " (linear2): Linear(in_features=64, out_features=128, bias=True)\n", " (norm1): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (norm2): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " (dropout1): Dropout(p=0.1, inplace=False)\n", " (dropout2): Dropout(p=0.1, inplace=False)\n", " )\n", " )\n", " (norm): LayerNorm((128,), eps=1e-05, elementwise_affine=True)\n", " )\n", " (decoder): Linear(in_features=128, out_features=14, bias=True)\n", " (input_emb): Embedding(14, 128)\n", " (pos_encoder): PositionalEmbedding(\n", " (dropout): Dropout(p=0.5, inplace=False)\n", " )\n", ")" ] }, "execution_count": 17, "metadata": {}, "output_type": "execute_result" } ], "source": [ "model = TransformerModel(ntoken = ntokens,\n", " ninp = 128,\n", " nhead = 16,\n", " nhid = 64,\n", " nlayers = 8)\n", "model.to(device)" ] }, { "cell_type": "code", "execution_count": 18, "id": "8f2f06e0", "metadata": { "id": "8f2f06e0" }, "outputs": [], "source": [ "def generate(model, prompts, new_tokens = 5):\n", " input_tensor = prompts # (length_prompts, batch_size)\n", " input_tensor = input_tensor.to(device)\n", " for _ in range(new_tokens):\n", " output, _ = model(input_tensor) # (length_prompts, batch_size, ntokens)\n", " last_output = output[-1,:,:] # (batch_size, ntokens)\n", " token = torch.argmax(last_output, -1).view((1,-1)) # (1, batch_size)\n", " input_tensor = torch.cat((input_tensor, token), 0)\n", " return input_tensor" ] }, { "cell_type": "code", "execution_count": 26, "id": "d76d1b19", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "d76d1b19", "outputId": "a1df1dc9-2ecc-4de4-85b2-6bc5bd460439" }, "outputs": [ { "data": { "text/plain": [ "(tensor([[ 2, 10, 3, 11, 4, 4, 4, 4, 4]]), '2+3=44444')" ] }, "execution_count": 26, "metadata": {}, "output_type": "execute_result" } ], "source": [ "model.eval()\n", "\n", "prompt = \"2+3=\"\n", "prompt_tensor = torch.tensor(tokenizer.encode(prompt)).view((-1,1))\n", "output = generate(model, prompt_tensor).view((1,-1))\n", "output, tokenizer.decode(output.tolist()[0])" ] }, { "cell_type": "code", "execution_count": 29, "id": "00954ddc", "metadata": { "id": "00954ddc" }, "outputs": [], "source": [ "def pad(token_list, type_list = \"prompts\"):\n", " max_length = max([len(x) for x in token_list])\n", " out = []\n", " for x in token_list:\n", " if type_list == \"prompts\":\n", " out.append([tokenizer.token_to_id[pad_token]] * (max_length - len(x)) + x)\n", " if type_list == \"answers\":\n", " out.append(x + [tokenizer.token_to_id[eos_token]] + [tokenizer.token_to_id[pad_token]] * (max_length - len(x)))\n", " return out, max_length" ] }, { "cell_type": "code", "execution_count": 35, "id": "2c84beab", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "2c84beab", "outputId": "fc1bea13-d6e1-4a55-b70d-36de00bcec9b" }, "outputs": [ { "data": { "text/plain": [ "(['[PAD][PAD]1+1=', 
'21+35='], ['2[EOS][PAD]', '56[EOS]'])" ] }, "execution_count": 35, "metadata": {}, "output_type": "execute_result" } ], "source": [ "prompts = [tokenizer.encode(\"1+1=\"), tokenizer.encode(\"21+35=\")]\n", "answers = [tokenizer.encode(\"2\"), tokenizer.encode(\"56\")]\n", "padded_prompts, _ = pad(prompts, \"prompts\")\n", "padded_answers, _ = pad(answers, \"answers\")\n", "padded_prompts, padded_answers\n", "[tokenizer.decode(p) for p in padded_prompts], [tokenizer.decode(p) for p in padded_answers]" ] }, { "cell_type": "code", "execution_count": 36, "id": "264f9227", "metadata": { "id": "264f9227" }, "outputs": [], "source": [ "def get_batch(split, i):\n", " data = data_train if split == 'train' else data_test\n", " prompts = [tokenizer.encode(data[j][0]) for j in range(i, i + batch_size)]\n", " padded_prompts, length_prompts = pad(prompts, \"prompts\")\n", " answers = [tokenizer.encode(data[j][1]) for j in range(i, i + batch_size)]\n", " padded_answers, length_answers = pad(answers, \"answers\")\n", " X = torch.stack([torch.tensor(x) for x in padded_prompts], 1)\n", " Y = torch.stack([torch.tensor(x) for x in padded_answers], 1)\n", " return X, Y, length_prompts, length_answers" ] }, { "cell_type": "code", "execution_count": 32, "id": "91e281ad", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "91e281ad", "outputId": "22e2d0ee-ede4-41f8-e089-fb63ac2d9787" }, "outputs": [ { "data": { "text/plain": [ "(torch.Size([8, 64]), torch.Size([5, 64]), 8, 4)" ] }, "execution_count": 32, "metadata": {}, "output_type": "execute_result" } ], "source": [ "X, Y, length_prompts, length_answers = get_batch(\"train\", 243)\n", "X.shape, Y.shape, length_prompts, length_answers" ] }, { "cell_type": "markdown", "id": "113e1fd1", "metadata": { "id": "113e1fd1" }, "source": [ "## Step 4: Evaluate" ] }, { "cell_type": "code", "execution_count": 37, "id": "1cfcd10a", "metadata": { "id": "1cfcd10a" }, "outputs": [], "source": [ "def evaluate():\n", " # Turn on evaluation mode, which disables dropout.\n", " model.eval()\n", " correct = 0.\n", " with torch.no_grad():\n", " for batch, i in enumerate(range(0, len(data_test) - 1, batch_size)):\n", " prompts, target_answers, length_prompts, length_answers = get_batch(\"test\", i)\n", " prompts = prompts.to(device) # (length_prompts, batch_size)\n", " target_answers = target_answers.to(device) # (length_answers + 1, batch_size)\n", " output = generate(model, prompts, length_answers + 1) # (length_prompts + length_answers + 1, batch_size)\n", " answers_tokens = output[length_prompts:, :] # (length_answers + 1, batch_size), contains tokens\n", " equality_test = answers_tokens == target_answers # (length_answers + 1, batch_size), contains boolean values\n", " correct += torch.all(equality_test, axis=0).float().sum()\n", " accuracy = correct / len(data_test)\n", " return accuracy.item()" ] }, { "cell_type": "code", "execution_count": 38, "id": "ac335b05", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "ac335b05", "outputId": "b475e943-51b3-401d-d18b-c9d32a49ffb6" }, "outputs": [ { "data": { "text/plain": [ "0.0" ] }, "execution_count": 38, "metadata": {}, "output_type": "execute_result" } ], "source": [ "evaluate()" ] }, { "cell_type": "markdown", "id": "4c54061a", "metadata": { "id": "4c54061a" }, "source": [ "## Step 5: Train the model" ] }, { "cell_type": "code", "execution_count": 39, "id": "3638a75d", "metadata": { "id": "3638a75d" }, "outputs": [], "source": [ "def train_epoch():\n", " model.train()\n", " optimizer =
 torch.optim.AdamW(model.parameters(), lr=learning_rate) # note: a fresh optimizer (with reset moment estimates) is created at every epoch\n", " total_loss = 0.\n", " start_time = time.time()\n", " for batch, i in enumerate(range(0, len(data_train) - 1, batch_size)):\n", " prompts, target_answers, length_prompts, length_answers = get_batch(\"train\", i)\n", " prompts = prompts.to(device) # (length_prompts, batch_size)\n", " target_answers = target_answers.to(device) # (length_answers, batch_size)\n", " input_tensor = torch.cat((prompts, target_answers), 0) # (length_prompts + length_answers, batch_size)\n", " model.zero_grad()\n", " output, _ = model(input_tensor) # (length_prompts + length_answers, batch_size, ntokens)\n", " output_answers = output[length_prompts-1:-1,:,:].reshape(-1, ntokens) # (length_answers * batch_size, ntokens)\n", " target_answers = target_answers.view(-1)\n", " loss = F.cross_entropy(output_answers, target_answers)\n", " loss.backward()\n", " optimizer.step()\n", "\n", " total_loss += loss.item()\n", "\n", " if batch % log_interval == 0 and batch > 0:\n", " cur_loss = total_loss / log_interval\n", " elapsed = time.time() - start_time\n", " print('| {:5d}/{:5d} batches | ms/batch {:5.2f} | loss {:5.2f} | perplexity {:8.2f}'.format(batch, len(data_train) // batch_size,\n", " elapsed * 1000 / log_interval, cur_loss, math.exp(cur_loss)))\n", " total_loss = 0\n", " start_time = time.time()\n", "\n", "def train():\n", " best_test_accuracy = None\n", " test_accuracy = evaluate()\n", " print('-' * 89)\n", " print('| initialisation | test accuracy {:5.2f}'.format(test_accuracy))\n", " print('-' * 89)\n", " for epoch in range(1, epochs+1):\n", " epoch_start_time = time.time()\n", " train_epoch()\n", " test_accuracy = evaluate()\n", " print('-' * 89)\n", " print('| end of epoch {:3d} | time: {:5.2f}s | test accuracy {:5.2f}'.format(epoch, (time.time() - epoch_start_time), test_accuracy))\n", " print('-' * 89)\n", " # Save the model if the test accuracy is the best we've seen so far.\n", " if best_test_accuracy is None or test_accuracy > best_test_accuracy:\n", " with open(\"arithmetic.pt\", 'wb') as f:\n", " torch.save(model, f)\n", " best_test_accuracy = test_accuracy" ] }, { "cell_type": "code", "execution_count": 40, "id": "4e2a8490", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "4e2a8490", "outputId": "f70dcac2-5891-4266-8748-85df050f4881" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "-----------------------------------------------------------------------------------------\n", "| initialisation | test accuracy 0.00\n", "-----------------------------------------------------------------------------------------\n", "| 200/ 900 batches | ms/batch 66.91 | loss 1.79 | perplexity 6.00\n", "| 400/ 900 batches | ms/batch 81.77 | loss 1.44 | perplexity 4.24\n", "| 600/ 900 batches | ms/batch 68.35 | loss 1.32 | perplexity 3.73\n", "| 800/ 900 batches | ms/batch 67.58 | loss 1.23 | perplexity 3.41\n", "-----------------------------------------------------------------------------------------\n", "| end of epoch 1 | time: 75.38s | test accuracy 0.01\n", "-----------------------------------------------------------------------------------------\n", "| 200/ 900 batches | ms/batch 99.65 | loss 1.17 | perplexity 3.22\n", "| 400/ 900 batches | ms/batch 92.11 | loss 1.13 | perplexity 3.11\n", "| 600/ 900 batches | ms/batch 117.84 | loss 1.12 | perplexity 3.05\n", "| 800/ 900 batches | ms/batch 70.46 | loss 1.11 | perplexity 3.02\n",
"-----------------------------------------------------------------------------------------\n", "| end of epoch 2 | time: 88.86s | test accuracy 0.01\n", "-----------------------------------------------------------------------------------------\n", "| 200/ 900 batches | ms/batch 67.21 | loss 1.09 | perplexity 2.98\n", "| 400/ 900 batches | ms/batch 71.27 | loss 1.08 | perplexity 2.94\n", "| 600/ 900 batches | ms/batch 91.96 | loss 1.06 | perplexity 2.90\n", "| 800/ 900 batches | ms/batch 84.17 | loss 1.06 | perplexity 2.90\n", "-----------------------------------------------------------------------------------------\n", "| end of epoch 3 | time: 78.11s | test accuracy 0.01\n", "-----------------------------------------------------------------------------------------\n", "| 200/ 900 batches | ms/batch 84.22 | loss 1.06 | perplexity 2.88\n", "| 400/ 900 batches | ms/batch 68.48 | loss 1.05 | perplexity 2.86\n", "| 600/ 900 batches | ms/batch 85.26 | loss 1.05 | perplexity 2.86\n", "| 800/ 900 batches | ms/batch 76.09 | loss 1.04 | perplexity 2.84\n", "-----------------------------------------------------------------------------------------\n", "| end of epoch 4 | time: 79.32s | test accuracy 0.01\n", "-----------------------------------------------------------------------------------------\n" ] } ], "source": [ "train()" ] }, { "cell_type": "code", "execution_count": 41, "id": "56d9d440", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "56d9d440", "outputId": "1872232b-b120-440b-e1a6-666e079efa3b" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "829+987=1814\t actual result: 1816\n", "591+117=622\t actual result: 708\n", "70+449=121\t actual result: 519\n", "175+490=622\t actual result: 665\n", "148+4=561\t actual result: 152\n", "509+877=1361\t actual result: 1386\n", "657+104=721\t actual result: 761\n", "12+335=411\t actual result: 347\n", "383+163=521\t actual result: 546\n", "430+391=822\t actual result: 821\n", "422+228=622\t actual result: 650\n", "274+579=841\t actual result: 853\n", "747+132=864\t actual result: 879\n", "925+574=1521\t actual result: 1499\n", "724+546=1266\t actual result: 1270\n", "89+488=136\t actual result: 577\n", "9+871=171\t actual result: 880\n", "272+296=511\t actual result: 568\n", "577+399=924\t actual result: 976\n", "697+6=146\t actual result: 703\n" ] } ], "source": [ "model.eval()\n", "\n", "for i in range(20):\n", " prompt, answers = data_test[i]\n", " prompt_tensor = torch.tensor(tokenizer.encode(prompt)).view((-1,1))\n", " output = generate(model, prompt_tensor, len(answers)).view((1,-1))\n", " print(tokenizer.decode(output.tolist()[0]) + \"\\t actual result: \" + answers)" ] }, { "cell_type": "markdown", "id": "qJ9IOZu8Xo4Y", "metadata": { "id": "qJ9IOZu8Xo4Y" }, "source": [ "## Probing" ] }, { "cell_type": "markdown", "id": "78be1213", "metadata": {}, "source": [ "This is just for fun..." 
] }, { "cell_type": "code", "execution_count": 42, "id": "yomPfirhXkLb", "metadata": { "id": "yomPfirhXkLb" }, "outputs": [], "source": [ "import numpy as np\n", "\n", "train_size = 1000\n", "test_size = 100\n", "\n", "model.eval()\n", "\n", "def data_probing(size):\n", " X = []\n", " y = np.zeros(size)\n", " for i in range(size):\n", " input = torch.tensor(tokenizer.encode(data[i][0])).view((-1, 1)).to(device)\n", " _, output = model(input)\n", " output = output[-1,:,:].flatten()\n", " # determine whether there was a carry in the result:\n", " carry = len(data[i][1]) > len(data[i][0]) / 2\n", " X.append(output.cpu().detach().numpy())\n", " y[i] = carry\n", " return np.array(X), y" ] }, { "cell_type": "code", "execution_count": 43, "id": "QGmfXVxkppfP", "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "QGmfXVxkppfP", "outputId": "6601c884-004f-40bb-8a1a-71995b17d860" }, "outputs": [ { "data": { "text/plain": [ "1.0" ] }, "execution_count": 43, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from sklearn.linear_model import LogisticRegression\n", "from sklearn.preprocessing import StandardScaler\n", "\n", "X_train, y_train = data_probing(train_size)\n", "X_test, y_test = data_probing(test_size)\n", "\n", "scaler = StandardScaler()\n", "X_train = scaler.fit_transform(X_train)\n", "X_test = scaler.fit_transform(X_test)\n", "\n", "reg = LogisticRegression()\n", "reg.fit(X_train,y_train)\n", "reg.score(X_test, y_test)" ] } ], "metadata": { "accelerator": "GPU", "colab": { "gpuType": "T4", "provenance": [] }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 5 }