{ "cells": [ { "cell_type": "markdown", "metadata": { "id": "Pdval7tUZwdZ" }, "source": [ "# Training a tokenizer for code" ] }, { "cell_type": "markdown", "metadata": { "id": "CorBhMaiZwdb" }, "source": [ "Install the Transformers, Datasets, and Evaluate libraries to run this notebook." ] }, { "cell_type": "code", "execution_count": 1, "metadata": { "id": "CPGVVOEHZwdb" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Defaulting to user installation because normal site-packages is not writeable\n", "Requirement already satisfied: datasets in /home/nathanael/.local/lib/python3.10/site-packages (3.1.0)\n", "Requirement already satisfied: evaluate in /home/nathanael/.local/lib/python3.10/site-packages (0.4.3)\n", "Requirement already satisfied: transformers[sentencepiece] in /home/nathanael/.local/lib/python3.10/site-packages (4.41.2)\n", "Requirement already satisfied: filelock in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (3.13.1)\n", "Requirement already satisfied: numpy>=1.17 in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (1.26.4)\n", "Requirement already satisfied: pyarrow>=15.0.0 in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (18.0.0)\n", "Requirement already satisfied: dill<0.3.9,>=0.3.0 in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (0.3.8)\n", "Requirement already satisfied: pandas in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (1.5.0)\n", "Requirement already satisfied: requests>=2.32.2 in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (2.32.3)\n", "Requirement already satisfied: tqdm>=4.66.3 in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (4.67.0)\n", "Requirement already satisfied: xxhash in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (3.5.0)\n", "Requirement already satisfied: multiprocess<0.70.17 in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (0.70.16)\n", "Requirement already satisfied: fsspec<=2024.9.0,>=2023.1.0 in /home/nathanael/.local/lib/python3.10/site-packages (from fsspec[http]<=2024.9.0,>=2023.1.0->datasets) (2024.2.0)\n", "Requirement already satisfied: aiohttp in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (3.9.3)\n", "Requirement already satisfied: huggingface-hub>=0.23.0 in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (0.26.2)\n", "Requirement already satisfied: packaging in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (23.2)\n", "Requirement already satisfied: pyyaml>=5.1 in /home/nathanael/.local/lib/python3.10/site-packages (from datasets) (6.0.1)\n", "Requirement already satisfied: regex!=2019.12.17 in /home/nathanael/.local/lib/python3.10/site-packages (from transformers[sentencepiece]) (2023.3.23)\n", "Requirement already satisfied: tokenizers<0.20,>=0.19 in /home/nathanael/.local/lib/python3.10/site-packages (from transformers[sentencepiece]) (0.19.1)\n", "Requirement already satisfied: safetensors>=0.4.1 in /home/nathanael/.local/lib/python3.10/site-packages (from transformers[sentencepiece]) (0.4.3)\n", "Requirement already satisfied: sentencepiece!=0.1.92,>=0.1.91 in /home/nathanael/.local/lib/python3.10/site-packages (from transformers[sentencepiece]) (0.2.0)\n", "Requirement already satisfied: protobuf in /home/nathanael/.local/lib/python3.10/site-packages (from transformers[sentencepiece]) (4.25.3)\n", "Requirement already satisfied: aiosignal>=1.1.2 
"source": [ "!pip install datasets evaluate transformers[sentencepiece]" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "The goal is to train a tokenizer for code.\n", "We'll use this dataset of (comment, code) pairs:\n", "https://huggingface.co/datasets/code-search-net/code_search_net" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "id": "mR5Hw12jZwde", "scrolled": true }, "outputs": [], "source": [ "from datasets import load_dataset\n", "\n", "raw_datasets = load_dataset(\"code_search_net\", \"python\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We expose the training split as a generator that yields batches of function strings, so the whole corpus never has to sit in memory at once:" ] }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ "# Yield batches of 1,000 function strings for tokenizer training\n", "training_corpus = (\n", "    raw_datasets[\"train\"][i : i + 1000][\"whole_func_string\"]\n", "    for i in range(0, len(raw_datasets[\"train\"]), 1000)\n", ")" ] }, { "cell_type": "code", "execution_count": 1, "metadata": {}, "outputs": [ { "data": { "text/plain": [ "GPT2TokenizerFast(name_or_path='gpt2', vocab_size=50257, model_max_length=1024, is_fast=True, padding_side='right', truncation_side='right', special_tokens={'bos_token': '<|endoftext|>', 'eos_token': '<|endoftext|>', 'unk_token': '<|endoftext|>'}, clean_up_tokenization_spaces=True), added_tokens_decoder={\n", "\t50256: AddedToken(\"<|endoftext|>\", rstrip=False, lstrip=False, single_word=False, normalized=True, special=True),\n", "}" ] }, "execution_count": 1, "metadata": {}, "output_type": "execute_result" } ], "source": [ "from transformers import AutoTokenizer\n", "\n", "old_tokenizer = AutoTokenizer.from_pretrained(\"gpt2\")\n", "old_tokenizer" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Question 2: Figure out how many tokens `old_tokenizer` has in its vocabulary." ] },
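{ "cell_type": "markdown", "metadata": {}, "source": [ "A minimal sketch for Question 2: `vocab_size` reports the base vocabulary, while `len()` also counts any added special tokens." ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Base vocabulary size of the pretrained GPT-2 tokenizer\n", "print(old_tokenizer.vocab_size)\n", "# Total size including added special tokens such as <|endoftext|>\n", "print(len(old_tokenizer))" ] },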
" 'Add',\n", " 'Ġthe',\n", " 'Ġtwo',\n", " 'Ġnumbers',\n", " 'Ġ`',\n", " 'a',\n", " '`',\n", " 'Ġand',\n", " 'Ġ`',\n", " 'b',\n", " '`',\n", " '.\"',\n", " '\"\"',\n", " 'Ċ',\n", " 'Ġ',\n", " 'Ġ',\n", " 'Ġ',\n", " 'Ġreturn',\n", " 'Ġa',\n", " 'Ġ+',\n", " 'Ġb']" ] }, "execution_count": 20, "metadata": {}, "output_type": "execute_result" } ], "source": [ "example = '''def add_numbers(a, b):\n", " \"\"\"Add the two numbers `a` and `b`.\"\"\"\n", " return a + b'''\n", "\n", "tokens = old_tokenizer.tokenize(example)\n", "tokens" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Question 3:\n", "* What do the special symbols Ġ and Ċ denote?\n", "* Why is this tokenization not optimal?" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "We train a new tokenizer on our training corpus:" ] }, { "cell_type": "code", "execution_count": 21, "metadata": { "id": "IZoDNfuZZwdi" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "\n", "\n" ] } ], "source": [ "tokenizer = old_tokenizer.train_new_from_iterator(training_corpus, vocab_size=52000)" ] }, { "cell_type": "code", "execution_count": 22, "metadata": { "id": "WtFHzYclZwdj", "outputId": "ed4dae8b-71e6-4b11-f083-b37f7adb741d" }, "outputs": [ { "data": { "text/plain": [ "['def',\n", " 'Ġadd',\n", " '_',\n", " 'numbers',\n", " '(',\n", " 'a',\n", " ',',\n", " 'Ġb',\n", " '):',\n", " 'ĊĠĠĠ',\n", " 'Ġ\"\"\"',\n", " 'Add',\n", " 'Ġthe',\n", " 'Ġtwo',\n", " 'Ġnumbers',\n", " 'Ġ`',\n", " 'a',\n", " '`',\n", " 'Ġand',\n", " 'Ġ`',\n", " 'b',\n", " '`.\"\"\"',\n", " 'ĊĠĠĠ',\n", " 'Ġreturn',\n", " 'Ġa',\n", " 'Ġ+',\n", " 'Ġb']" ] }, "execution_count": 22, "metadata": {}, "output_type": "execute_result" } ], "source": [ "tokens = tokenizer.tokenize(example)\n", "tokens" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Question 4: Why is this tokenization better?" 
] }, { "cell_type": "code", "execution_count": 23, "metadata": { "id": "cDYv9wXdZwdj", "outputId": "523ecca3-932a-47e3-a276-674deecbc0ea" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "27\n", "36\n" ] } ], "source": [ "print(len(tokens))\n", "print(len(old_tokenizer.tokenize(example)))" ] }, { "cell_type": "code", "execution_count": 24, "metadata": { "id": "2O5-ovFQZwdj", "outputId": "f5c38c35-d692-43f0-bdbc-61f4b7be1437" }, "outputs": [ { "data": { "text/plain": [ "['class',\n", " 'ĠLinear',\n", " 'Layer',\n", " '():',\n", " 'ĊĠĠĠ',\n", " 'Ġdef',\n", " 'Ġ__',\n", " 'init',\n", " '__(',\n", " 'self',\n", " ',',\n", " 'Ġinput',\n", " '_',\n", " 'size',\n", " ',',\n", " 'Ġoutput',\n", " '_',\n", " 'size',\n", " '):',\n", " 'ĊĠĠĠĠĠĠĠ',\n", " 'Ġself',\n", " '.',\n", " 'weight',\n", " 'Ġ=',\n", " 'Ġtorch',\n", " '.',\n", " 'randn',\n", " '(',\n", " 'input',\n", " '_',\n", " 'size',\n", " ',',\n", " 'Ġoutput',\n", " '_',\n", " 'size',\n", " ')',\n", " 'ĊĠĠĠĠĠĠĠ',\n", " 'Ġself',\n", " '.',\n", " 'bias',\n", " 'Ġ=',\n", " 'Ġtorch',\n", " '.',\n", " 'zeros',\n", " '(',\n", " 'output',\n", " '_',\n", " 'size',\n", " ')',\n", " 'ĊĊĠĠĠ',\n", " 'Ġdef',\n", " 'Ġ__',\n", " 'call',\n", " '__(',\n", " 'self',\n", " ',',\n", " 'Ġx',\n", " '):',\n", " 'ĊĠĠĠĠĠĠĠ',\n", " 'Ġreturn',\n", " 'Ġx',\n", " 'Ġ@',\n", " 'Ġself',\n", " '.',\n", " 'weights',\n", " 'Ġ+',\n", " 'Ġself',\n", " '.',\n", " 'bias',\n", " 'ĊĠĠĠĠ']" ] }, "execution_count": 24, "metadata": {}, "output_type": "execute_result" } ], "source": [ "example = \"\"\"class LinearLayer():\n", " def __init__(self, input_size, output_size):\n", " self.weight = torch.randn(input_size, output_size)\n", " self.bias = torch.zeros(output_size)\n", "\n", " def __call__(self, x):\n", " return x @ self.weights + self.bias\n", " \"\"\"\n", "tokenizer.tokenize(example)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "Question 5: Further evidence that this tokenization is better?" ] } ], "metadata": { "colab": { "name": "Training a new tokenizer from an old one", "provenance": [] }, "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12" } }, "nbformat": 4, "nbformat_minor": 1 }