{ "cells": [ { "cell_type": "code", "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "zJvyUmZdktu0", "outputId": "25275dd8-4787-4b77-f7bf-4f2bbe66f0b8" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "/workspace/axolotl\n" ] } ], "source": [ "%cd /workspace/axolotl" ] }, { "cell_type": "code", "execution_count": 2, "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "mC48y25Lkqa5", "outputId": "2757a3c8-3790-4fd3-be39-03be8f533b35" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Setting ds_accelerator to cuda (auto detect)\n", "accelerate configuration saved at /root/.cache/huggingface/accelerate/default_config.yaml\n" ] } ], "source": [ "!accelerate config --config_file configs/accelerate/default_config.yaml default" ] }, { "cell_type": "code", "execution_count": 5, "metadata": { "scrolled": true }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "# Based on https://gist.github.com/fearnworks/723709806cebc67bafe1eb8138e7efbd\n", "base_model: huggyllama/llama-7b\n", "base_model_config: huggyllama/llama-7b\n", "model_type: LlamaForCausalLM\n", "tokenizer_type: LlamaTokenizer\n", "load_in_8bit: false\n", "load_in_4bit: true\n", "strict: false\n", "push_dataset_to_hub:\n", "datasets:\n", " # - path: AtlasUnified/Code-Instruct-Sets\n", " # data_files:\n", " # - unmasked-set-1.jsonl\n", " # - unmasked-set-2.jsonl\n", " # - unmasked-set-3.jsonl\n", " # - unmasked-set-4.jsonl\n", " # type: alpaca_code_instruct\n", " # - path: winglian/pygmalion-cleaned\n", " # data_files:\n", " # - v13_no_ai.cleaned.jsonl\n", " # type: pygmalion\n", " # shards: 4\n", " # - path: winglian/evals\n", " # data_files:\n", " # - hf/ARC-Challenge.jsonl\n", " # - hf/ARC-Easy.jsonl\n", " # - hf/riddle_sense.jsonl\n", " # type: explainchoice:chat\n", " # - path: winglian/evals\n", " # data_files:\n", " # - hf/gsm8k.jsonl\n", " # - custom/logic_inference_oa.jsonl\n", " # type: alpaca_chat.load_qa\n", " # - path: winglian/evals\n", " # data_files:\n", " # - custom/in_context_qa.jsonl\n", " # type: context_qa\n", " # - path: winglian/evals\n", " # data_files:\n", " # - custom/in_context_qa.jsonl\n", " # type: context_qa.load_404\n", " # - path: winglian/evals\n", " # data_files:\n", " # - custom/jokes_explained_500up.jsonl\n", " # type: sharegpt_jokes\n", " # - path: winglian/evals\n", " # data_files:\n", " # - custom/classify-self-chat.sharegpt.jsonl\n", " # - custom/coding-self-chat.sharegpt.jsonl\n", " # - custom/prose-gpt4.sharegpt.jsonl\n", " # - custom/prose-rewrite-gpt4.sharegpt.jsonl\n", " # type: sharegpt_simple\n", " # - path: winglian/evals\n", " # data_files:\n", " # - custom/guanaco-cleaned.en.jsonl\n", " # type: sharegpt_simple.load_guanaco\n", " # - path: winglian/evals\n", " # data_files:\n", " # - openai/tldr.jsonl\n", " # type: summarizetldr:chat\n", " # - path: winglian/evals\n", " # data_files:\n", " # - hellaswag/hellaswag.jsonl\n", " # type: explainchoice:chat\n", " # shards: 60\n", " # - path: metaeval/ScienceQA_text_only\n", " # type: concisechoice:chat\n", " # shards: 13\n", " # - path: teknium/GPTeacher-General-Instruct\n", " # data_files: \n", " # - gpt4-instruct-similarity-0.6-dataset.json\n", " # type: gpteacher:chat\n", " - path: QingyiSi/Alpaca-CoT\n", " data_files:\n", " # - chain-of-thought/formatted_cot_data/aqua_train.jsonl\n", " # - Chain-of-Thought/formatted_cot_data/creak_train.json\n", " # - Chain-of-Thought/formatted_cot_data/ecqa_train.json\n", " # - 
Chain-of-Thought/formatted_cot_data/esnli_train.json\n", " - Chain-of-Thought/formatted_cot_data/gsm8k_train.json\n", " # - Chain-of-Thought/formatted_cot_data/qasc_train.json\n", " # - Chain-of-Thought/formatted_cot_data/qed_train.json\n", " # - Chain-of-Thought/formatted_cot_data/sensemaking_train.json\n", " # - Chain-of-Thought/formatted_cot_data/strategyqa_train.json\n", " # - GPTeacher/Roleplay/formatted_roleplay-similarity_0.6-instruct-dataset.json\n", " type: \"alpaca:chat\"\n", "dataset_prepared_path: last_run_prepared\n", "val_set_size: 0.01\n", "adapter: qlora\n", "lora_model_dir:\n", "sequence_len: 2048\n", "max_packed_sequence_len: 2048\n", "lora_r: 64\n", "lora_alpha: 16\n", "lora_dropout: 0.05\n", "lora_target_modules:\n", "lora_target_linear: true\n", "lora_fan_in_fan_out:\n", "wandb_project: huggyllama-qlora\n", "wandb_watch:\n", "wandb_run_id:\n", "wandb_log_model: checkpoint\n", "output_dir: ./qlora-out\n", "batch_size: 16\n", "micro_batch_size: 4\n", "num_epochs: 3\n", "optimizer: paged_adamw_32bit\n", "torchdistx_path:\n", "lr_scheduler: cosine\n", "learning_rate: 0.0002\n", "train_on_inputs: false\n", "group_by_length: false\n", "bf16: true\n", "fp16: false\n", "tf32: true\n", "gradient_checkpointing: true\n", "# stop training after this many evaluation losses have increased in a row\n", "# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\n", "early_stopping_patience: 3\n", "resume_from_checkpoint:\n", "auto_resume_from_checkpoints: true\n", "local_rank:\n", "logging_steps: 1\n", "xformers_attention: false\n", "flash_attention:\n", "gptq_groupsize:\n", "gptq_model_v1:\n", "warmup_steps: 10\n", "eval_steps: 5\n", "save_steps: 10\n", "debug:\n", "deepspeed:\n", "weight_decay: 0.000001\n", "fsdp:\n", "fsdp_config:\n", "special_tokens:\n", " bos_token: \"<s>\"\n", " eos_token: \"</s>\"\n", " unk_token: \"<unk>\"" ] } ], "source": [ "!cat examples/huggyllama/qlora.yml" ] }, { "cell_type": "code", "execution_count": null, "metadata": { "id": "jICMPJuomFsx" }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Setting ds_accelerator to cuda (auto detect)\n", "\n", "===================================BUG REPORT===================================\n", "Welcome to bitsandbytes. For bug reports, please run\n", "\n", "python -m bitsandbytes\n", "\n", " and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues\n", "================================================================================\n", "bin /root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/libbitsandbytes_cuda118.so\n", "/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('/usr/local/nvidia/lib'), PosixPath('/usr/local/nvidia/lib64')}\n", " warn(msg)\n", "/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: /usr/local/nvidia/lib:/usr/local/nvidia/lib64 did not contain ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] as expected! 
Searching further paths...\n", " warn(msg)\n", "/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('module'), PosixPath('//matplotlib_inline.backend_inline')}\n", " warn(msg)\n", "/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK1tFOFrWbmoa2ckCJYhzgBHKTSMeR/AeuScCCzugqlI utensilcandel@gmail.com')}\n", " warn(msg)\n", "CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching in backup paths...\n", "/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: Found duplicate ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] files: {PosixPath('/usr/local/cuda/lib64/libcudart.so.11.0'), PosixPath('/usr/local/cuda/lib64/libcudart.so')}.. We'll flip a coin and try one of these, in order to fail forward.\n", "Either way, this might cause trouble in the future:\n", "If you get `CUDA error: invalid device function` errors, the above might be the cause and the solution is to make sure only one ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] in the paths that we search based on your env.\n", " warn(msg)\n", "CUDA SETUP: CUDA runtime path found: /usr/local/cuda/lib64/libcudart.so.11.0\n", "CUDA SETUP: Highest compute capability among GPUs detected: 8.6\n", "CUDA SETUP: Detected CUDA version 118\n", "CUDA SETUP: Loading binary /root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/libbitsandbytes_cuda118.so...\n", "Setting ds_accelerator to cuda (auto detect)\n", "INFO:root:loading tokenizer...\n", "Using pad_token, but it is not set yet.\n", "INFO:root:Loading prepared packed dataset from disk at last_run_prepared/21a0611c6c2b67b31f00097fa2a91c26...\n", "INFO:root:Prepared packed dataset loaded from disk...\n", "INFO:root:loading model and peft_config...\n", "Loading checkpoint shards: 100%|██████████████████| 2/2 [00:19<00:00, 9.86s/it]\n", "INFO:root:converting PEFT model w/ prepare_model_for_int8_training\n", "/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/peft/utils/other.py:76: FutureWarning: prepare_model_for_int8_training is deprecated and will be removed in a future version. Use prepare_model_for_kbit_training instead.\n", " warnings.warn(\n", "INFO:root:found linear modules: ['v_proj', 'k_proj', 'gate_proj', 'q_proj', 'o_proj', 'down_proj', 'up_proj']\n", "trainable params: 159907840 || all params: 3660320768 || trainable%: 4.368683788535114\n", "INFO:root:Compiling torch model\n", "INFO:root:Pre-saving adapter config to ./qlora-out\n", "INFO:root:Starting trainer...\n", "INFO:root:Using Auto-resume functionality to start with checkpoint at qlora-out/checkpoint-130\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mutensil\u001b[0m. 
Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Tracking run with wandb version 0.15.3\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Run data is saved locally in \u001b[35m\u001b[1m/workspace/axolotl/wandb/run-20230531_121630-p5lvijpv\u001b[0m\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Run \u001b[1m`wandb offline`\u001b[0m to turn off syncing.\n", "\u001b[34m\u001b[1mwandb\u001b[0m: Syncing run \u001b[33msummer-gorge-6\u001b[0m\n", "\u001b[34m\u001b[1mwandb\u001b[0m: ⭐️ View project at \u001b[34m\u001b[4mhttps://wandb.ai/utensil/huggyllama-qlora\u001b[0m\n", "\u001b[34m\u001b[1mwandb\u001b[0m: 🚀 View run at \u001b[34m\u001b[4mhttps://wandb.ai/utensil/huggyllama-qlora/runs/p5lvijpv\u001b[0m\n", "{'loss': 0.4474, 'learning_rate': 8.952245334118414e-06, 'epoch': 2.62} \n", "{'loss': 0.4717, 'learning_rate': 8.047222744854943e-06, 'epoch': 2.64} \n", "{'loss': 0.4533, 'learning_rate': 7.1885011480961164e-06, 'epoch': 2.66} \n", "{'loss': 0.4353, 'learning_rate': 6.37651293602628e-06, 'epoch': 2.68} \n", "{'loss': 0.4545, 'learning_rate': 5.611666969163243e-06, 'epoch': 2.7} \n", " 90%|████████████████████████████████████▉ | 135/150 [03:37<00:53, 3.56s/it]\n", " 0%| | 0/3 [00:00