{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "zJvyUmZdktu0",
"outputId": "25275dd8-4787-4b77-f7bf-4f2bbe66f0b8"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/workspace/axolotl\n"
]
}
],
"source": [
"%cd /workspace/axolotl"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "mC48y25Lkqa5",
"outputId": "2757a3c8-3790-4fd3-be39-03be8f533b35"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Setting ds_accelerator to cuda (auto detect)\n",
"accelerate configuration saved at /root/.cache/huggingface/accelerate/default_config.yaml\n"
]
}
],
"source": [
"!accelerate config --config_file configs/accelerate/default_config.yaml default"
]
},
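{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "Per the log above, the default configuration was saved under the Hugging Face cache rather than the `--config_file` path. Printing it is a cheap way to confirm the compute environment and precision settings before launching:"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Inspect the accelerate config written by the previous cell (path taken from its log output).\n",
  "!cat /root/.cache/huggingface/accelerate/default_config.yaml"
 ]
},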
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"# Based on https://gist.github.com/fearnworks/723709806cebc67bafe1eb8138e7efbd\n",
"base_model: huggyllama/llama-7b\n",
"base_model_config: huggyllama/llama-7b\n",
"model_type: LlamaForCausalLM\n",
"tokenizer_type: LlamaTokenizer\n",
"load_in_8bit: false\n",
"load_in_4bit: true\n",
"strict: false\n",
"push_dataset_to_hub:\n",
"datasets:\n",
" # - path: AtlasUnified/Code-Instruct-Sets\n",
" # data_files:\n",
" # - unmasked-set-1.jsonl\n",
" # - unmasked-set-2.jsonl\n",
" # - unmasked-set-3.jsonl\n",
" # - unmasked-set-4.jsonl\n",
" # type: alpaca_code_instruct\n",
" # - path: winglian/pygmalion-cleaned\n",
" # data_files:\n",
" # - v13_no_ai.cleaned.jsonl\n",
" # type: pygmalion\n",
" # shards: 4\n",
" # - path: winglian/evals\n",
" # data_files:\n",
" # - hf/ARC-Challenge.jsonl\n",
" # - hf/ARC-Easy.jsonl\n",
" # - hf/riddle_sense.jsonl\n",
" # type: explainchoice:chat\n",
" # - path: winglian/evals\n",
" # data_files:\n",
" # - hf/gsm8k.jsonl\n",
" # - custom/logic_inference_oa.jsonl\n",
" # type: alpaca_chat.load_qa\n",
" # - path: winglian/evals\n",
" # data_files:\n",
" # - custom/in_context_qa.jsonl\n",
" # type: context_qa\n",
" # - path: winglian/evals\n",
" # data_files:\n",
" # - custom/in_context_qa.jsonl\n",
" # type: context_qa.load_404\n",
" # - path: winglian/evals\n",
" # data_files:\n",
" # - custom/jokes_explained_500up.jsonl\n",
" # type: sharegpt_jokes\n",
" # - path: winglian/evals\n",
" # data_files:\n",
" # - custom/classify-self-chat.sharegpt.jsonl\n",
" # - custom/coding-self-chat.sharegpt.jsonl\n",
" # - custom/prose-gpt4.sharegpt.jsonl\n",
" # - custom/prose-rewrite-gpt4.sharegpt.jsonl\n",
" # type: sharegpt_simple\n",
" # - path: winglian/evals\n",
" # data_files:\n",
" # - custom/guanaco-cleaned.en.jsonl\n",
" # type: sharegpt_simple.load_guanaco\n",
" # - path: winglian/evals\n",
" # data_files:\n",
" # - openai/tldr.jsonl\n",
" # type: summarizetldr:chat\n",
" # - path: winglian/evals\n",
" # data_files:\n",
" # - hellaswag/hellaswag.jsonl\n",
" # type: explainchoice:chat\n",
" # shards: 60\n",
" # - path: metaeval/ScienceQA_text_only\n",
" # type: concisechoice:chat\n",
" # shards: 13\n",
" # - path: teknium/GPTeacher-General-Instruct\n",
" # data_files: \n",
" # - gpt4-instruct-similarity-0.6-dataset.json\n",
" # type: gpteacher:chat\n",
" - path: QingyiSi/Alpaca-CoT\n",
" data_files:\n",
" # - chain-of-thought/formatted_cot_data/aqua_train.jsonl\n",
" # - Chain-of-Thought/formatted_cot_data/creak_train.json\n",
" # - Chain-of-Thought/formatted_cot_data/ecqa_train.json\n",
" # - Chain-of-Thought/formatted_cot_data/esnli_train.json\n",
" - Chain-of-Thought/formatted_cot_data/gsm8k_train.json\n",
" # - Chain-of-Thought/formatted_cot_data/qasc_train.json\n",
" # - Chain-of-Thought/formatted_cot_data/qed_train.json\n",
" # - Chain-of-Thought/formatted_cot_data/sensemaking_train.json\n",
" # - Chain-of-Thought/formatted_cot_data/strategyqa_train.json\n",
" # - GPTeacher/Roleplay/formatted_roleplay-similarity_0.6-instruct-dataset.json\n",
" type: \"alpaca:chat\"\n",
"dataset_prepared_path: last_run_prepared\n",
"val_set_size: 0.01\n",
"adapter: qlora\n",
"lora_model_dir:\n",
"sequence_len: 2048\n",
"max_packed_sequence_len: 2048\n",
"lora_r: 64\n",
"lora_alpha: 16\n",
"lora_dropout: 0.05\n",
"lora_target_modules:\n",
"lora_target_linear: true\n",
"lora_fan_in_fan_out:\n",
"wandb_project: huggyllama-qlora\n",
"wandb_watch:\n",
"wandb_run_id:\n",
"wandb_log_model: checkpoint\n",
"output_dir: ./qlora-out\n",
"batch_size: 16\n",
"micro_batch_size: 4\n",
"num_epochs: 3\n",
"optimizer: paged_adamw_32bit\n",
"torchdistx_path:\n",
"lr_scheduler: cosine\n",
"learning_rate: 0.0002\n",
"train_on_inputs: false\n",
"group_by_length: false\n",
"bf16: true\n",
"fp16: false\n",
"tf32: true\n",
"gradient_checkpointing: true\n",
"# stop training after this many evaluation losses have increased in a row\n",
"# https://huggingface.co/transformers/v4.2.2/_modules/transformers/trainer_callback.html#EarlyStoppingCallback\n",
"early_stopping_patience: 3\n",
"resume_from_checkpoint:\n",
"auto_resume_from_checkpoints: true\n",
"local_rank:\n",
"logging_steps: 1\n",
"xformers_attention: false\n",
"flash_attention:\n",
"gptq_groupsize:\n",
"gptq_model_v1:\n",
"warmup_steps: 10\n",
"eval_steps: 5\n",
"save_steps: 10\n",
"debug:\n",
"deepspeed:\n",
"weight_decay: 0.000001\n",
"fsdp:\n",
"fsdp_config:\n",
"special_tokens:\n",
" bos_token: \"\"\n",
" eos_token: \"\"\n",
" unk_token: \"\""
]
}
],
"source": [
"!cat examples/huggyllama/qlora.yml"
]
},
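{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "A few of these settings interact: `batch_size` here is the effective batch size, from which axolotl derives gradient accumulation given `micro_batch_size`. Before committing to a long run, a quick sanity check of the YAML is cheap (a minimal sketch, assuming PyYAML, which axolotl already depends on):"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Sanity-check the training config before launching.\n",
  "import yaml\n",
  "\n",
  "with open('examples/huggyllama/qlora.yml') as f:\n",
  "    cfg = yaml.safe_load(f)\n",
  "\n",
  "# Effective gradient accumulation steps = batch_size / micro_batch_size.\n",
  "accum = cfg['batch_size'] // cfg['micro_batch_size']\n",
  "print('adapter:', cfg['adapter'], '| lora_r:', cfg['lora_r'], '| lora_alpha:', cfg['lora_alpha'])\n",
  "print('micro_batch_size:', cfg['micro_batch_size'], '| gradient accumulation:', accum)\n",
  "assert cfg['load_in_4bit'] and not cfg['load_in_8bit'], 'QLoRA expects 4-bit loading'"
 ]
},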
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "jICMPJuomFsx"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Setting ds_accelerator to cuda (auto detect)\n",
"\n",
"===================================BUG REPORT===================================\n",
"Welcome to bitsandbytes. For bug reports, please run\n",
"\n",
"python -m bitsandbytes\n",
"\n",
" and submit this information together with your error trace to: https://github.com/TimDettmers/bitsandbytes/issues\n",
"================================================================================\n",
"bin /root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/libbitsandbytes_cuda118.so\n",
"/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('/usr/local/nvidia/lib'), PosixPath('/usr/local/nvidia/lib64')}\n",
" warn(msg)\n",
"/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: /usr/local/nvidia/lib:/usr/local/nvidia/lib64 did not contain ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] as expected! Searching further paths...\n",
" warn(msg)\n",
"/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('module'), PosixPath('//matplotlib_inline.backend_inline')}\n",
" warn(msg)\n",
"/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: WARNING: The following directories listed in your path were found to be non-existent: {PosixPath('ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIK1tFOFrWbmoa2ckCJYhzgBHKTSMeR/AeuScCCzugqlI utensilcandel@gmail.com')}\n",
" warn(msg)\n",
"CUDA_SETUP: WARNING! libcudart.so not found in any environmental path. Searching in backup paths...\n",
"/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/cuda_setup/main.py:149: UserWarning: Found duplicate ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] files: {PosixPath('/usr/local/cuda/lib64/libcudart.so.11.0'), PosixPath('/usr/local/cuda/lib64/libcudart.so')}.. We'll flip a coin and try one of these, in order to fail forward.\n",
"Either way, this might cause trouble in the future:\n",
"If you get `CUDA error: invalid device function` errors, the above might be the cause and the solution is to make sure only one ['libcudart.so', 'libcudart.so.11.0', 'libcudart.so.12.0'] in the paths that we search based on your env.\n",
" warn(msg)\n",
"CUDA SETUP: CUDA runtime path found: /usr/local/cuda/lib64/libcudart.so.11.0\n",
"CUDA SETUP: Highest compute capability among GPUs detected: 8.6\n",
"CUDA SETUP: Detected CUDA version 118\n",
"CUDA SETUP: Loading binary /root/miniconda3/envs/py3.9/lib/python3.9/site-packages/bitsandbytes-0.39.0-py3.9.egg/bitsandbytes/libbitsandbytes_cuda118.so...\n",
"Setting ds_accelerator to cuda (auto detect)\n",
"INFO:root:loading tokenizer...\n",
"Using pad_token, but it is not set yet.\n",
"INFO:root:Loading prepared packed dataset from disk at last_run_prepared/21a0611c6c2b67b31f00097fa2a91c26...\n",
"INFO:root:Prepared packed dataset loaded from disk...\n",
"INFO:root:loading model and peft_config...\n",
"Loading checkpoint shards: 100%|██████████████████| 2/2 [00:19<00:00, 9.86s/it]\n",
"INFO:root:converting PEFT model w/ prepare_model_for_int8_training\n",
"/root/miniconda3/envs/py3.9/lib/python3.9/site-packages/peft/utils/other.py:76: FutureWarning: prepare_model_for_int8_training is deprecated and will be removed in a future version. Use prepare_model_for_kbit_training instead.\n",
" warnings.warn(\n",
"INFO:root:found linear modules: ['v_proj', 'k_proj', 'gate_proj', 'q_proj', 'o_proj', 'down_proj', 'up_proj']\n",
"trainable params: 159907840 || all params: 3660320768 || trainable%: 4.368683788535114\n",
"INFO:root:Compiling torch model\n",
"INFO:root:Pre-saving adapter config to ./qlora-out\n",
"INFO:root:Starting trainer...\n",
"INFO:root:Using Auto-resume functionality to start with checkpoint at qlora-out/checkpoint-130\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33mutensil\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: Tracking run with wandb version 0.15.3\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: Run data is saved locally in \u001b[35m\u001b[1m/workspace/axolotl/wandb/run-20230531_121630-p5lvijpv\u001b[0m\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: Run \u001b[1m`wandb offline`\u001b[0m to turn off syncing.\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: Syncing run \u001b[33msummer-gorge-6\u001b[0m\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: ⭐️ View project at \u001b[34m\u001b[4mhttps://wandb.ai/utensil/huggyllama-qlora\u001b[0m\n",
"\u001b[34m\u001b[1mwandb\u001b[0m: 🚀 View run at \u001b[34m\u001b[4mhttps://wandb.ai/utensil/huggyllama-qlora/runs/p5lvijpv\u001b[0m\n",
"{'loss': 0.4474, 'learning_rate': 8.952245334118414e-06, 'epoch': 2.62} \n",
"{'loss': 0.4717, 'learning_rate': 8.047222744854943e-06, 'epoch': 2.64} \n",
"{'loss': 0.4533, 'learning_rate': 7.1885011480961164e-06, 'epoch': 2.66} \n",
"{'loss': 0.4353, 'learning_rate': 6.37651293602628e-06, 'epoch': 2.68} \n",
"{'loss': 0.4545, 'learning_rate': 5.611666969163243e-06, 'epoch': 2.7} \n",
" 90%|████████████████████████████████████▉ | 135/150 [03:37<00:53, 3.56s/it]\n",
" 0%| | 0/3 [00:00, ?it/s]\u001b[A\n",
" 67%|██████████████████████████████ | 2/3 [00:03<00:01, 1.52s/it]\u001b[A\n",
" \u001b[A\n",
"\u001b[A{'eval_loss': 0.4905628561973572, 'eval_runtime': 6.2813, 'eval_samples_per_second': 1.433, 'eval_steps_per_second': 0.478, 'epoch': 2.7}\n",
" 90%|████████████████████████████████████▉ | 135/150 [03:44<00:53, 3.56s/it]\n",
"100%|█████████████████████████████████████████████| 3/3 [00:03<00:00, 1.00s/it]\u001b[A\n",
"{'loss': 0.4715, 'learning_rate': 4.8943483704846475e-06, 'epoch': 2.72} \u001b[A\n",
"{'loss': 0.4942, 'learning_rate': 4.224918331506955e-06, 'epoch': 2.74} \n",
"{'loss': 0.4535, 'learning_rate': 3.6037139304146762e-06, 'epoch': 2.76} \n",
"{'loss': 0.4396, 'learning_rate': 3.0310479623313127e-06, 'epoch': 2.78} \n",
"{'loss': 0.4367, 'learning_rate': 2.5072087818176382e-06, 'epoch': 2.8} \n",
" 93%|██████████████████████████████████████▎ | 140/150 [07:21<02:47, 16.75s/it]\n",
" 0%| | 0/3 [00:00, ?it/s]\u001b[A\n",
" 67%|██████████████████████████████ | 2/3 [00:03<00:01, 1.52s/it]\u001b[A\n",
" \u001b[A\n",
"\u001b[A{'eval_loss': 0.4902206063270569, 'eval_runtime': 6.2828, 'eval_samples_per_second': 1.432, 'eval_steps_per_second': 0.477, 'epoch': 2.8}\n",
" 93%|██████████████████████████████████████▎ | 140/150 [07:27<02:47, 16.75s/it]\n",
"100%|█████████████████████████████████████████████| 3/3 [00:03<00:00, 1.00s/it]\u001b[A\n",
" \u001b[A\u001b[34m\u001b[1mwandb\u001b[0m: Adding directory to artifact (./qlora-out/checkpoint-140)... Done. 8.7s\n",
"{'loss': 0.4674, 'learning_rate': 2.032460157676452e-06, 'epoch': 2.82} \n",
"{'loss': 0.4613, 'learning_rate': 1.6070411401370334e-06, 'epoch': 2.84} \n",
"{'loss': 0.4699, 'learning_rate': 1.231165940486234e-06, 'epoch': 2.86} \n",
"{'loss': 0.4253, 'learning_rate': 9.0502382320653e-07, 'epoch': 2.88} \n",
"{'loss': 0.436, 'learning_rate': 6.287790106757396e-07, 'epoch': 2.9} \n",
" 97%|███████████████████████████████████████▋ | 145/150 [11:19<02:57, 35.58s/it]\n",
" 0%| | 0/3 [00:00, ?it/s]\u001b[A\n",
" 67%|██████████████████████████████ | 2/3 [00:03<00:01, 1.52s/it]\u001b[A\n",
" \u001b[A\n",
"\u001b[A{'eval_loss': 0.4897999167442322, 'eval_runtime': 6.2814, 'eval_samples_per_second': 1.433, 'eval_steps_per_second': 0.478, 'epoch': 2.9}\n",
" 97%|███████████████████████████████████████▋ | 145/150 [11:25<02:57, 35.58s/it]\n",
"100%|█████████████████████████████████████████████| 3/3 [00:03<00:00, 1.00s/it]\u001b[A\n",
"{'loss': 0.4491, 'learning_rate': 4.025706004760932e-07, 'epoch': 2.92} \u001b[A\n",
"{'loss': 0.4556, 'learning_rate': 2.265124953543918e-07, 'epoch': 2.94} \n",
"{'loss': 0.4459, 'learning_rate': 1.0069334586854107e-07, 'epoch': 2.96} \n",
"{'loss': 0.448, 'learning_rate': 2.5176505749346936e-08, 'epoch': 2.98} \n",
" 99%|████████████████████████████████████████▋| 149/150 [14:19<00:41, 41.79s/it]"
]
}
],
"source": [
"!accelerate launch scripts/finetune.py examples/huggyllama/qlora.yml"
]
},
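{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "Training writes only the LoRA adapter to `./qlora-out`, not a merged model. A smoke test of the result could look like the sketch below (assumes `transformers` with 4-bit loading support, plus `peft` and `bitsandbytes` from the axolotl environment; the prompt is just an illustrative question):"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Load the base model in 4-bit and attach the trained QLoRA adapter.\n",
  "import torch\n",
  "from transformers import AutoModelForCausalLM, AutoTokenizer\n",
  "from peft import PeftModel\n",
  "\n",
  "base = AutoModelForCausalLM.from_pretrained(\n",
  "    'huggyllama/llama-7b',\n",
  "    load_in_4bit=True,\n",
  "    torch_dtype=torch.bfloat16,\n",
  "    device_map='auto',\n",
  ")\n",
  "model = PeftModel.from_pretrained(base, './qlora-out')\n",
  "tokenizer = AutoTokenizer.from_pretrained('huggyllama/llama-7b')\n",
  "\n",
  "prompt = 'If a train travels 60 miles per hour for 2.5 hours, how far does it go?'\n",
  "inputs = tokenizer(prompt, return_tensors='pt').to(model.device)\n",
  "with torch.no_grad():\n",
  "    out = model.generate(**inputs, max_new_tokens=64)\n",
  "print(tokenizer.decode(out[0], skip_special_tokens=True))"
 ]
},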
{
"attachments": {},
"cell_type": "markdown",
"metadata": {},
"source": [
"## Below are ad hoc cells handling issues during training"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"!ps aux|grep python|grep finetune|awk '{print $2}'|xargs kill"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!apt install zip\n",
"!zip -r last_run_prepared.zip -xi last_run_prepared"
]
},
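{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "To reuse the prepared dataset on another machine, the archive can be extracted back into the repo root (assumes `unzip` is installed):"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Restore the tokenized dataset cache from the archive.\n",
  "!unzip -o last_run_prepared.zip"
 ]
},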
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"!nvitop -m full"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"!apt install lsof"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"!lsof /dev/nvidia*"
]
}
],
"metadata": {
"accelerator": "GPU",
"colab": {
"authorship_tag": "ABX9TyP0jHt4WNuLaC5ecmX0YtWl",
"gpuType": "T4",
"include_colab_link": true,
"provenance": [],
"toc_visible": true
},
"kernelspec": {
"display_name": "Python 3 (ipykernel)",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.16"
}
},
"nbformat": 4,
"nbformat_minor": 4
}