# Config for single device LoRA finetuning in lora_finetune_single_device.py
# using a Llama3 8B Instruct model
#
# This config assumes that you've run the following command before launching
# this run:
#   tune download meta-llama/Meta-Llama-3-8B-Instruct --output-dir /tmp/Meta-Llama-3-8B-Instruct --ignore-patterns "original/consolidated.00.pth" --hf-token
#
# To launch on a single device, run the following command from root:
#   tune run lora_finetune_single_device --config llama3/8B_lora_single_device
#
# You can add specific overrides through the command line. For example
# to override the checkpointer directory while launching training
# you can run:
#   tune run lora_finetune_single_device --config llama3/8B_lora_single_device checkpointer.checkpoint_dir=
#
# This config works only for training on single device.

output_dir: /tmp/torchtune/llama3_8B/lora_single_device # /tmp may be deleted by your system. Change it to your preference.

# Model Arguments
model:
  _component_: torchtune.models.llama3.lora_llama3_8b
  lora_attn_modules: ['q_proj', 'v_proj', 'output_proj']
  apply_lora_to_mlp: True
  apply_lora_to_output: False
  lora_rank: 8  # higher increases accuracy and memory
  lora_alpha: 16  # usually alpha=2*rank
  lora_dropout: 0.0

# Tokenizer
tokenizer:
  _component_: torchtune.models.llama3.llama3_tokenizer
  path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model
  max_seq_len: null

checkpointer:
  _component_: torchtune.training.FullModelHFCheckpointer
  checkpoint_dir: /tmp/Meta-Llama-3-8B-Instruct/
  checkpoint_files: [
    model-00001-of-00004.safetensors,
    model-00002-of-00004.safetensors,
    model-00003-of-00004.safetensors,
    model-00004-of-00004.safetensors
  ]
  recipe_checkpoint: null
  output_dir: ${output_dir}
  model_type: LLAMA3
resume_from_checkpoint: False
save_adapter_weights_only: False

# Dataset and Sampler
dataset:
  _component_: torchtune.datasets.alpaca_cleaned_dataset
  packed: False  # True increases speed
seed: null
shuffle: True
batch_size: 2

# Optimizer and Scheduler
optimizer:
  _component_: torch.optim.AdamW
  fused: True
  weight_decay: 0.01
  lr: 3e-4
lr_scheduler:
  _component_: torchtune.training.lr_schedulers.get_cosine_schedule_with_warmup
  num_warmup_steps: 100

loss:
  _component_: torchtune.modules.loss.LinearCrossEntropyLoss

# Training
epochs: 1
max_steps_per_epoch: null
gradient_accumulation_steps: 8  # Use to increase effective batch size
clip_grad_norm: null
compile: False  # torch.compile the model + loss, True increases speed + decreases memory

# Logging
metric_logger:
  _component_: torchtune.training.metric_logging.DiskLogger
  log_dir: ${output_dir}/logs
log_every_n_steps: 1
log_peak_memory_stats: True
log_level: INFO  # DEBUG, WARN, etc.

# Environment
device: cuda
dtype: bf16

# Activations Memory
enable_activation_checkpointing: True  # True reduces memory
enable_activation_offloading: False  # True reduces memory

# Profiler (disabled)
profiler:
  _component_: torchtune.training.setup_torch_profiler
  enabled: False

  # Output directory of trace artifacts
  output_dir: ${output_dir}/profiling_outputs

  # `torch.profiler.ProfilerActivity` types to trace
  cpu: True
  cuda: True

  # trace options passed to `torch.profiler.profile`
  profile_memory: False
  with_stack: False
  record_shapes: True
  with_flops: False

  # `torch.profiler.schedule` options:
  # wait_steps -> wait, warmup_steps -> warmup, active_steps -> active, num_cycles -> repeat
  wait_steps: 5
  warmup_steps: 3
  active_steps: 2
  num_cycles: 1
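
# Illustrative only: a possible single-device launch that trades speed for memory,
# using the same command-line override syntax shown at the top of this file.
# The exact flags and values here (e.g. max_seq_len=2048, gradient_accumulation_steps=16)
# are assumptions for the sketch, not recommendations; packed datasets require
# tokenizer.max_seq_len to be set, and activation offloading builds on the
# activation checkpointing already enabled above.
#   tune run lora_finetune_single_device --config llama3/8B_lora_single_device \
#     compile=True dataset.packed=True tokenizer.max_seq_len=2048 \
#     enable_activation_offloading=True gradient_accumulation_steps=16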