# Config for single device LoRA finetuning in lora_finetune_single_device.py
# using a Llama3 8B Instruct model
#
# This config assumes that you've run the following command before launching
# this run:
#   tune download meta-llama/Meta-Llama-3-8B-Instruct --output-dir /tmp/Meta-Llama-3-8B-Instruct --hf-token <HF_TOKEN>
#
# To launch on a single device, run the following command from root:
#   tune run lora_finetune_single_device --config llama3/8B_lora_single_device
#
# You can add specific overrides through the command line. For example
# to override the checkpointer directory while launching training
# you can run:
#   tune run lora_finetune_single_device --config llama3/8B_lora_single_device checkpointer.checkpoint_dir=<YOUR_CHECKPOINT_DIR>
#
# This config works only for training on single device.


# Model Arguments
model:
  _component_: torchtune.models.llama3.lora_llama3_8b
  lora_attn_modules: ['q_proj', 'v_proj']
  apply_lora_to_mlp: False
  apply_lora_to_output: False
  lora_rank: 8
  lora_alpha: 16
  lora_dropout: 0.0

# Tokenizer
tokenizer:
  _component_: torchtune.models.llama3.llama3_tokenizer
  path: /tmp/Meta-Llama-3-8B-Instruct/original/tokenizer.model
  max_seq_len: null

checkpointer:
  _component_: torchtune.training.FullModelMetaCheckpointer
  checkpoint_dir: /tmp/Meta-Llama-3-8B-Instruct/original/
  checkpoint_files: [
    consolidated.00.pth
  ]
  recipe_checkpoint: null
  output_dir: /tmp/Meta-Llama-3-8B-Instruct/
  model_type: LLAMA3
resume_from_checkpoint: False
save_adapter_weights_only: False

# Dataset and Sampler
dataset:
  _component_: torchtune.datasets.alpaca_cleaned_dataset
seed: null
shuffle: True
batch_size: 2

# Optimizer and Scheduler
optimizer:
  _component_: torch.optim.AdamW
  fused: True
  weight_decay: 0.01
  lr: 3e-4
lr_scheduler:
  _component_: torchtune.training.lr_schedulers.get_cosine_schedule_with_warmup
  num_warmup_steps: 100

loss:
  _component_: torchtune.modules.loss.CEWithChunkedOutputLoss

# Training
epochs: 1
max_steps_per_epoch: null
gradient_accumulation_steps: 64
compile: False

# Logging
output_dir: /tmp/lora_finetune_output
metric_logger:
  _component_: torchtune.training.metric_logging.DiskLogger
  log_dir: ${output_dir}
log_every_n_steps: 1
log_peak_memory_stats: False

# Environment
device: cuda
dtype: bf16

# Activations Memory
enable_activation_checkpointing: True
enable_activation_offloading: False

# Profiler (disabled)
profiler:
  _component_: torchtune.training.setup_torch_profiler
  enabled: False

  # Output directory of trace artifacts
  output_dir: ${output_dir}/profiling_outputs

  # `torch.profiler.ProfilerActivity` types to trace
  cpu: True
  cuda: True

  # trace options passed to `torch.profiler.profile`
  profile_memory: False
  with_stack: False
  record_shapes: True
  with_flops: False

  # `torch.profiler.schedule` options:
  # wait_steps -> wait, warmup_steps -> warmup, active_steps -> active, num_cycles -> repeat
  wait_steps: 5
  warmup_steps: 5
  active_steps: 2
  num_cycles: 1
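
# Note (illustrative, not part of the upstream config): any field above can be
# overridden from the command line with the same dotted syntax shown in the
# header. For example, with these defaults the effective batch size is
# batch_size * gradient_accumulation_steps = 2 * 64 = 128, and the profiler,
# which is disabled here, could be turned on for a short run with:
#   tune run lora_finetune_single_device --config llama3/8B_lora_single_device profiler.enabled=True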