# @package _global_

# specify here default configuration
# order of defaults determines the order in which configs override each other
defaults:
  - _self_
  - datamodule: kitti_datamodule.yaml
  - model: regressor.yaml
  - callbacks: default.yaml
  - logger: null # set logger here or use command line (e.g. `python train.py logger=tensorboard`)
  - trainer: dgx.yaml
  - paths: default.yaml
  - extras: default.yaml
  - hydra: default.yaml

  # experiment configs allow for version control of specific hyperparameters
  # e.g. best hyperparameters for given model and datamodule
  - experiment: null

  # config for hyperparameter optimization
  - hparams_search: null

  # optional local config for machine/user specific settings
  # it's optional since it doesn't need to exist and is excluded from version control
  - optional local: default.yaml

  # debugging config (enable through command line, e.g. `python train.py debug=default`)
  - debug: null

# task name, determines output directory path
task_name: "train"

# tags to help you identify your experiments
# you can overwrite this in experiment configs
# overwrite from command line with `python train.py tags="[first_tag, second_tag]"`
# appending lists from command line is currently not supported :(
# https://github.com/facebookresearch/hydra/issues/1547
tags: ["dev"]

# set False to skip model training
train: True

# evaluate on test set, using best model weights achieved during training
# lightning chooses best weights based on the metric specified in checkpoint callback
test: False

# simply provide checkpoint path to resume training
# ckpt_path: weights/last.ckpt
ckpt_path: null

# seed for random number generators in pytorch, numpy and python.random
seed: null
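
# minimal usage sketch (assumes the standard `python train.py` entry point referenced above);
# every key in this file can be overridden from the command line via Hydra, e.g.:
#   python train.py experiment=<experiment_name>            # run a version-controlled experiment config
#   python train.py trainer.max_epochs=20 seed=42           # override individual fields
#   python train.py train=False test=True ckpt_path=<path>  # skip training and evaluate a checkpoint
# <experiment_name> and <path> are placeholders for your own experiment config and checkpoint file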