Namespace(avg_reprojection=False, batch_size=12, ctx=[gpu(0)],
          data_path='/home/ubuntu/.mxnet/datasets/kitti/kitti_data', dataset='kitti',
          disable_automasking=False, disable_median_scaling=False,
          disparity_smoothness=0.001, eval_eigen_to_benchmark=False, eval_model=None,
          eval_mono=False, eval_split='eigen', eval_stereo=False, ext_disp_to_eval=None,
          frame_ids=[0], gpu=0, height=192, hybridize=False, learning_rate=0.0001,
          load_weights_folder=None, log_dir='./tmp/stereo/', log_frequency=250,
          max_depth=100.0, min_depth=0.1,
          model_zoo='monodepth2_resnet18_kitti_stereo_640x192', no_eval=False,
          no_gpu=False, no_ssim=False, num_epochs=20, num_workers=12, png=False,
          pose_model_input='pairs', pred_depth_scale_factor=1, pretrained_base=True,
          pretrained_type='customer', resume=None, save_frequency=1,
          save_pred_disps=False, scales=[0, 1, 2, 3], scheduler_step_size=15,
          split='eigen_full', start_epoch=0, use_stereo=True, v1_multiscale=False,
          warmup_epochs=0, width=640)
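The Namespace above is the full training configuration. For reference, the network it names can be built directly from the GluonCV model zoo; a minimal sketch (the `ctx` and `pretrained_base` arguments mirror the fields above and are assumed to be forwarded to the MonoDepth2 constructor):

```python
# Minimal sketch (not the training script itself): construct the architecture
# named in the Namespace via GluonCV's model zoo.
import mxnet as mx
from gluoncv import model_zoo

ctx = mx.gpu(0)  # the run above used ctx=[gpu(0)]
model = model_zoo.get_model(
    'monodepth2_resnet18_kitti_stereo_640x192',  # model_zoo field from the Namespace
    pretrained_base=True,  # ImageNet-initialized ResNet-18 encoder, as in the config
    ctx=ctx,
)
print(model)  # yields a module printout like the one below
```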
MonoDepth2(
  (encoder): ResnetEncoder(
    (encoder): ResNetV1b(
      (conv1): Conv2D(3 -> 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
      (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
      (relu): Activation(relu)
      (maxpool): MaxPool2D(size=(3, 3), stride=(2, 2), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
      (layer1): HybridSequential(
        (0): BasicBlockV1b(
          (conv1): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
          (relu1): Activation(relu)
          (conv2): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
          (relu2): Activation(relu)
        )
        (1): BasicBlockV1b(
          (conv1): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
          (relu1): Activation(relu)
          (conv2): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
          (relu2): Activation(relu)
        )
      )
      (layer2): HybridSequential(
        (0): BasicBlockV1b(
          (conv1): Conv2D(64 -> 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
          (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (relu1): Activation(relu)
          (conv2): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (relu2): Activation(relu)
          (downsample): HybridSequential(
            (0): Conv2D(64 -> 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
            (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          )
        )
        (1): BasicBlockV1b(
          (conv1): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (relu1): Activation(relu)
          (conv2): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128)
          (relu2): Activation(relu)
        )
      )
      (layer3): HybridSequential(
        (0): BasicBlockV1b(
          (conv1): Conv2D(128 -> 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
          (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256)
          (relu1): Activation(relu)
          (conv2): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256)
          (relu2): Activation(relu)
          (downsample): HybridSequential(
            (0): Conv2D(128 -> 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
            (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256)
          )
        )
        (1): BasicBlockV1b(
          (conv1): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256)
          (relu1): Activation(relu)
          (conv2): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256)
          (relu2): Activation(relu)
        )
      )
      (layer4): HybridSequential(
        (0): BasicBlockV1b(
          (conv1): Conv2D(256 -> 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
          (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512)
          (relu1): Activation(relu)
          (conv2): Conv2D(512 -> 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512)
          (relu2): Activation(relu)
          (downsample): HybridSequential(
            (0): Conv2D(256 -> 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
            (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512)
          )
        )
        (1): BasicBlockV1b(
          (conv1): Conv2D(512 -> 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512)
          (relu1): Activation(relu)
          (conv2): Conv2D(512 -> 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
          (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512)
          (relu2): Activation(relu)
        )
      )
      (avgpool): GlobalAvgPool2D(size=(1, 1), stride=(1, 1), padding=(0, 0), ceil_mode=True, global_pool=True, pool_type=avg, layout=NCHW)
      (flat): Flatten
      (fc): Dense(512 -> 1000, linear)
    )
  )
  (decoder): DepthDecoder(
    (decoder): HybridSequential(
      (0): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(512 -> 256, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (1): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(512 -> 256, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (2): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(256 -> 128, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (3): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(256 -> 128, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (4): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(128 -> 64, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (5): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(128 -> 64, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (6): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(64 -> 32, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (7): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(96 -> 32, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (8): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(32 -> 16, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (9): ConvBlock(
        (conv): Conv3x3(
          (pad): ReflectionPad2D()
          (conv): Conv2D(16 -> 16, kernel_size=(3, 3), stride=(1, 1))
        )
        (nonlin): ELU()
      )
      (10): Conv3x3(
        (pad): ReflectionPad2D()
        (conv): Conv2D(16 -> 1, kernel_size=(3, 3), stride=(1, 1))
      )
      (11): Conv3x3(
        (pad): ReflectionPad2D()
        (conv): Conv2D(32 -> 1, kernel_size=(3, 3), stride=(1, 1))
      )
      (12): Conv3x3(
        (pad): ReflectionPad2D()
        (conv): Conv2D(64 -> 1, kernel_size=(3, 3), stride=(1, 1))
      )
      (13): Conv3x3(
        (pad): ReflectionPad2D()
        (conv): Conv2D(128 -> 1, kernel_size=(3, 3), stride=(1, 1))
      )
    )
    (sigmoid): Activation(sigmoid)
  )
)
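A note on the printout: the four 1-channel Conv3x3 heads (10)-(13) feed the shared sigmoid, so the decoder emits a normalized disparity in (0, 1) at each of the four scales in `scales=[0, 1, 2, 3]`, and the widened inputs of blocks (1), (3), (5), and (7) (e.g. 96 = 32 + 64) reflect concatenated encoder skip features. Monodepth2 maps the sigmoid output to depth using the `min_depth`/`max_depth` values from the configuration; a sketch following the reference monodepth2 implementation:

```python
# Sketch of monodepth2's disparity-to-depth mapping (following the reference
# implementation); the defaults match min_depth/max_depth in the Namespace.
def disp_to_depth(disp, min_depth=0.1, max_depth=100.0):
    """Map the decoder's sigmoid output in (0, 1) to depth in [min_depth, max_depth]."""
    min_disp = 1.0 / max_depth  # disparity of the farthest allowed point
    max_disp = 1.0 / min_depth  # disparity of the nearest allowed point
    scaled_disp = min_disp + (max_disp - min_disp) * disp
    depth = 1.0 / scaled_disp
    return scaled_disp, depth
```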
Starting Epoch: 0
Total Epochs: 20
Epoch 0 iteration 0000/3766: training loss 0.204
Epoch 0 iteration 0250/3766: training loss 0.175
Epoch 0 iteration 0500/3766: training loss 0.163
Epoch 0 iteration 0750/3766: training loss 0.157
Epoch 0 iteration 1000/3766: training loss 0.154
Epoch 0 iteration 1250/3766: training loss 0.151
Epoch 0 iteration 1500/3766: training loss 0.149
Epoch 0 iteration 1750/3766: training loss 0.147
Epoch 0 iteration 2000/3766: training loss 0.146
Epoch 0 iteration 2250/3766: training loss 0.144
Epoch 0 iteration 2500/3766: training loss 0.143
Epoch 0 iteration 2750/3766: training loss 0.142
Epoch 0 iteration 3000/3766: training loss 0.141
Epoch 0 iteration 3250/3766: training loss 0.141
Epoch 0 iteration 3500/3766: training loss 0.140
Epoch 0 iteration 3750/3766: training loss 0.139
Epoch 0, validation abs_REL: 0.114 sq_REL: 0.951 RMSE: 5.026, RMSE_log: 0.212 Delta_1: 0.861 Delta_2: 0.949 Delta_3: 0.978
Epoch 1 iteration 0000/3766: training loss 0.125
Epoch 1 iteration 0250/3766: training loss 0.129
Epoch 1 iteration 0500/3766: training loss 0.129
Epoch 1 iteration 0750/3766: training loss 0.128
Epoch 1 iteration 1000/3766: training loss 0.128
Epoch 1 iteration 1250/3766: training loss 0.127
Epoch 1 iteration 1500/3766: training loss 0.127
Epoch 1 iteration 1750/3766: training loss 0.127
Epoch 1 iteration 2000/3766: training loss 0.126
Epoch 1 iteration 2250/3766: training loss 0.126
Epoch 1 iteration 2500/3766: training loss 0.126
Epoch 1 iteration 2750/3766: training loss 0.125
Epoch 1 iteration 3000/3766: training loss 0.125
Epoch 1 iteration 3250/3766: training loss 0.125
Epoch 1 iteration 3500/3766: training loss 0.125
Epoch 1 iteration 3750/3766: training loss 0.124
Epoch 1, validation abs_REL: 0.112 sq_REL: 0.913 RMSE: 4.928, RMSE_log: 0.207 Delta_1: 0.882 Delta_2: 0.954 Delta_3: 0.978
Epoch 2 iteration 0000/3766: training loss 0.112
Epoch 2 iteration 0250/3766: training loss 0.121
Epoch 2 iteration 0500/3766: training loss 0.121
Epoch 2 iteration 0750/3766: training loss 0.120
Epoch 2 iteration 1000/3766: training loss 0.120
Epoch 2 iteration 1250/3766: training loss 0.120
Epoch 2 iteration 1500/3766: training loss 0.120
Epoch 2 iteration 1750/3766: training loss 0.120
Epoch 2 iteration 2000/3766: training loss 0.120
Epoch 2 iteration 2250/3766: training loss 0.119
Epoch 2 iteration 2500/3766: training loss 0.119
Epoch 2 iteration 2750/3766: training loss 0.119
Epoch 2 iteration 3000/3766: training loss 0.119
Epoch 2 iteration 3250/3766: training loss 0.119
Epoch 2 iteration 3500/3766: training loss 0.119
Epoch 2 iteration 3750/3766: training loss 0.119
Epoch 2, validation abs_REL: 0.099 sq_REL: 0.840 RMSE: 4.720, RMSE_log: 0.197 Delta_1: 0.893 Delta_2: 0.957 Delta_3: 0.980
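Each epoch ends with a validation pass reporting the standard KITTI/Eigen depth metrics. Their conventional definitions, as a NumPy sketch over matched arrays of valid ground-truth and predicted depths:

```python
import numpy as np

# Conventional KITTI/Eigen depth-metric definitions behind the validation
# lines (a sketch; gt and pred are 1-D arrays of matched valid depths).
def compute_depth_metrics(gt, pred):
    thresh = np.maximum(gt / pred, pred / gt)     # per-pixel max ratio
    delta_1 = (thresh < 1.25).mean()              # accuracy under thresholds
    delta_2 = (thresh < 1.25 ** 2).mean()
    delta_3 = (thresh < 1.25 ** 3).mean()
    abs_rel = np.mean(np.abs(gt - pred) / gt)     # absolute relative error
    sq_rel = np.mean(((gt - pred) ** 2) / gt)     # squared relative error
    rmse = np.sqrt(np.mean((gt - pred) ** 2))
    rmse_log = np.sqrt(np.mean((np.log(gt) - np.log(pred)) ** 2))
    return abs_rel, sq_rel, rmse, rmse_log, delta_1, delta_2, delta_3
```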
Epoch 3 iteration 0000/3766: training loss 0.146
Epoch 3 iteration 0250/3766: training loss 0.115
Epoch 3 iteration 0500/3766: training loss 0.116
Epoch 3 iteration 0750/3766: training loss 0.116
Epoch 3 iteration 1000/3766: training loss 0.116
Epoch 3 iteration 1250/3766: training loss 0.116
Epoch 3 iteration 1500/3766: training loss 0.116
Epoch 3 iteration 1750/3766: training loss 0.116
Epoch 3 iteration 2000/3766: training loss 0.116
Epoch 3 iteration 2250/3766: training loss 0.116
Epoch 3 iteration 2500/3766: training loss 0.115
Epoch 3 iteration 2750/3766: training loss 0.115
Epoch 3 iteration 3000/3766: training loss 0.115
Epoch 3 iteration 3250/3766: training loss 0.115
Epoch 3 iteration 3500/3766: training loss 0.115
Epoch 3 iteration 3750/3766: training loss 0.115
Epoch 3, validation abs_REL: 0.103 sq_REL: 0.931 RMSE: 4.911, RMSE_log: 0.198 Delta_1: 0.895 Delta_2: 0.958 Delta_3: 0.980
Epoch 4 iteration 0000/3766: training loss 0.131
Epoch 4 iteration 0250/3766: training loss 0.113
Epoch 4 iteration 0500/3766: training loss 0.113
Epoch 4 iteration 0750/3766: training loss 0.113
Epoch 4 iteration 1000/3766: training loss 0.113
Epoch 4 iteration 1250/3766: training loss 0.113
Epoch 4 iteration 1500/3766: training loss 0.113
Epoch 4 iteration 1750/3766: training loss 0.113
Epoch 4 iteration 2000/3766: training loss 0.113
Epoch 4 iteration 2250/3766: training loss 0.113
Epoch 4 iteration 2500/3766: training loss 0.113
Epoch 4 iteration 2750/3766: training loss 0.112
Epoch 4 iteration 3000/3766: training loss 0.112
Epoch 4 iteration 3250/3766: training loss 0.112
Epoch 4 iteration 3500/3766: training loss 0.112
Epoch 4 iteration 3750/3766: training loss 0.112
Epoch 4, validation abs_REL: 0.091 sq_REL: 0.820 RMSE: 4.636, RMSE_log: 0.196 Delta_1: 0.903 Delta_2: 0.959 Delta_3: 0.979
Epoch 5 iteration 0000/3766: training loss 0.126
Epoch 5 iteration 0250/3766: training loss 0.112
Epoch 5 iteration 0500/3766: training loss 0.111
Epoch 5 iteration 0750/3766: training loss 0.111
Epoch 5 iteration 1000/3766: training loss 0.111
Epoch 5 iteration 1250/3766: training loss 0.111
Epoch 5 iteration 1500/3766: training loss 0.111
Epoch 5 iteration 1750/3766: training loss 0.111
Epoch 5 iteration 2000/3766: training loss 0.111
Epoch 5 iteration 2250/3766: training loss 0.111
Epoch 5 iteration 2500/3766: training loss 0.111
Epoch 5 iteration 2750/3766: training loss 0.110
Epoch 5 iteration 3000/3766: training loss 0.110
Epoch 5 iteration 3250/3766: training loss 0.110
Epoch 5 iteration 3500/3766: training loss 0.110
Epoch 5 iteration 3750/3766: training loss 0.110
Epoch 5, validation abs_REL: 0.085 sq_REL: 0.730 RMSE: 4.532, RMSE_log: 0.196 Delta_1: 0.902 Delta_2: 0.958 Delta_3: 0.979
Epoch 6 iteration 0000/3766: training loss 0.105
Epoch 6 iteration 0250/3766: training loss 0.110
Epoch 6 iteration 0500/3766: training loss 0.110
Epoch 6 iteration 0750/3766: training loss 0.109
Epoch 6 iteration 1000/3766: training loss 0.109
Epoch 6 iteration 1250/3766: training loss 0.109
Epoch 6 iteration 1500/3766: training loss 0.109
Epoch 6 iteration 1750/3766: training loss 0.109
Epoch 6 iteration 2000/3766: training loss 0.109
Epoch 6 iteration 2250/3766: training loss 0.109
Epoch 6 iteration 2500/3766: training loss 0.109
Epoch 6 iteration 2750/3766: training loss 0.109
Epoch 6 iteration 3000/3766: training loss 0.109
Epoch 6 iteration 3250/3766: training loss 0.109
Epoch 6 iteration 3500/3766: training loss 0.109
Epoch 6 iteration 3750/3766: training loss 0.109
Epoch 6, validation abs_REL: 0.104 sq_REL: 0.899 RMSE: 4.850, RMSE_log: 0.198 Delta_1: 0.904 Delta_2: 0.960 Delta_3: 0.979
Epoch 7 iteration 0000/3766: training loss 0.090
Epoch 7 iteration 0250/3766: training loss 0.108
Epoch 7 iteration 0500/3766: training loss 0.108
Epoch 7 iteration 0750/3766: training loss 0.108
Epoch 7 iteration 1000/3766: training loss 0.108
Epoch 7 iteration 1250/3766: training loss 0.108
Epoch 7 iteration 1500/3766: training loss 0.107
Epoch 7 iteration 1750/3766: training loss 0.107
Epoch 7 iteration 2000/3766: training loss 0.107
Epoch 7 iteration 2250/3766: training loss 0.108
Epoch 7 iteration 2500/3766: training loss 0.107
Epoch 7 iteration 2750/3766: training loss 0.107
Epoch 7 iteration 3000/3766: training loss 0.107
Epoch 7 iteration 3250/3766: training loss 0.107
Epoch 7 iteration 3500/3766: training loss 0.107
Epoch 7 iteration 3750/3766: training loss 0.107
Epoch 7, validation abs_REL: 0.097 sq_REL: 0.844 RMSE: 4.679, RMSE_log: 0.198 Delta_1: 0.908 Delta_2: 0.959 Delta_3: 0.978
Epoch 8 iteration 0000/3766: training loss 0.104
Epoch 8 iteration 0250/3766: training loss 0.106
Epoch 8 iteration 0500/3766: training loss 0.106
Epoch 8 iteration 0750/3766: training loss 0.106
Epoch 8 iteration 1000/3766: training loss 0.106
Epoch 8 iteration 1250/3766: training loss 0.106
Epoch 8 iteration 1500/3766: training loss 0.106
Epoch 8 iteration 1750/3766: training loss 0.106
Epoch 8 iteration 2000/3766: training loss 0.106
Epoch 8 iteration 2250/3766: training loss 0.106
Epoch 8 iteration 2500/3766: training loss 0.106
Epoch 8 iteration 2750/3766: training loss 0.106
Epoch 8 iteration 3000/3766: training loss 0.106
Epoch 8 iteration 3250/3766: training loss 0.106
Epoch 8 iteration 3500/3766: training loss 0.106
Epoch 8 iteration 3750/3766: training loss 0.106
Epoch 8, validation abs_REL: 0.088 sq_REL: 0.816 RMSE: 4.612, RMSE_log: 0.195 Delta_1: 0.910 Delta_2: 0.960 Delta_3: 0.979
Epoch 9 iteration 0000/3766: training loss 0.115
Epoch 9 iteration 0250/3766: training loss 0.105
Epoch 9 iteration 0500/3766: training loss 0.105
Epoch 9 iteration 0750/3766: training loss 0.105
Epoch 9 iteration 1000/3766: training loss 0.105
Epoch 9 iteration 1250/3766: training loss 0.106
Epoch 9 iteration 1500/3766: training loss 0.106
Epoch 9 iteration 1750/3766: training loss 0.105
Epoch 9 iteration 2000/3766: training loss 0.106
Epoch 9 iteration 2250/3766: training loss 0.106
Epoch 9 iteration 2500/3766: training loss 0.105
Epoch 9 iteration 2750/3766: training loss 0.105
Epoch 9 iteration 3000/3766: training loss 0.105
Epoch 9 iteration 3250/3766: training loss 0.105
Epoch 9 iteration 3500/3766: training loss 0.105
Epoch 9 iteration 3750/3766: training loss 0.105
Epoch 9, validation abs_REL: 0.082 sq_REL: 0.782 RMSE: 4.579, RMSE_log: 0.198 Delta_1: 0.909 Delta_2: 0.958 Delta_3: 0.978
Epoch 10 iteration 0000/3766: training loss 0.105
Epoch 10 iteration 0250/3766: training loss 0.105
Epoch 10 iteration 0500/3766: training loss 0.105
Epoch 10 iteration 0750/3766: training loss 0.106
Epoch 10 iteration 1000/3766: training loss 0.105
Epoch 10 iteration 1250/3766: training loss 0.105
Epoch 10 iteration 1500/3766: training loss 0.105
Epoch 10 iteration 1750/3766: training loss 0.105
Epoch 10 iteration 2000/3766: training loss 0.105
Epoch 10 iteration 2250/3766: training loss 0.105
Epoch 10 iteration 2500/3766: training loss 0.105
Epoch 10 iteration 2750/3766: training loss 0.105
Epoch 10 iteration 3000/3766: training loss 0.104
Epoch 10 iteration 3250/3766: training loss 0.104
Epoch 10 iteration 3500/3766: training loss 0.104
Epoch 10 iteration 3750/3766: training loss 0.104
Epoch 10, validation abs_REL: 0.085 sq_REL: 0.730 RMSE: 4.420, RMSE_log: 0.194 Delta_1: 0.913 Delta_2: 0.960 Delta_3: 0.979
Epoch 11 iteration 0000/3766: training loss 0.103
Epoch 11 iteration 0250/3766: training loss 0.103
Epoch 11 iteration 0500/3766: training loss 0.104
Epoch 11 iteration 0750/3766: training loss 0.104
Epoch 11 iteration 1000/3766: training loss 0.104
Epoch 11 iteration 1250/3766: training loss 0.104
Epoch 11 iteration 1500/3766: training loss 0.104
Epoch 11 iteration 1750/3766: training loss 0.104
Epoch 11 iteration 2000/3766: training loss 0.104
Epoch 11 iteration 2250/3766: training loss 0.104
Epoch 11 iteration 2500/3766: training loss 0.104
Epoch 11 iteration 2750/3766: training loss 0.104
Epoch 11 iteration 3000/3766: training loss 0.104
Epoch 11 iteration 3250/3766: training loss 0.104
Epoch 11 iteration 3500/3766: training loss 0.104
Epoch 11 iteration 3750/3766: training loss 0.104
Epoch 11, validation abs_REL: 0.103 sq_REL: 0.947 RMSE: 4.969, RMSE_log: 0.200 Delta_1: 0.906 Delta_2: 0.960 Delta_3: 0.979
Epoch 12 iteration 0000/3766: training loss 0.084
Epoch 12 iteration 0250/3766: training loss 0.103
Epoch 12 iteration 0500/3766: training loss 0.103
Epoch 12 iteration 0750/3766: training loss 0.103
Epoch 12 iteration 1000/3766: training loss 0.103
Epoch 12 iteration 1250/3766: training loss 0.103
Epoch 12 iteration 1500/3766: training loss 0.103
Epoch 12 iteration 1750/3766: training loss 0.103
Epoch 12 iteration 2000/3766: training loss 0.103
Epoch 12 iteration 2250/3766: training loss 0.103
Epoch 12 iteration 2500/3766: training loss 0.103
Epoch 12 iteration 2750/3766: training loss 0.103
Epoch 12 iteration 3000/3766: training loss 0.103
Epoch 12 iteration 3250/3766: training loss 0.103
Epoch 12 iteration 3500/3766: training loss 0.103
Epoch 12 iteration 3750/3766: training loss 0.103
Epoch 12, validation abs_REL: 0.084 sq_REL: 0.832 RMSE: 4.679, RMSE_log: 0.197 Delta_1: 0.913 Delta_2: 0.959 Delta_3: 0.978
Epoch 13 iteration 0000/3766: training loss 0.080
Epoch 13 iteration 0250/3766: training loss 0.102
Epoch 13 iteration 0500/3766: training loss 0.102
Epoch 13 iteration 0750/3766: training loss 0.102
Epoch 13 iteration 1000/3766: training loss 0.102
Epoch 13 iteration 1250/3766: training loss 0.103
Epoch 13 iteration 1500/3766: training loss 0.102
Epoch 13 iteration 1750/3766: training loss 0.102
Epoch 13 iteration 2000/3766: training loss 0.102
Epoch 13 iteration 2250/3766: training loss 0.102
Epoch 13 iteration 2500/3766: training loss 0.102
Epoch 13 iteration 2750/3766: training loss 0.102
Epoch 13 iteration 3000/3766: training loss 0.102
Epoch 13 iteration 3250/3766: training loss 0.102
Epoch 13 iteration 3500/3766: training loss 0.102
Epoch 13 iteration 3750/3766: training loss 0.102
Epoch 13, validation abs_REL: 0.092 sq_REL: 0.828 RMSE: 4.622, RMSE_log: 0.197 Delta_1: 0.915 Delta_2: 0.960 Delta_3: 0.978
Epoch 14 iteration 0000/3766: training loss 0.093
Epoch 14 iteration 0250/3766: training loss 0.101
Epoch 14 iteration 0500/3766: training loss 0.101
Epoch 14 iteration 0750/3766: training loss 0.101
Epoch 14 iteration 1000/3766: training loss 0.101
Epoch 14 iteration 1250/3766: training loss 0.101
Epoch 14 iteration 1500/3766: training loss 0.101
Epoch 14 iteration 1750/3766: training loss 0.101
Epoch 14 iteration 2000/3766: training loss 0.101
Epoch 14 iteration 2250/3766: training loss 0.101
Epoch 14 iteration 2500/3766: training loss 0.101
Epoch 14 iteration 2750/3766: training loss 0.102
Epoch 14 iteration 3000/3766: training loss 0.102
Epoch 14 iteration 3250/3766: training loss 0.102
Epoch 14 iteration 3500/3766: training loss 0.102
Epoch 14 iteration 3750/3766: training loss 0.102
Epoch 14, validation abs_REL: 0.091 sq_REL: 0.804 RMSE: 4.516, RMSE_log: 0.191 Delta_1: 0.918 Delta_2: 0.962 Delta_3: 0.980
Epoch 15 iteration 0000/3766: training loss 0.119
Epoch 15 iteration 0250/3766: training loss 0.101
Epoch 15 iteration 0500/3766: training loss 0.101
Epoch 15 iteration 0750/3766: training loss 0.101
Epoch 15 iteration 1000/3766: training loss 0.101
Epoch 15 iteration 1250/3766: training loss 0.101
Epoch 15 iteration 1500/3766: training loss 0.101
Epoch 15 iteration 1750/3766: training loss 0.101
Epoch 15 iteration 2000/3766: training loss 0.101
Epoch 15 iteration 2250/3766: training loss 0.101
Epoch 15 iteration 2500/3766: training loss 0.101
Epoch 15 iteration 2750/3766: training loss 0.101
Epoch 15 iteration 3000/3766: training loss 0.101
Epoch 15 iteration 3250/3766: training loss 0.101
Epoch 15 iteration 3500/3766: training loss 0.101
Epoch 15 iteration 3750/3766: training loss 0.101
Epoch 15, validation abs_REL: 0.080 sq_REL: 0.758 RMSE: 4.466, RMSE_log: 0.193 Delta_1: 0.917 Delta_2: 0.961 Delta_3: 0.978
Epoch 16 iteration 0000/3766: training loss 0.125
Epoch 16 iteration 0250/3766: training loss 0.100
Epoch 16 iteration 0500/3766: training loss 0.100
Epoch 16 iteration 0750/3766: training loss 0.100
Epoch 16 iteration 1000/3766: training loss 0.100
Epoch 16 iteration 1250/3766: training loss 0.100
Epoch 16 iteration 1500/3766: training loss 0.100
Epoch 16 iteration 1750/3766: training loss 0.100
Epoch 16 iteration 2000/3766: training loss 0.101
Epoch 16 iteration 2250/3766: training loss 0.101
Epoch 16 iteration 2500/3766: training loss 0.101
Epoch 16 iteration 2750/3766: training loss 0.101
Epoch 16 iteration 3000/3766: training loss 0.101
Epoch 16 iteration 3250/3766: training loss 0.101
Epoch 16 iteration 3500/3766: training loss 0.101
Epoch 16 iteration 3750/3766: training loss 0.100
Epoch 16, validation abs_REL: 0.087 sq_REL: 0.853 RMSE: 4.646, RMSE_log: 0.195 Delta_1: 0.917 Delta_2: 0.961 Delta_3: 0.979
Epoch 17 iteration 0000/3766: training loss 0.101
Epoch 17 iteration 0250/3766: training loss 0.101
Epoch 17 iteration 0500/3766: training loss 0.101
Epoch 17 iteration 0750/3766: training loss 0.100
Epoch 17 iteration 1000/3766: training loss 0.100
Epoch 17 iteration 1250/3766: training loss 0.100
Epoch 17 iteration 1500/3766: training loss 0.100
Epoch 17 iteration 1750/3766: training loss 0.100
Epoch 17 iteration 2000/3766: training loss 0.100
Epoch 17 iteration 2250/3766: training loss 0.100
Epoch 17 iteration 2500/3766: training loss 0.100
Epoch 17 iteration 2750/3766: training loss 0.100
Epoch 17 iteration 3000/3766: training loss 0.100
Epoch 17 iteration 3250/3766: training loss 0.100
Epoch 17 iteration 3500/3766: training loss 0.100
Epoch 17 iteration 3750/3766: training loss 0.100
Epoch 17, validation abs_REL: 0.079 sq_REL: 0.741 RMSE: 4.458, RMSE_log: 0.191 Delta_1: 0.918 Delta_2: 0.961 Delta_3: 0.979
Epoch 18 iteration 0000/3766: training loss 0.097
Epoch 18 iteration 0250/3766: training loss 0.100
Epoch 18 iteration 0500/3766: training loss 0.100
Epoch 18 iteration 0750/3766: training loss 0.100
Epoch 18 iteration 1000/3766: training loss 0.100
Epoch 18 iteration 1250/3766: training loss 0.100
Epoch 18 iteration 1500/3766: training loss 0.100
Epoch 18 iteration 1750/3766: training loss 0.100
Epoch 18 iteration 2000/3766: training loss 0.100
Epoch 18 iteration 2250/3766: training loss 0.100
Epoch 18 iteration 2500/3766: training loss 0.100
Epoch 18 iteration 2750/3766: training loss 0.100
Epoch 18 iteration 3000/3766: training loss 0.100
Epoch 18 iteration 3250/3766: training loss 0.100
Epoch 18 iteration 3500/3766: training loss 0.100
Epoch 18 iteration 3750/3766: training loss 0.100
Epoch 18, validation abs_REL: 0.084 sq_REL: 0.820 RMSE: 4.544, RMSE_log: 0.192 Delta_1: 0.918 Delta_2: 0.961 Delta_3: 0.979
Epoch 19 iteration 0000/3766: training loss 0.097
Epoch 19 iteration 0250/3766: training loss 0.100
Epoch 19 iteration 0500/3766: training loss 0.100
Epoch 19 iteration 0750/3766: training loss 0.099
Epoch 19 iteration 1000/3766: training loss 0.099
Epoch 19 iteration 1250/3766: training loss 0.099
Epoch 19 iteration 1500/3766: training loss 0.100
Epoch 19 iteration 1750/3766: training loss 0.099
Epoch 19 iteration 2000/3766: training loss 0.099
Epoch 19 iteration 2250/3766: training loss 0.099
Epoch 19 iteration 2500/3766: training loss 0.099
Epoch 19 iteration 2750/3766: training loss 0.099
Epoch 19 iteration 3000/3766: training loss 0.099
Epoch 19 iteration 3250/3766: training loss 0.099
Epoch 19 iteration 3500/3766: training loss 0.099
Epoch 19 iteration 3750/3766: training loss 0.099
Epoch 19, validation abs_REL: 0.089 sq_REL: 0.813 RMSE: 4.549, RMSE_log: 0.194 Delta_1: 0.919 Delta_2: 0.962 Delta_3: 0.979
Training Finished! Total training time is 19h 28m
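With training finished, the model can be run on a single image. A hedged sketch following the pattern of GluonCV's depth demo, reusing `model` and `disp_to_depth` from the sketches above; the `model.predict` call and the `("disp", 0)` output key are assumptions to verify against your GluonCV version:

```python
# Hedged inference sketch. Image prep uses standard PIL/MXNet; the predict()
# call and ("disp", 0) key follow GluonCV's depth demo pattern (assumptions).
import mxnet as mx
import numpy as np
from PIL import Image

ctx = mx.gpu(0)
img = Image.open('example.jpg').convert('RGB').resize((640, 192), Image.LANCZOS)
x = mx.nd.array(np.asarray(img), ctx=ctx).astype('float32') / 255.0
x = x.transpose((2, 0, 1)).expand_dims(0)   # HWC -> NCHW batch of one

outputs = model.predict(x)                  # dict of multi-scale disparities
disp = outputs[("disp", 0)]                 # finest scale, shape (1, 1, 192, 640)
_, depth = disp_to_depth(disp, min_depth=0.1, max_depth=100.0)
```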