Namespace(avg_reprojection=False, batch_size=10, ctx=[gpu(0)],
          data_path='/home/ubuntu/.mxnet/datasets/kitti/kitti_data', dataset='kitti',
          disable_automasking=False, disable_median_scaling=False, disparity_smoothness=0.001,
          eval_eigen_to_benchmark=False, eval_model=None, eval_mono=False, eval_split='eigen',
          eval_stereo=False, ext_disp_to_eval=None, frame_ids=[0, -1, 1], gpu=0, height=192,
          hybridize=False, learning_rate=0.0001, load_weights_folder=None, log_dir='./tmp/mono/',
          log_frequency=250, max_depth=100.0, min_depth=0.1,
          model_zoo='monodepth2_resnet18_kitti_mono_640x192', no_eval=False, no_gpu=False,
          no_ssim=False, num_epochs=20, num_workers=12, png=True, pose_model_input='pairs',
          pred_depth_scale_factor=1, pretrained_base=True, pretrained_type='customer',
          resume=None, save_frequency=1, save_pred_disps=False, scales=[0, 1, 2, 3],
          scheduler_step_size=15, split='eigen_zhou', start_epoch=0, use_stereo=False,
          v1_multiscale=False, warmup_epochs=0, width=640)
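The iteration count that appears throughout the log below follows directly from this configuration: with split='eigen_zhou' and batch_size=10, one epoch is 3981 iterations, consistent with the commonly cited size of the eigen_zhou training split, 39,810 frames. A minimal sanity-check sketch in plain Python; the 39,810 figure is an assumption taken from the standard eigen_zhou train_files.txt and should be verified against your copy of the split:

    import math

    # Values copied from the Namespace dump above.
    batch_size = 10
    log_frequency = 250

    # Assumed size of the eigen_zhou training split (train_files.txt).
    num_train_samples = 39810

    iters_per_epoch = num_train_samples // batch_size
    print(iters_per_epoch)                              # 3981, matching "iteration NNNN/3981"
    print(math.ceil(iters_per_epoch / log_frequency))   # 16 loss printouts per epoch (0000..3750)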
MonoDepth2(
  (encoder): ResnetEncoder(
    (encoder): ResNetV1b(
      (conv1): Conv2D(3 -> 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
      (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
      (relu): Activation(relu)
      (maxpool): MaxPool2D(size=(3, 3), stride=(2, 2), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
      (layer1): HybridSequential(
        (0): BasicBlockV1b( (conv1): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64) (relu1): Activation(relu) (conv2): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64) (relu2): Activation(relu) )
        (1): BasicBlockV1b( (conv1): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64) (relu1): Activation(relu) (conv2): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64) (relu2): Activation(relu) )
      )
      (layer2): HybridSequential(
        (0): BasicBlockV1b( (conv1): Conv2D(64 -> 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) (relu1): Activation(relu) (conv2): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) (relu2): Activation(relu) (downsample): HybridSequential( (0): Conv2D(64 -> 128, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) ) )
        (1): BasicBlockV1b( (conv1): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) (relu1): Activation(relu) (conv2): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) (relu2): Activation(relu) )
      )
      (layer3): HybridSequential(
        (0): BasicBlockV1b( (conv1): Conv2D(128 -> 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) (relu1): Activation(relu) (conv2): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) (relu2): Activation(relu) (downsample): HybridSequential( (0): Conv2D(128 -> 256, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) ) )
        (1): BasicBlockV1b( (conv1): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) (relu1): Activation(relu) (conv2): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) (relu2): Activation(relu) )
      )
      (layer4): HybridSequential(
        (0): BasicBlockV1b( (conv1): Conv2D(256 -> 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) (relu1): Activation(relu) (conv2): Conv2D(512 -> 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) (relu2): Activation(relu) (downsample): HybridSequential( (0): Conv2D(256 -> 512, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) ) )
        (1): BasicBlockV1b( (conv1): Conv2D(512 -> 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) (relu1): Activation(relu) (conv2): Conv2D(512 -> 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) (relu2): Activation(relu) )
      )
      (avgpool): GlobalAvgPool2D(size=(1, 1), stride=(1, 1), padding=(0, 0), ceil_mode=True, global_pool=True, pool_type=avg, layout=NCHW)
      (flat): Flatten
      (fc): Dense(512 -> 1000, linear)
    )
  )
  (decoder): DepthDecoder(
    (decoder): HybridSequential(
      (0): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(512 -> 256, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (1): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(512 -> 256, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (2): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(256 -> 128, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (3): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(256 -> 128, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (4): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(128 -> 64, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (5): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(128 -> 64, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (6): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(64 -> 32, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (7): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(96 -> 32, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (8): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(32 -> 16, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (9): ConvBlock( (conv): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(16 -> 16, kernel_size=(3, 3), stride=(1, 1)) ) (nonlin): ELU( ) )
      (10): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(16 -> 1, kernel_size=(3, 3), stride=(1, 1)) )
      (11): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(32 -> 1, kernel_size=(3, 3), stride=(1, 1)) )
      (12): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(64 -> 1, kernel_size=(3, 3), stride=(1, 1)) )
      (13): Conv3x3( (pad): ReflectionPad2D( ) (conv): Conv2D(128 -> 1, kernel_size=(3, 3), stride=(1, 1)) )
    )
    (sigmoid): Activation(sigmoid)
  )
)
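The depth decoder ends in four Conv3x3 heads (one per scale in scales=[0, 1, 2, 3]) followed by a sigmoid, so the network emits normalized disparity in (0, 1) rather than metric depth. The min_depth=0.1 and max_depth=100.0 values from the configuration convert sigmoid output to depth in the usual monodepth2 way; the numpy sketch below illustrates that convention and is not GluonCV's exact implementation:

    import numpy as np

    def disp_to_depth(disp, min_depth=0.1, max_depth=100.0):
        # Map sigmoid disparity in (0, 1) to depth (standard monodepth2 convention).
        min_disp = 1.0 / max_depth   # disparity of the farthest representable point
        max_disp = 1.0 / min_depth   # disparity of the nearest representable point
        scaled_disp = min_disp + (max_disp - min_disp) * disp
        depth = 1.0 / scaled_disp
        return scaled_disp, depth

    _, depth = disp_to_depth(np.array([0.5]))
    print(depth)   # ~[0.2]: a mid-range sigmoid output maps to a near-field depth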
MonoDepth2PoseNet(
  (encoder): ResnetEncoder(
    (encoder): ResNetV1b(
      (conv1): Conv2D(6 -> 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
      (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64)
      (relu): Activation(relu)
      (maxpool): MaxPool2D(size=(3, 3), stride=(2, 2), padding=(1, 1), ceil_mode=False, global_pool=False, pool_type=max, layout=NCHW)
      (layer1): HybridSequential(
        (0): BasicBlockV1b( (conv1): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64) (relu1): Activation(relu) (conv2): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64) (relu2): Activation(relu) )
        (1): BasicBlockV1b( (conv1): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64) (relu1): Activation(relu) (conv2): Conv2D(64 -> 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=64) (relu2): Activation(relu) )
      )
      (layer2): HybridSequential(
        (0): BasicBlockV1b( (conv1): Conv2D(64 -> 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) (relu1): Activation(relu) (conv2): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) (relu2): Activation(relu) (downsample): HybridSequential( (0): Conv2D(64 -> 128, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) ) )
        (1): BasicBlockV1b( (conv1): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) (relu1): Activation(relu) (conv2): Conv2D(128 -> 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=128) (relu2): Activation(relu) )
      )
      (layer3): HybridSequential(
        (0): BasicBlockV1b( (conv1): Conv2D(128 -> 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) (relu1): Activation(relu) (conv2): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) (relu2): Activation(relu) (downsample): HybridSequential( (0): Conv2D(128 -> 256, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) ) )
        (1): BasicBlockV1b( (conv1): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) (relu1): Activation(relu) (conv2): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=256) (relu2): Activation(relu) )
      )
      (layer4): HybridSequential(
        (0): BasicBlockV1b( (conv1): Conv2D(256 -> 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) (relu1): Activation(relu) (conv2): Conv2D(512 -> 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) (relu2): Activation(relu) (downsample): HybridSequential( (0): Conv2D(256 -> 512, kernel_size=(1, 1), stride=(2, 2), bias=False) (1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) ) )
        (1): BasicBlockV1b( (conv1): Conv2D(512 -> 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn1): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) (relu1): Activation(relu) (conv2): Conv2D(512 -> 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False) (bn2): BatchNorm(axis=1, eps=1e-05, momentum=0.9, fix_gamma=False, use_global_stats=False, in_channels=512) (relu2): Activation(relu) )
      )
      (avgpool): GlobalAvgPool2D(size=(1, 1), stride=(1, 1), padding=(0, 0), ceil_mode=True, global_pool=True, pool_type=avg, layout=NCHW)
      (flat): Flatten
      (fc): Dense(512 -> 1000, linear)
    )
  )
  (decoder): PoseDecoder(
    (net): HybridSequential(
      (0): Conv2D(512 -> 256, kernel_size=(1, 1), stride=(1, 1))
      (1): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (2): Conv2D(256 -> 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))
      (3): Conv2D(256 -> 12, kernel_size=(1, 1), stride=(1, 1))
    )
  )
)
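The pose branch takes a 6-channel input (a concatenated pair of RGB frames, matching pose_model_input='pairs' and frame_ids=[0, -1, 1]), and its final 1x1 convolution emits 12 channels. In the reference monodepth2 code these are read as two 6-DoF motions, 3 axis-angle plus 3 translation components each, scaled down for training stability. The numpy sketch below shows that reading; the reshape layout and the 0.01 factor are assumptions carried over from the reference implementation, not taken from GluonCV:

    import numpy as np

    def split_pose_output(pose_feat):
        # pose_feat: (N, 12) array, the decoder output after spatial averaging.
        # Interpret as 2 transforms x (3 axis-angle + 3 translation) values,
        # scaled by 0.01 as in the reference monodepth2 code (assumption).
        out = 0.01 * pose_feat.reshape(-1, 2, 1, 6)
        axisangle = out[..., :3]      # (N, 2, 1, 3)
        translation = out[..., 3:]    # (N, 2, 1, 3)
        return axisangle, translation

    feat = np.random.randn(4, 12).astype(np.float32)
    aa, t = split_pose_output(feat)
    print(aa.shape, t.shape)   # (4, 2, 1, 3) (4, 2, 1, 3)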
Starting Epoch: 0
Total Epochs: 20
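The training loss tabulated below is the monodepth2 self-supervised objective: a photometric reprojection error (0.85 x SSIM distance plus 0.15 x L1, since no_ssim=False), taken per pixel as the minimum over the views warped from frame_ids -1 and +1, plus disparity_smoothness=0.001 times an edge-aware smoothness term, with automasking enabled (disable_automasking=False). The numpy sketch below illustrates the two terms; it uses a crude global SSIM where the real loss uses a windowed per-pixel SSIM, so treat it as the shape of the computation, not GluonCV's code:

    import numpy as np

    def photometric_error(pred, target, alpha=0.85):
        # 0.85 * SSIM distance + 0.15 * L1 (global-statistics SSIM for brevity).
        l1 = np.abs(pred - target).mean()
        mu_p, mu_t = pred.mean(), target.mean()
        cov = ((pred - mu_p) * (target - mu_t)).mean()
        c1, c2 = 0.01 ** 2, 0.03 ** 2
        ssim = ((2 * mu_p * mu_t + c1) * (2 * cov + c2)) / (
            (mu_p ** 2 + mu_t ** 2 + c1) * (pred.var() + target.var() + c2))
        return alpha * (1 - ssim) / 2 + (1 - alpha) * l1

    def smoothness(disp, img):
        # Edge-aware first-order smoothness on mean-normalized disparity.
        d = disp / (disp.mean() + 1e-7)
        dx = np.abs(d[:, 1:] - d[:, :-1]) * np.exp(-np.abs(img[:, 1:] - img[:, :-1]).mean(-1))
        dy = np.abs(d[1:, :] - d[:-1, :]) * np.exp(-np.abs(img[1:, :] - img[:-1, :]).mean(-1))
        return dx.mean() + dy.mean()

    img = np.random.rand(192, 640, 3).astype(np.float32)    # height=192, width=640
    disp = np.random.rand(192, 640).astype(np.float32)
    total = photometric_error(0.9 * img, img) + 0.001 * smoothness(disp, img)
    print(float(total))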
Training loss (logged every 250 of the 3981 iterations per epoch):

Epoch   0000   0250   0500   0750   1000   1250   1500   1750   2000   2250   2500   2750   3000   3250   3500   3750
    0  0.156  0.148  0.147  0.145  0.144  0.143  0.142  0.141  0.141  0.140  0.139  0.139  0.138  0.137  0.137  0.136
    1  0.109  0.128  0.128  0.128  0.128  0.128  0.127  0.127  0.127  0.127  0.127  0.127  0.126  0.126  0.126  0.126
    2  0.114  0.122  0.122  0.122  0.122  0.122  0.122  0.122  0.122  0.122  0.122  0.122  0.122  0.122  0.122  0.122
    3  0.123  0.120  0.120  0.120  0.120  0.120  0.120  0.120  0.120  0.120  0.120  0.120  0.120  0.120  0.120  0.120
    4  0.113  0.119  0.119  0.119  0.119  0.118  0.119  0.118  0.118  0.118  0.118  0.118  0.118  0.118  0.118  0.118
    5  0.123  0.117  0.117  0.117  0.117  0.117  0.117  0.117  0.117  0.117  0.117  0.117  0.117  0.117  0.117  0.117
    6  0.099  0.116  0.116  0.116  0.116  0.116  0.116  0.116  0.116  0.116  0.116  0.116  0.116  0.116  0.116  0.116
    7  0.096  0.116  0.115  0.115  0.115  0.115  0.115  0.115  0.115  0.115  0.115  0.115  0.115  0.115  0.115  0.115
    8  0.098  0.115  0.115  0.114  0.114  0.115  0.115  0.115  0.115  0.115  0.115  0.114  0.115  0.114  0.114  0.114
    9  0.109  0.113  0.113  0.114  0.113  0.114  0.114  0.114  0.114  0.114  0.114  0.114  0.114  0.114  0.114  0.114
   10  0.098  0.113  0.114  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113
   11  0.118  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113
   12  0.125  0.113  0.112  0.112  0.112  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.113  0.112  0.112
   13  0.116  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112
   14  0.101  0.111  0.111  0.111  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112
   15  0.101  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.112  0.111  0.111  0.111  0.111  0.111  0.111  0.111
   16  0.125  0.112  0.112  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111
   17  0.122  0.113  0.112  0.112  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111  0.111
   18  0.108  0.112  0.111  0.111  0.110  0.110  0.110  0.111  0.110  0.111  0.111  0.111  0.111  0.111  0.111  0.110
   19  0.117  0.110  0.110  0.110  0.110  0.110  0.110  0.110  0.110  0.110  0.110  0.110  0.110  0.110  0.110  0.110

Validation on the eigen split after each epoch:

Epoch  abs_REL  sq_REL   RMSE  RMSE_log  Delta_1  Delta_2  Delta_3
    0    0.193   1.319  6.159     0.270    0.720    0.904    0.965
    1    0.166   1.135  5.460     0.236    0.784    0.935    0.976
    2    0.144   1.032  5.600     0.229    0.805    0.939    0.977
    3    0.153   1.229  5.611     0.229    0.807    0.941    0.978
    4    0.147   1.056  5.272     0.226    0.819    0.939    0.978
    5    0.142   1.172  5.415     0.223    0.830    0.944    0.979
    6    0.143   1.253  5.522     0.228    0.830    0.938    0.978
    7    0.142   1.192  5.372     0.225    0.836    0.940    0.978
    8    0.142   1.192  5.466     0.222    0.832    0.940    0.979
    9    0.136   1.049  5.255     0.221    0.840    0.941    0.979
   10    0.144   1.442  5.652     0.230    0.834    0.939    0.977
   11    0.141   1.225  5.511     0.224    0.837    0.942    0.978
   12    0.139   1.224  5.404     0.223    0.841    0.943    0.979
   13    0.143   1.313  5.603     0.226    0.837    0.942    0.978
   14    0.138   1.210  5.427     0.221    0.842    0.941    0.979
   15    0.141   1.364  5.539     0.225    0.839    0.943    0.978
   16    0.143   1.356  5.642     0.226    0.838    0.942    0.978
   17    0.139   1.252  5.570     0.224    0.841    0.940    0.978
   18    0.142   1.488  5.789     0.227    0.842    0.940    0.977
   19    0.137   1.311  5.556     0.224    0.843    0.943    0.978

Training Finished! Total training time is 56h 8m
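The per-epoch validation rows above are the standard Eigen-split depth metrics: Delta_k is the fraction of pixels whose depth ratio max(gt/pred, pred/gt) is below 1.25^k, and, since disable_median_scaling=False, monocular predictions are median-scaled against the ground truth before scoring. A numpy sketch of the conventional computation, assuming gt and pred are 1-D arrays of valid depths already clipped to [min_depth, max_depth]:

    import numpy as np

    def depth_errors(gt, pred, median_scale=True):
        if median_scale:   # disable_median_scaling=False in the configuration
            pred = pred * np.median(gt) / np.median(pred)
        ratio = np.maximum(gt / pred, pred / gt)
        delta_1 = (ratio < 1.25).mean()
        delta_2 = (ratio < 1.25 ** 2).mean()
        delta_3 = (ratio < 1.25 ** 3).mean()
        abs_rel = np.mean(np.abs(gt - pred) / gt)
        sq_rel = np.mean((gt - pred) ** 2 / gt)
        rmse = np.sqrt(np.mean((gt - pred) ** 2))
        rmse_log = np.sqrt(np.mean((np.log(gt) - np.log(pred)) ** 2))
        return abs_rel, sq_rel, rmse, rmse_log, delta_1, delta_2, delta_3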