# -*- mode: dockerfile -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Dockerfile to run MXNet on Ubuntu 16.04 for GPU

FROM nvidia/cuda:9.0-cudnn7-devel

WORKDIR /work/deps

# Ubuntu-core: toolchain and library prerequisites for MXNet + ONNX + TensorRT.
# apt lists are removed in the same layer so they never persist in the image.
RUN apt-get update && \
    apt-get install -y \
        automake \
        apt-transport-https \
        build-essential \
        ca-certificates \
        cmake \
        curl \
        git \
        libcurl4-openssl-dev \
        libjemalloc-dev \
        liblapack-dev \
        libopenblas-dev \
        libopencv-dev \
        libtool \
        libzmq3-dev \
        ninja-build \
        python3-dev \
        python3-pip \
        software-properties-common \
        unzip \
        wget && \
    rm -rf /var/lib/apt/lists/*

# Make `python`/`pip` resolve to the Python 3 toolchain.
RUN unlink /usr/bin/python && \
    ln -s $(which python3) /usr/bin/python && \
    ln -s $(which pip3) /usr/bin/pip

# Test/lint tooling. NOTE(review): `nose`, `nose-timer` and `gluoncv` are
# unpinned and may drift between builds — consider pinning them too.
RUN pip install --no-cache-dir nose cpplint==1.3.0 pylint==1.8.3 \
    'numpy<1.15.0,>=1.8.2' nose-timer \
    'requests<2.19.0,>=2.18.4' h5py==2.8.0rc1 scipy==1.0.1 gluoncv

# TensorRT
WORKDIR /work
ENV LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:/usr/local/lib

# Python minor version for the ONNX build's include path below.
# Ubuntu 16.04 ships Python 3.5; previously this variable was never set,
# leaving the -I flag pointing at the nonexistent /usr/include/python.
ENV PYVER=3.5

# Clone MXNet.
# The ADD of the refs URL is an intentional cache-buster: its content changes
# whenever master moves, forcing this and all following layers to rebuild so
# the clone below is never served from a stale cache.
ADD https://api.github.com/repos/apache/incubator-mxnet/git/refs/heads/master mxnet.commit.txt
RUN git clone --recursive https://github.com/apache/incubator-mxnet.git
WORKDIR /work/incubator-mxnet

# Protobuf (build prerequisite for ONNX). Install system-wide, refresh the
# linker cache, and drop the source tree in the same layer to keep the image
# small.
RUN cd /work && \
    git clone --recursive -b 3.5.1.1 https://github.com/google/protobuf.git && \
    cd protobuf && \
    ./autogen.sh && \
    ./configure && \
    make -j$(nproc) && \
    make install && \
    ldconfig && \
    rm -rf /work/protobuf

# Build ONNX (vendored under 3rdparty/onnx-tensorrt).
RUN echo "Installing ONNX." && \
    cd 3rdparty/onnx-tensorrt/third_party/onnx && \
    rm -rf build && \
    mkdir -p build && \
    cd build && \
    cmake \
        -DCMAKE_CXX_FLAGS=-I/usr/include/python${PYVER} \
        -DBUILD_SHARED_LIBS=ON .. && \
    make -j$(nproc) && \
    make install

# Install TensorRT from NVIDIA's apt repository package.
# NOTE(review): the .deb is fetched without a checksum — consider verifying
# it (sha256sum) before dpkg -i.
RUN wget -qO tensorrt.deb https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1604/x86_64/nvinfer-runtime-trt-repo-ubuntu1604-4.0.1-ga-cuda9.0_1-1_amd64.deb && \
    dpkg -i tensorrt.deb && \
    apt-get update && \
    apt-get install -y --allow-downgrades libnvinfer-dev && \
    rm -rf /var/lib/apt/lists/* && \
    rm tensorrt.deb

# Build ONNX-TensorRT (the ONNX -> TensorRT parser/runtime).
RUN cd 3rdparty/onnx-tensorrt/ && \
    mkdir -p build && \
    cd build && \
    cmake .. && \
    make -j$(nproc) && \
    make install

# Build MXNet itself with CUDA/cuDNN/TensorRT enabled for Pascal and Volta
# architectures.
RUN make \
    USE_BLAS=openblas \
    USE_CUDA=1 \
    USE_CUDA_PATH=/usr/local/cuda \
    USE_CUDNN=1 \
    USE_DIST_KVSTORE=0 \
    USE_TENSORRT=1 \
    ONNX_NAMESPACE=onnx \
    CUDA_ARCH="-gencode arch=compute_60,code=sm_60 \
               -gencode arch=compute_61,code=sm_61 \
               -gencode arch=compute_70,code=sm_70 \
               -gencode arch=compute_70,code=compute_70" \
    -j$(nproc)

# Install the built library system-wide, then the Python bindings in editable
# mode. `make clean` runs after the .so has been moved, so only the build
# intermediates are discarded.
RUN mv lib/libmxnet.so /usr/local/lib && \
    ldconfig && \
    make clean && \
    cd python && \
    pip install -e .