#!/usr/bin/env bash
# This script replaces our old "make" command line - instead of
# "make mode=debug image=rogue", use "build mode=debug image=rogue".
# This first runs "make" with the same parameters, and then builds
# the requested image.

# Print the help text and exit. Optional $1 is the exit status (default 0).
# Note: the heredoc body is intentionally unindented so its output is
# byte-identical to the old tab-stripped (<<-EOF) version.
usage() {
	cat <<EOF
Build OSv kernel and specified modules/apps and package them together into a runnable image

This bash shell script acts as a top level orchestration tool intended to build
OSv images. It does so by invoking a make against the main OSv makefile to
build the kernel and then delegates to various Python scripts (scripts/module.py)
to orchestrate building of explicitly specified apps and modules as well as any
implicitly dependent ones. The end result is the build/<mode>/usr.img file that
is composed of OSv kernel (loader-stripped.elf) and application files and can be
executed using ./scripts/run.py.

Usage:
  scripts/build [options] image=<app1>[,<app2[.run_conf]>[,<module1>,..]] modules=<module1>[,..] | check
  scripts/build clean

Options:
  --help|-h                     Print this help message
  arch=x64|aarch64              Specify the build architecture; default is the same as build host arch
  mode=release|debug            Specify the build mode; default is release
  export=none|selected|all      If 'selected' or 'all' export the app files to <export_dir>
  export_dir=<dir>              The directory to export the files to; default is build/export
  fs=zfs|rofs|rofs_with_zfs|    Specify the filesystem of the image partition
     ramfs|virtiofs
  fs_size=N                     Specify the size of the image in bytes
  fs_size_mb=N                  Specify the size of the image in MiB
  app_local_exec_tls_size=N     Specify the size of app local TLS in bytes; the default is 64
  usrskel=<*.skel>              Specify the base manifest for the image
  <module_makefile_arg>=<value> Pass value of module_makefile_arg to an app/module makefile
                                (can be used to customize specific app/module build process)
  -j<N>                         Set number of parallel jobs for make
  --append-manifest             Append build/<mode>/append.manifest to usr.manifest
  --create-disk                 Instead of usr.img create kernel-less disk.img
  --create-zfs-disk             Create extra empty disk with ZFS filesystem
  --use-openzfs                 Build and manipulate ZFS images using on host OpenZFS tools

Examples:
  ./scripts/build -j4 fs=rofs image=native-example   # Create image with native-example app

  ./scripts/build JAVA_VERSION=10 image=openjdk-zulu-9-and-above,spring-boot-example
                                                     # Create image with spring boot app with Java 10 jdk

  ./scripts/manifest_from_host.sh -w ls && ./scripts/build --append-manifest
                                                     # Create manifest for 'ls' executable

  ./scripts/build check                              # Create test image and run all tests in it

  ./scripts/build clean                              # Clean the build tree
EOF
	exit ${1:-0}
}

# Report any failing command and abort; there is no global "set -e", so this
# ERR trap is what stops the build on errors.
# NOTE(review): by the time "exit $?" runs, $? is the status of the echo
# (normally 0), not of the failed command — consider saving the status first.
trap 'echo "$0 failed: $BASH_COMMAND" >&2; exit $?' ERR

# If "MAKEFLAGS" is set, we've been run from "make". The following hacks
# allow our Makefile to forward "make image=..." requests to us
# without running into an infinite loop when we call "make".
# Each pair of substitutions neutralizes one variable: first when it appears
# mid-string (preceded by a space), then when it is the very first word
# (the /# anchor). The renamed "zzz=..." is harmless to make.
MAKEFLAGS=${MAKEFLAGS/ image=/ zzz=}
MAKEFLAGS=${MAKEFLAGS/#image=/zzz=}
MAKEFLAGS=${MAKEFLAGS/ modules=/ zzz=}
MAKEFLAGS=${MAKEFLAGS/#modules=/zzz=}
# Also drop same-named shell variables possibly inherited from the environment
unset image modules

# Pass to "make" all the given args except "image=..." or "modules=..."
declare -a args
# Make is going to be invoked twice. At the first run ("stage1") we
# build all possible dependencies any app or module might require. At
# the second - the loader.elf is linked and the loader.img is
# produced.  In the case of "make clean", the first invocation (and
# the only one) is used for cleaning up the kernel.
stage1_args="stage1"
for i
do
	case $i in
	--help|-h)
		usage ;;
	image=*|modules=*|fs=*|usrskel=*|check|--append-manifest|--create-disk|--create-zfs-disk|--use-openzfs) ;;
	clean)
		stage1_args=clean ;;
	arch=*)
		# Record the requested target architecture. Use parameter
		# expansion instead of eval so argument text is never executed
		# as shell code.
		arch=${i#arch=}
		args+=("$i") ;;
	*)
		args+=("$i") ;;
	esac
done

# When cross-building for aarch64, fetch the pre-built cross toolchain if it
# has NOT been downloaded yet (note the '!': the download script only needs
# to run when the toolchain directory is missing).
# NOTE(review): CROSS_PREFIX is only exported on the run that downloads the
# toolchain — confirm later builds pick the prefix up elsewhere if needed.
if [[ "${arch-}" == "aarch64" && ! -d "build/downloaded_packages/aarch64/toolchain/" ]]; then
	. scripts/download_aarch64_toolchain.sh
	export CROSS_PREFIX=aarch64-none-linux-gnu-
fi

# Run the configuration step ("config" target of conf/Makefile) first; its
# output starts a fresh build.out log for this build.
make "${args[@]}" -f conf/Makefile -j1 config | tee build.out
# tee masks make's exit status, so read it from PIPESTATUS[0]
status=${PIPESTATUS[0]}
if [ $status -ne 0 ]
then
	echo "make -f conf/Makefile failed. Exiting from build script"
	exit $status
fi

# First build stage: build every dependency any app or module might need
# ("stage1"), or clean the kernel when invoked as "build clean".
# Append (-a) to build.out so the configuration log above is preserved,
# matching the final make invocation later in this script.
make "${args[@]}" ${stage1_args} | tee -a build.out
# check exit status of make (again via PIPESTATUS, see above)
status=${PIPESTATUS[0]}
if [ $status -ne 0 ]
then
	echo "make failed. Exiting from build script"
	exit $status
fi

# "build clean" behaves like "make clean" for the kernel (already done by
# the stage1 invocation above), plus cleans every module via module.py.
for arg
do
	if [[ "$arg" == "clean" ]]; then
		set -x
		OSV_BASE=$(pwd) ./scripts/module.py clean -q
		exit
	fi
done

# Find out where "make" decided to put its compilation results. We'll
# put the images in the same place.
if [[ ! -L build/last || ! -f build/last/loader.o ]]
then
	echo "'make stage1' did not leave expected results" >&2
	exit 2
fi
OUT=build/$(readlink build/last)

# Collect "name=value" arguments into the associative array "vars", and pick
# up make's "-j ..." parallelism option so it can be forwarded to the
# modules' Makefiles as well.
declare -A vars
j_arg=
expect_job_count=
for opt
do
	if [[ -n "$expect_job_count" ]]
	then
		expect_job_count=
		case $opt in
		[0-9]*)
			# The previous argument was a bare "-j"; this one is its count
			j_arg="-j$opt"
			continue;;
		esac
	fi
	case $opt in
	*=*)
		# Store name=value pairs; key is everything before the first '='
		vars[${opt%%=*}]=${opt#*=};;
	-j)
		# module.py's command line parser has trouble parsing a "-j"
		# option without a value. Help it with a fake value "-".
		j_arg="-j-"
		# make allows "-j 4" (number separated by a space), and the number
		# is optional, so peek at the next argument to see if it is one.
		expect_job_count=yes;;
	-j*)
		j_arg=$opt;;
	check)
		# "build check" is a shortcut for
		# "build image=tests; scripts/test.py"
		vars[image]=tests;;
	--append-manifest)
		vars[append_manifest]="true";;
	--create-disk)
		vars[create_disk]="true";;
	--create-zfs-disk)
		vars[create_zfs_disk]="true";;
	--use-openzfs)
		vars[use_openzfs]="true";;
	esac
done

# fs_size_mb is in MiB (1024*1024 bytes); fs_size is in bytes
fs_size_mb=${vars[fs_size_mb]-512}
fs_size=${vars[fs_size]-$((fs_size_mb*1024*1024))}
# The image size must be a multiple of 512 bytes — round it down by
# clearing the low nine bits
fs_size=$((fs_size & ~511))

SRC=$(pwd)
# $OUT has the form build/<mode>.<arch>; split it back into its parts
arch=$(expr "$OUT" : '.*\.\(.*\)')
mode=$(expr "$OUT" : '.*/\(.*\)\..*')

# Choose the modules from the image= or modules= parameters, falling back
# to the same defaults our old makefile used
case $arch in
aarch64) image=${vars[image]-uush};;
*) image=${vars[image]-default};;
esac
modules=${vars[modules]-!$image}

# Make OSV_BUILD_PATH an absolute path to the output directory
if [[ "$OUT" = /* ]]
then
	OSV_BUILD_PATH=$OUT
else
	OSV_BUILD_PATH=$(pwd)/$OUT
fi

host_arch=$(uname -m)

# Pick the bootfs manifest and usrskel according to the requested filesystem
manifest=bootfs.manifest.skel
fs_type=${vars[fs]-zfs}
usrskel_arg=
case $fs_type in
zfs)
	;; # Nothing to change here. This is our default behavior
rofs|rofs_with_zfs|virtiofs)
	# All of these are read-only (in OSv) and require nothing extra on
	# bootfs to work
	manifest=bootfs_empty.manifest.skel
	usrskel_arg="--usrskel usr_rofs.manifest.skel";;
ramfs)
	cp "$SRC"/static/etc/fstab "$OUT"/fstab
	manifest=$OUT/usr.manifest
	usrskel_arg="--usrskel usr_ramfs.manifest.skel";;
*)
	echo "Unknown filesystem \"$fs_type\"" >&2
	exit 2
esac

# On aarch64 (host or target) always build a kernel-less disk image
if [[ "$host_arch" == "aarch64" || "$arch" == "aarch64" ]]; then
	vars[create_disk]="true"
fi

# An explicit usrskel= parameter overrides the default chosen above
if [[ -n "${vars[usrskel]}" ]]
then
	usrskel_arg="--usrskel ${vars[usrskel]}"
fi

export=${vars[export]-none}
if [[ "$export" == "selected" ]]
then
	no_required_arg="--no-required"
fi

# With --append-manifest and no explicit module list, start from an empty
# image instead of the default modules
if [[ ${vars[append_manifest]} == "true" && $modules == "!default" ]]; then
	modules="empty"
fi

# Select the compiler used (only) to locate libgcc_s.so.1; cross builds
# from an x86_64 host to aarch64 use the cross gcc.
CC=gcc
if [[ "$host_arch" == "x86_64" && "$arch" == 'aarch64' ]]; then
	CC=${CROSS_PREFIX:-aarch64-linux-gnu-}gcc
fi

# Ask the compiler where libgcc_s.so.1 lives. If it merely echoes the
# name back, it could not resolve the library, and we fall back to the
# downloaded aarch64 gcc location.
libgcc_s_path=$(${CC} -print-file-name=libgcc_s.so.1)
if [[ "$libgcc_s_path" == "libgcc_s.so.1" ]]; then
	cat <<EOF
Unable to resolve libgcc_s.so.1 using "${CC}".
Looking in build/downloaded_packages/aarch64/gcc/install/lib64
EOF
	libgcc_s_path="build/downloaded_packages/aarch64/gcc/install/lib64/libgcc_s.so.1"
fi
# Resolve symlinks and keep only the containing directory
libgcc_s_dir=$(dirname $(readlink -f ${libgcc_s_path}))

# The parentheses start a subshell. Whatever is exported there, doesn't affect the external shell
(
	# Note: the double-quotes and almost everything in the line below is important to correctly allow spaces
	# This specifically allows us to pass extra arguments to the modules/apps makefiles
	for i in "${args[@]}"
	do
		case $i in
		*=*)
			# again, the double-quotes is important in case the variable's value contains spaces
			export "$i" ;;
		esac
	done
	# Export the variables we already have. This makes it unnecessary to do "fs_type=$fs_type ..."
	export fs_type mode OSV_BUILD_PATH libgcc_s_dir
	# Other variables we wanted to rename, I don't know why
	export ARCH=$arch OSV_BASE=$SRC
	# Run what we wanted to run. It will inherit everything we exported above.
	scripts/module.py $j_arg build -c "$modules" $usrskel_arg $no_required_arg
)

# With --append-manifest, tack the generated append.manifest onto the image
# manifest; if no command line was produced, use append_cmdline as default.
if [[ ${vars[append_manifest]} == "true" && -f "$OSV_BUILD_PATH/append.manifest" ]]; then
	cat "$OSV_BUILD_PATH/append.manifest" >> "$OSV_BUILD_PATH/usr.manifest"
	if [[ -f "$OSV_BUILD_PATH/append_cmdline" && $(cat "$OSV_BUILD_PATH/cmdline") == "" ]]; then
		cp $OSV_BUILD_PATH/append_cmdline $OSV_BUILD_PATH/cmdline
	fi
fi

# Second make stage: link loader.elf and produce loader.img, using the
# manifest chosen earlier; log output is appended to build.out.
bootfs_manifest=$manifest make "${args[@]}" | tee -a build.out
# check exit status of make (PIPESTATUS[0], since tee would mask it)
status=${PIPESTATUS[0]}
if [ $status -ne 0 ]
then
	echo "make failed. Exiting from build script"
	exit $status
fi

# The kernel build must have produced loader.img by now
if [[ ! -f build/last/loader.img ]]
then
	echo "'make' did not leave expected results" >&2
	exit 2
fi

# Round the kernel size up to the next 2MiB (0x200000) boundary; the
# filesystem partition will start there.
loader_size=$(stat --printf %s "$OUT"/loader.img)
kernel_end=$(((loader_size + 2097151) & ~2097151))

# The python scripts called below assume the current directory is $OUT (as
# was the case in our old build.mk).
cd "$OUT"

# Optionally export the application files to a host directory instead
if [[ "$export" != "none" ]]; then
	export_dir=${vars[export_dir]-$SRC/build/export}
	rm -rf "$export_dir"
	"$SRC"/scripts/export_manifest.py -e "$export_dir" -m usr.manifest -D libgcc_s_dir="$libgcc_s_dir"
fi

# With --create-disk we build a kernel-less disk.img from the bare
# partition-table image; otherwise the image starts with the kernel and the
# partition begins right after it.
if [[ ${vars[create_disk]} == "true" ]]; then
	partition_offset=512
	bare="$SRC"/scripts/disk.bin
	raw_disk=disk
	qcow2_disk=disk
else
	partition_offset=$kernel_end
	bare=loader.img
	raw_disk=bare
	qcow2_disk=usr
fi

# Build the ZFS image: copy the bare/loader image to <raw_disk>.raw, mark
# partition 2 in its partition table, populate the ZFS filesystem, and leave
# the result as <qcow2_disk>.img. Reads globals: bare, raw_disk, qcow2_disk,
# partition_offset, partition_size, image_size, arch, vars.
create_zfs_disk() {
	cp $bare $raw_disk.raw
	"$SRC"/scripts/imgedit.py setpartition "-f raw ${raw_disk}.raw" 2 $partition_offset $partition_size
	if [[ ${vars[use_openzfs]} == "true" ]]; then
		#We use raw disk on purpose so that zfs-image-on-host.sh can use loop device which is faster to copy files to
		qemu-img resize ${raw_disk}.raw ${image_size}b >/dev/null 2>&1
		"$SRC"/scripts/zfs-image-on-host.sh build ${raw_disk}.raw 1 osv zfs true
		qemu-img convert -f raw -O qcow2 $raw_disk.raw $qcow2_disk.img
	else
		# Guest path: convert to qcow2 first, then grow it and populate the
		# filesystem through upload_manifest.py
		qemu-img convert -f raw -O qcow2 $raw_disk.raw $qcow2_disk.img
		qemu-img resize $qcow2_disk.img ${image_size}b >/dev/null 2>&1
		"$SRC"/scripts/upload_manifest.py --arch=$arch -o $qcow2_disk.img -m usr.manifest -D libgcc_s_dir="$libgcc_s_dir"
	fi
	# The raw intermediate is no longer needed
	rm ${raw_disk}.raw
}

# Build the rofs image: start from the bare/loader image, register the rofs
# partition in the partition table, copy rofs.img into that partition, and
# convert the result to <qcow2_disk>.img of image_size bytes. Reads globals:
# bare, raw_disk, qcow2_disk, partition_offset, partition_size, image_size.
create_rofs_disk() {
	cp "${bare}" "${raw_disk}.raw"
	"$SRC"/scripts/imgedit.py setpartition "-f raw ${raw_disk}.raw" 2 "${partition_offset}" "${partition_size}"
	qemu-img resize "${raw_disk}.raw" "${image_size}b" >/dev/null 2>&1
	# Place rofs.img at the partition offset (seek=1 block of obs bytes)
	dd if=rofs.img of="${raw_disk}.raw" obs="${partition_offset}" seek=1 >/dev/null 2>&1
	qemu-img convert -f raw -O qcow2 "${raw_disk}.raw" "${qcow2_disk}.img"
	qemu-img resize "${qcow2_disk}.img" "${image_size}b" >/dev/null 2>&1
}

# Create a ZFS filesystem inside an existing image.
# $1 - path to the disk image
# $2 - OSv device path of the partition (used by the guest builder branch)
# $3 - partition number (used by the host OpenZFS branch)
create_zfs_filesystem() {
	local image_path=$1
	if [[ ${vars[use_openzfs]} == "true" ]]; then
		# Build the pool/filesystem on the host using OpenZFS tools
		local partition=$3
		"$SRC"/scripts/zfs-image-on-host.sh build $image_path $partition osv zfs false
	else
		# Otherwise boot a minimal OSv "zfs builder" under QEMU and run
		# mkfs.so / zfs.so inside it against the attached image
		local device_path=$2
		local qemu_arch=$arch
		if [[ "$qemu_arch" == 'aarch64' ]]; then
			console=''
			zfs_builder_name='zfs_builder.img'
		else
			qemu_arch='x86_64'
			console='--console=serial'
			zfs_builder_name='zfs_builder-stripped.elf'
		fi
		"$SRC"/scripts/run.py -k --kernel-path $zfs_builder_name --arch=$qemu_arch --vnc none -m 512 -c1 -i ${image_path} --block-device-cache unsafe \
			-s -e "${console} --norandom --nomount --noinit --preload-zfs-library /tools/mkfs.so ${device_path}; /zfs.so set compression=off osv"
	fi
}

# Cross builds need the cross toolchain's strip for the aarch64 binaries
if [[ "$arch" == 'aarch64' ]]; then
	export STRIP=${CROSS_PREFIX:-aarch64-linux-gnu-}strip
fi

# Create the filesystem-specific disk image around the kernel/bare image
case $fs_type in
zfs)
	# One ZFS partition filling the image after the kernel/partition table
	partition_size=$((fs_size - partition_offset))
	image_size=$fs_size
	cp "$SRC"/static/etc/fstab fstab
	create_zfs_disk ;;
rofs)
	rm -rf rofs.img
	cp "$SRC"/static/etc/fstab_rofs fstab
	if [[ ${vars[create_zfs_disk]} == "true" ]]; then
		# Mount the extra ZFS disk (created further below) as /data
		echo "/dev/vblk1.1 /data      zfs       defaults 0 0" >> fstab
	fi
	"$SRC"/scripts/gen-rofs-img.py -o rofs.img -m usr.manifest -D libgcc_s_dir="$libgcc_s_dir"
	# The rofs partition is exactly as large as the generated rofs.img
	partition_size=`stat --printf %s rofs.img`
	image_size=$fs_size
	create_rofs_disk ;;
rofs_with_zfs)
	# Create disk with rofs image on it 1st partition
	rm -rf rofs.img
	cp "$SRC"/static/etc/fstab_rofs fstab
	echo "/dev/vblk0.2 /data      zfs       defaults 0 0" >> fstab
	"$SRC"/scripts/gen-rofs-img.py -o rofs.img -m usr.manifest -D libgcc_s_dir="$libgcc_s_dir"
	partition_size=`stat --printf %s rofs.img`
	image_size=$((fs_size+partition_size))
	create_rofs_disk
	# Resize the disk to fit ZFS on it after rofs
	qemu-img resize $qcow2_disk.img ${image_size}b >/dev/null 2>&1
	# Create filesystem on ZFS partition
	zfs_partition_offset=$((partition_offset + partition_size))
	zfs_partition_size=$((image_size-zfs_partition_offset))
	"$SRC"/scripts/imgedit.py setpartition "$qcow2_disk.img" 3 $zfs_partition_offset $zfs_partition_size
	create_zfs_filesystem $qcow2_disk.img "/dev/vblk0.2" 2;;
ramfs|virtiofs)
	# No need to create extra fs like above: ramfs is already created (as the
	# bootfs) and virtio-fs is specified with virtiofsd at run time
	image_size=$((partition_offset))
	cp $bare $raw_disk.raw
	"$SRC"/scripts/imgedit.py setpartition "-f raw ${raw_disk}.raw" 2 $partition_offset 0
	qemu-img resize ${raw_disk}.raw ${image_size}b >/dev/null 2>&1
	qemu-img convert -f raw -O qcow2 $raw_disk.raw $qcow2_disk.img ;;
esac
# Prepend the root fs type option to the command line (preserved by run.py)
cmdline=$(cat cmdline)
echo -n "--rootfs=${fs_type} ${cmdline}" > cmdline

# Bake the command line into the image (usr.img is absent with --create-disk)
if [[ -f "$OSV_BUILD_PATH/usr.img" ]]; then
	"$SRC"/scripts/imgedit.py setargs usr.img `cat cmdline`
fi

# With --create-zfs-disk, also produce a standalone zfs_disk.img: an empty
# ZFS filesystem on partition 2 of a bare disk, usable as a second drive
# (see the /dev/vblk1.1 fstab entry added for rofs above).
if [[ ${vars[create_zfs_disk]} == "true" ]]; then
	partition_offset=512
	partition_size=$((fs_size - partition_offset))
	image_size=$fs_size
	raw_disk=zfs_disk
	qcow2_disk=zfs_disk
	cp "$SRC"/scripts/disk.bin $raw_disk.raw
	"$SRC"/scripts/imgedit.py setpartition "-f raw ${raw_disk}.raw" 2 $partition_offset $partition_size
	qemu-img convert -f raw -O qcow2 $raw_disk.raw $qcow2_disk.img
	qemu-img resize $qcow2_disk.img ${image_size}b >/dev/null 2>&1
	create_zfs_filesystem $qcow2_disk.img "/dev/vblk0.1" 1
fi

# Support "build check": hand control over to scripts/test.py to run all
# tests in the image we just built (exec replaces this shell; no return).
for arg
do
	if [[ "$arg" == "check" ]]; then
		set -x
		cd "$SRC"
		exec ./scripts/test.py
	fi
done