ORNL AF cluster: 800 nodes, each with 2-socket AMD EPYC, 64 cores per node
CNL
gnu,cray,intel
mpt
nwp501
/lustre/storm/nwp501/proj-shared/e3sm
e3sm,nwp501
/lustre/storm/nwp501/proj-shared/e3sm/e3sm_scratch/
/lustre/storm/nwp501/proj-shared/e3sm/inputdata
/lustre/storm/nwp501/proj-shared/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/lustre/storm/nwp501/proj-shared/e3sm/baselines/$COMPILER
/lustre/storm/nwp501/proj-shared/e3sm/tools/cprnc
8
e3sm_developer
4
miller_slurm
e3sm
128
128
FALSE
srun
-n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit
-c $SHELL{echo 128/ {{ tasks_per_node }} |bc}
$SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}
-m plane={{ tasks_per_node }}
/opt/cray/pe/modules/default/init/perl.pm
/opt/cray/pe/modules/default/init/python.py
/opt/cray/pe/modules/default/init/sh
/opt/cray/pe/modules/default/init/csh
/opt/cray/pe/modules/default/bin/modulecmd perl
/opt/cray/pe/modules/default/bin/modulecmd python
module
module
craype
cray-mpich
cray-parallel-netcdf
cray-hdf5-parallel
cray-hdf5
cray-netcdf
cray-netcdf-hdf5parallel
craype/2.7.15
PrgEnv-cray
PrgEnv-gnu
PrgEnv-gnu/8.3.3
gcc/12.1.0
PrgEnv-gnu
gcc/9.3.0
PrgEnv-cray/8.3.3
darshan
cray-mpich/8.1.16
cray-hdf5-parallel/1.12.1.3
cray-netcdf-hdf5parallel/4.8.1.3
cray-parallel-netcdf/1.12.2.3
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
1000
/usr/lib/perl5/5.26.2
/opt/cray/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/
/opt/cray/pe/netcdf-hdf5parallel/4.8.1.3/gnu/9.1/
$SHELL{dirname $(dirname $(which pnetcdf_version))}
128M
cores
Perlmutter CPU-only nodes at NERSC. Phase 2 only: each node has 2 AMD EPYC 7713 64-core (Milan) CPUs and 512GB memory
$ENV{NERSC_HOST}:perlmutter
Linux
intel,gnu,nvidia,amdclang
mpich
e3sm
/global/cfs/cdirs/e3sm
e3sm,m3411,m3412
$ENV{PSCRATCH}/e3sm_scratch/pm-cpu
/global/cfs/cdirs/e3sm/www/$ENV{USER}
http://portal.nersc.gov/project/e3sm/$ENV{USER}
/global/cfs/cdirs/e3sm/inputdata
/global/cfs/cdirs/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/global/cfs/cdirs/e3sm/baselines/$COMPILER
/global/cfs/cdirs/e3sm/tools/cprnc/cprnc
10
e3sm_developer
4
nersc_slurm
e3sm
256
128
TRUE
srun
--label
-n {{ total_tasks }} -N {{ num_nodes }}
-c $SHELL{echo 256/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}
$SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu-bind=cores"; else echo "--cpu-bind=threads";fi;}
-m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
/opt/cray/pe/lmod/8.7.60/init/perl
/opt/cray/pe/lmod/8.7.60/init/python
/opt/cray/pe/lmod/8.7.60/init/sh
/opt/cray/pe/lmod/8.7.60/init/csh
/opt/cray/pe/lmod/lmod/libexec/lmod perl
/opt/cray/pe/lmod/lmod/libexec/lmod python
module
module
cpe
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
cray-netcdf
cray-hdf5
PrgEnv-gnu
PrgEnv-intel
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
climate-utils
cray-libsci
matlab
craype-accel-nvidia80
craype-accel-host
perftools-base
perftools
darshan
PrgEnv-gnu/8.6.0
gcc-native/14
cray-libsci/25.09.0
PrgEnv-intel/8.6.0
intel/2025.3
PrgEnv-nvidia
nvidia/25.9
cray-libsci/25.09.0
PrgEnv-aocc
aocc/4.1.0
cray-libsci/25.09.0
craype-accel-host
craype/2.7.35
cray-mpich/9.0.1
cmake/3.30.2
cray-hdf5-parallel/1.12.2.9
cray-netcdf-hdf5parallel/4.9.0.9
cray-parallel-netcdf/1.12.3.13
cray-hdf5-parallel/1.14.3.7
cray-netcdf-hdf5parallel/4.9.2.1
cray-parallel-netcdf/1.12.3.19
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0.20
1
1
1
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
4000MB
CMA
Prepost
$ENV{CRAY_LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH}
128M
spread
threads
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/intel-2023.2.0/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/intel-2023.2.0; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/intel-2023.2.0; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/intel-2023.2.0; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/intel-2023.2.0; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/intel-2023.2.0; else echo "$ZFP_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/gcc-native-12.3/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/gcc-native-12.3; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/gcc-native-12.3; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/gcc-native-12.3; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/gcc-native-12.3; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/gcc-native-12.3; else echo "$ZFP_ROOT"; fi}
Generic
$SHELL{if [ -z "$Albany_ROOT" ]; then echo /global/common/software/e3sm/albany/2025.11.14/gcc/12.3; else echo "$Albany_ROOT"; fi}
$SHELL{if [ -z "$Trilinos_ROOT" ]; then echo /global/common/software/e3sm/trilinos/2025.10.01/gcc/12.3; else echo "$Trilinos_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/nvidia-24.5/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/nvidia-24.5; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/nvidia-24.5; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/nvidia-24.5; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/nvidia-24.5; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/nvidia-24.5; else echo "$ZFP_ROOT"; fi}
$SHELL{if [ -z "$BLAS_ROOT" ]; then echo $NVIDIA_PATH/compilers; else echo "$BLAS_ROOT"; fi}
$SHELL{if [ -z "$LAPACK_ROOT" ]; then echo $NVIDIA_PATH/compilers; else echo "$LAPACK_ROOT"; fi}
NVHPC
trace
Intel10_64_dyn
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/aocc-4.1.0/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/aocc-4.1.0; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/aocc-4.1.0; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/aocc-4.1.0; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/aocc-4.1.0; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/aocc-4.1.0; else echo "$ZFP_ROOT"; fi}
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/intel; else echo "$MOAB_ROOT"; fi}
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/gnu; else echo "$MOAB_ROOT"; fi}
-1
Perlmutter GPU nodes at NERSC. Phase 1 only: each GPU node has a single AMD EPYC 7713 64-core (Milan) CPU (256GB) and 4 NVIDIA A100s.
$ENV{NERSC_HOST}:perlmutter
Linux
gnugpu,gnu,nvidiagpu,nvidia
mpich
e3sm_g
/global/cfs/cdirs/e3sm
e3sm,m3411,m3412
$ENV{PSCRATCH}/e3sm_scratch/pm-gpu
/global/cfs/cdirs/e3sm/www/$ENV{USER}
http://portal.nersc.gov/project/e3sm/$ENV{USER}
/global/cfs/cdirs/e3sm/inputdata
/global/cfs/cdirs/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/global/cfs/cdirs/e3sm/baselines/$COMPILER
/global/cfs/cdirs/e3sm/tools/cprnc/cprnc
10
e3sm_developer
4
nersc_slurm
e3sm
128
128
128
4
64
64
TRUE
srun
--label
-n {{ total_tasks }} -N {{ num_nodes }}
-c $SHELL{echo 128/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}
$SHELL{if [ 64 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu-bind=cores"; else echo "--cpu-bind=threads";fi;}
-m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
$SHELL{mpn=`./xmlquery --value MAX_MPITASKS_PER_NODE`; if [ 64 -le $mpn ]; then echo $CIMEROOT/../cime_config/machines/scripts/pm-gpu_set_affinity_npergpu.sh $mpn; fi;}
/opt/cray/pe/lmod/8.7.60/init/perl
/opt/cray/pe/lmod/8.7.60/init/python
/opt/cray/pe/lmod/8.7.60/init/sh
/opt/cray/pe/lmod/8.7.60/init/csh
/opt/cray/pe/lmod/lmod/libexec/lmod perl
/opt/cray/pe/lmod/lmod/libexec/lmod python
module
module
cpe
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
cray-netcdf
cray-hdf5
PrgEnv-gnu
PrgEnv-intel
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
climate-utils
cray-libsci
matlab
craype-accel-nvidia80
craype-accel-host
perftools-base
perftools
darshan
PrgEnv-gnu/8.6.0
gcc-native/14
PrgEnv-nvidia
nvidia/25.9
cudatoolkit/12.9
craype-accel-nvidia80
cudatoolkit/12.9
craype-accel-nvidia80
gcc-native-mixed/14
craype-accel-host
craype-accel-host
cray-libsci/25.09.0
craype/2.7.35
cray-mpich/9.0.1
cmake/3.30.2
cray-hdf5-parallel/1.12.2.9
cray-netcdf-hdf5parallel/4.9.0.9
cray-parallel-netcdf/1.12.3.13
cray-hdf5-parallel/1.14.3.7
cray-netcdf-hdf5parallel/4.9.2.1
cray-parallel-netcdf/1.12.3.19
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0.20
1
1
1
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
$ENV{CRAY_LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH}
128M
spread
threads
1
1
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/gnugpu ; else echo "$MOAB_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/gcc-native-12.3/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/gcc-native-12.3; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/gcc-native-12.3; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/gcc-native-12.3; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/gcc-native-12.3; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/gcc-native-12.3; else echo "$ZFP_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/nvidia-24.5/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/nvidia-24.5; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/nvidia-24.5; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/nvidia-24.5; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/nvidia-24.5; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/nvidia-24.5; else echo "$ZFP_ROOT"; fi}
-1
Muller CPU-only nodes on internal NERSC machine, similar to pm-cpu (very small)
$ENV{NERSC_HOST}:muller
Linux
intel,gnu,nvidia,amdclang
mpich
e3sm
/global/cfs/cdirs/e3sm
e3sm,m3411,m3412
$ENV{SCRATCH}/e3sm_scratch/muller-cpu
/global/cfs/cdirs/e3sm/www/$ENV{USER}
http://portal.nersc.gov/project/e3sm/$ENV{USER}
/global/cfs/cdirs/e3sm/inputdata
/global/cfs/cdirs/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/global/cfs/cdirs/e3sm/baselines/$COMPILER
/global/cfs/cdirs/e3sm/tools/cprnc/cprnc
10
e3sm_developer
4
nersc_slurm
e3sm
256
128
TRUE
srun
--label
-n {{ total_tasks }} -N {{ num_nodes }}
-c $SHELL{echo 256/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}
$SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu-bind=cores"; else echo "--cpu-bind=threads";fi;}
-m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
/opt/cray/pe/lmod/8.7.60/init/perl
/opt/cray/pe/lmod/8.7.60/init/python
/opt/cray/pe/lmod/8.7.60/init/sh
/opt/cray/pe/lmod/8.7.60/init/csh
/opt/cray/pe/lmod/lmod/libexec/lmod perl
/opt/cray/pe/lmod/lmod/libexec/lmod python
module
module
cpe
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
cray-netcdf
cray-hdf5
PrgEnv-gnu
PrgEnv-intel
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
climate-utils
cray-libsci
matlab
craype-accel-nvidia80
craype-accel-host
perftools-base
perftools
darshan
PrgEnv-gnu/8.6.0
gcc-native/14
cray-libsci/25.09.0
PrgEnv-intel/8.6.0
intel/2025.3
PrgEnv-nvidia
nvidia/25.9
cray-libsci/25.09.0
PrgEnv-aocc
aocc/4.1.0
cray-libsci/25.09.0
craype-accel-host
craype/2.7.35
cray-mpich/9.0.1
cmake/3.30.2
cray-hdf5-parallel/1.12.2.9
cray-netcdf-hdf5parallel/4.9.0.9
cray-parallel-netcdf/1.12.3.13
cray-hdf5-parallel/1.14.3.7
cray-netcdf-hdf5parallel/4.9.2.1
cray-parallel-netcdf/1.12.3.19
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0.20
1
1
1
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
4000MB
CMA
$ENV{CRAY_LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH}
128M
spread
threads
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/intel-2023.2.0/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/intel-2023.2.0; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/intel-2023.2.0; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/intel-2023.2.0; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/intel-2023.2.0; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/intel-2023.2.0; else echo "$ZFP_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/gcc-native-12.3/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/gcc-native-12.3; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/gcc-native-12.3; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/gcc-native-12.3; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/gcc-native-12.3; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/gcc-native-12.3; else echo "$ZFP_ROOT"; fi}
Generic
$SHELL{if [ -z "$Albany_ROOT" ]; then echo /global/common/software/e3sm/albany/2024.03.26/gcc/11.2.0; else echo "$Albany_ROOT"; fi}
$SHELL{if [ -z "$Trilinos_ROOT" ]; then echo /global/common/software/e3sm/trilinos/15.1.1/gcc/11.2.0; else echo "$Trilinos_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/nvidia-24.5/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/nvidia-24.5; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/nvidia-24.5; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/nvidia-24.5; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/nvidia-24.5; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/nvidia-24.5; else echo "$ZFP_ROOT"; fi}
$SHELL{if [ -z "$BLAS_ROOT" ]; then echo $NVIDIA_PATH/compilers; else echo "$BLAS_ROOT"; fi}
$SHELL{if [ -z "$LAPACK_ROOT" ]; then echo $NVIDIA_PATH/compilers; else echo "$LAPACK_ROOT"; fi}
NVHPC
trace
Intel10_64_dyn
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/aocc-4.1.0/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/aocc-4.1.0; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/aocc-4.1.0; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/aocc-4.1.0; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/aocc-4.1.0; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/aocc-4.1.0; else echo "$ZFP_ROOT"; fi}
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/intel; else echo "$MOAB_ROOT"; fi}
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/gnu; else echo "$MOAB_ROOT"; fi}
-1
Muller GPU nodes on an internal machine at NERSC; similar to pm-gpu
$ENV{NERSC_HOST}:muller
Linux
gnugpu,gnu,nvidiagpu,nvidia
mpich
e3sm_g
/global/cfs/cdirs/e3sm
e3sm,m3411,m3412
$ENV{SCRATCH}/e3sm_scratch/muller-gpu
/global/cfs/cdirs/e3sm/www/$ENV{USER}
http://portal.nersc.gov/project/e3sm/$ENV{USER}
/global/cfs/cdirs/e3sm/inputdata
/global/cfs/cdirs/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/global/cfs/cdirs/e3sm/baselines/$COMPILER
/global/cfs/cdirs/e3sm/tools/cprnc/cprnc
10
e3sm_developer
4
nersc_slurm
e3sm
128
128
128
4
64
64
TRUE
srun
--label
-n {{ total_tasks }} -N {{ num_nodes }}
-c $SHELL{echo 128/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}
$SHELL{if [ 64 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu-bind=cores"; else echo "--cpu-bind=threads";fi;}
-m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
$SHELL{mpn=`./xmlquery --value MAX_MPITASKS_PER_NODE`; if [ 64 -le $mpn ]; then echo $CIMEROOT/../cime_config/machines/scripts/pm-gpu_set_affinity_npergpu.sh $mpn; fi;}
/opt/cray/pe/lmod/8.7.60/init/perl
/opt/cray/pe/lmod/8.7.60/init/python
/opt/cray/pe/lmod/8.7.60/init/sh
/opt/cray/pe/lmod/8.7.60/init/csh
/opt/cray/pe/lmod/lmod/libexec/lmod perl
/opt/cray/pe/lmod/lmod/libexec/lmod python
module
module
cpe
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
cray-netcdf
cray-hdf5
PrgEnv-gnu
PrgEnv-intel
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
climate-utils
cray-libsci
matlab
craype-accel-nvidia80
craype-accel-host
perftools-base
perftools
darshan
PrgEnv-gnu/8.6.0
gcc-native/14
PrgEnv-nvidia
nvidia/25.9
cudatoolkit/12.9
craype-accel-nvidia80
cudatoolkit/12.9
craype-accel-nvidia80
gcc-native-mixed/14
craype-accel-host
craype-accel-host
cray-libsci/25.09.0
craype/2.7.35
cray-mpich/9.0.1
cmake/3.30.2
cray-hdf5-parallel/1.12.2.9
cray-netcdf-hdf5parallel/4.9.0.9
cray-parallel-netcdf/1.12.3.13
cray-hdf5-parallel/1.14.3.7
cray-netcdf-hdf5parallel/4.9.2.1
cray-parallel-netcdf/1.12.3.19
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0.20
1
1
1
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
$ENV{CRAY_LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH}
128M
spread
threads
1
1
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/gnugpu ; else echo "$MOAB_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/gcc-native-12.3/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/gcc-native-12.3; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/gcc-native-12.3; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/gcc-native-12.3; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/gcc-native-12.3; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/gcc-native-12.3; else echo "$ZFP_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/nvidia-24.5/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/nvidia-24.5; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/nvidia-24.5; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/nvidia-24.5; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/nvidia-24.5; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/nvidia-24.5; else echo "$ZFP_ROOT"; fi}
-1
Alvarez CPU nodes -- internal test machine at NERSC, very similar to pm-cpu. Each node has 2 AMD EPYC 7713 64-core (Milan) CPUs and 512GB memory
$ENV{NERSC_HOST}:alvarez
Linux
intel,gnu,nvidia,amdclang
mpich
e3sm
/global/cfs/cdirs/e3sm
e3sm,m3411,m3412
$ENV{SCRATCH}/e3sm_scratch/alvarez-cpu
/global/cfs/cdirs/e3sm/www/$ENV{USER}
http://portal.nersc.gov/project/e3sm/$ENV{USER}
/global/cfs/cdirs/e3sm/inputdata
/global/cfs/cdirs/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/global/cfs/cdirs/e3sm/baselines/$COMPILER
/global/cfs/cdirs/e3sm/tools/cprnc/cprnc
10
e3sm_developer
4
nersc_slurm
e3sm
256
128
TRUE
srun
--label
-n {{ total_tasks }} -N {{ num_nodes }}
-c $SHELL{echo 256/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}
$SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu-bind=cores"; else echo "--cpu-bind=threads";fi;}
-m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
/opt/cray/pe/lmod/8.7.60/init/perl
/opt/cray/pe/lmod/8.7.60/init/python
/opt/cray/pe/lmod/8.7.60/init/sh
/opt/cray/pe/lmod/8.7.60/init/csh
/opt/cray/pe/lmod/lmod/libexec/lmod perl
/opt/cray/pe/lmod/lmod/libexec/lmod python
module
module
cpe
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
cray-netcdf
cray-hdf5
PrgEnv-gnu
PrgEnv-intel
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
climate-utils
cray-libsci
matlab
craype-accel-nvidia80
craype-accel-host
perftools-base
perftools
darshan
PrgEnv-gnu/8.6.0
gcc-native/14
cray-libsci/25.09.0
PrgEnv-intel/8.6.0
intel/2025.3
PrgEnv-nvidia
nvidia/25.9
cray-libsci/25.09.0
PrgEnv-aocc
aocc/4.1.0
cray-libsci/25.09.0
craype-accel-host
craype/2.7.35
cray-mpich/9.0.1
cmake/3.30.2
cray-hdf5-parallel/1.12.2.9
cray-netcdf-hdf5parallel/4.9.0.9
cray-parallel-netcdf/1.12.3.13
cray-hdf5-parallel/1.14.3.7
cray-netcdf-hdf5parallel/4.9.2.1
cray-parallel-netcdf/1.12.3.19
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0.20
1
1
1
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
4000MB
CMA
$ENV{CRAY_LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH}
128M
spread
threads
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/intel-2023.2.0/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/intel-2023.2.0; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/intel-2023.2.0; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/intel-2023.2.0; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/intel-2023.2.0; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/intel-2023.2.0; else echo "$ZFP_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/gcc-native-12.3/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/gcc-native-12.3; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/gcc-native-12.3; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/gcc-native-12.3; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/gcc-native-12.3; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/gcc-native-12.3; else echo "$ZFP_ROOT"; fi}
Generic
$SHELL{if [ -z "$Albany_ROOT" ]; then echo /global/common/software/e3sm/albany/2024.03.26/gcc/11.2.0; else echo "$Albany_ROOT"; fi}
$SHELL{if [ -z "$Trilinos_ROOT" ]; then echo /global/common/software/e3sm/trilinos/15.1.1/gcc/11.2.0; else echo "$Trilinos_ROOT"; fi}
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/nvidia-24.5/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/nvidia-24.5; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/nvidia-24.5; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/nvidia-24.5; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/nvidia-24.5; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/nvidia-24.5; else echo "$ZFP_ROOT"; fi}
$SHELL{if [ -z "$BLAS_ROOT" ]; then echo $NVIDIA_PATH/compilers; else echo "$BLAS_ROOT"; fi}
$SHELL{if [ -z "$LAPACK_ROOT" ]; then echo $NVIDIA_PATH/compilers; else echo "$LAPACK_ROOT"; fi}
NVHPC
trace
Intel10_64_dyn
/global/cfs/cdirs/e3sm/3rdparty/protobuf/21.6/aocc-4.1.0/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/adios2/2.10.2/cray-mpich-8.1.28/aocc-4.1.0; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/c-blosc2/2.15.2/aocc-4.1.0; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/mgard/1.5.2/aocc-4.1.0; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/sz/2.1.12.5/aocc-4.1.0; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /global/cfs/cdirs/e3sm/3rdparty/zfp/1.0.1/aocc-4.1.0; else echo "$ZFP_ROOT"; fi}
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/intel; else echo "$MOAB_ROOT"; fi}
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/gnu; else echo "$MOAB_ROOT"; fi}
-1
Alvarez GPU nodes -- internal test machine at NERSC; similar to pm-gpu
$ENV{NERSC_HOST}:alvarez
Linux
gnugpu,gnu,nvidiagpu,nvidia
mpich
e3sm_g
/global/cfs/cdirs/e3sm
e3sm,m3411,m3412
$ENV{SCRATCH}/e3sm_scratch/alvarez-gpu
/global/cfs/cdirs/e3sm/www/$ENV{USER}
http://portal.nersc.gov/project/e3sm/$ENV{USER}
/global/cfs/cdirs/e3sm/inputdata
/global/cfs/cdirs/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/global/cfs/cdirs/e3sm/baselines/$COMPILER
/global/cfs/cdirs/e3sm/tools/cprnc/cprnc
10
e3sm_developer
4
nersc_slurm
e3sm
128
128
128
4
64
64
TRUE
srun
--label
-n {{ total_tasks }} -N {{ num_nodes }}
-c $SHELL{echo 128/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}
$SHELL{if [ 64 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu-bind=cores"; else echo "--cpu-bind=threads";fi;}
-m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
$SHELL{mpn=`./xmlquery --value MAX_MPITASKS_PER_NODE`; if [ 64 -le $mpn ]; then echo $CIMEROOT/../cime_config/machines/scripts/pm-gpu_set_affinity_npergpu.sh $mpn; fi;}
/opt/cray/pe/lmod/8.7.60/init/perl
/opt/cray/pe/lmod/8.7.60/init/python
/opt/cray/pe/lmod/8.7.60/init/sh
/opt/cray/pe/lmod/8.7.60/init/csh
/opt/cray/pe/lmod/lmod/libexec/lmod perl
/opt/cray/pe/lmod/lmod/libexec/lmod python
module
module
cpe
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
cray-netcdf
cray-hdf5
PrgEnv-gnu
PrgEnv-intel
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
climate-utils
cray-libsci
matlab
craype-accel-nvidia80
craype-accel-host
perftools-base
perftools
darshan
PrgEnv-gnu/8.6.0
gcc-native/14
PrgEnv-nvidia
nvidia/25.9
cudatoolkit/12.9
craype-accel-nvidia80
cudatoolkit/12.9
craype-accel-nvidia80
gcc-native-mixed/14
craype-accel-host
craype-accel-host
cray-libsci/25.09.0
craype/2.7.35
cray-mpich/9.0.1
cmake/3.30.2
cray-hdf5-parallel/1.12.2.9
cray-netcdf-hdf5parallel/4.9.0.9
cray-parallel-netcdf/1.12.3.13
cray-hdf5-parallel/1.14.3.7
cray-netcdf-hdf5parallel/4.9.2.1
cray-parallel-netcdf/1.12.3.19
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0.20
1
1
1
FALSE
/global/cfs/cdirs/e3sm/perl/lib/perl5-only-switch
kdreg2
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
$ENV{CRAY_LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH}
128M
spread
threads
1
1
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /global/cfs/cdirs/e3sm/software/moab/gnugpu ; else echo "$MOAB_ROOT"; fi}
-1
Spock. NCCS moderate-security system that contains hardware and software similar to the upcoming Frontier system at ORNL.
.*spock.*
Linux
gnu,cray
mpich
cli133
/gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch/spock
/gpfs/alpine/cli115/world-shared/e3sm/inputdata
/gpfs/alpine/cli115/world-shared/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/gpfs/alpine/cli133/world-shared/grnydawn/e3sm/tools/cprnc_spock/cprnc
8
1
slurm
e3sm
64
64
TRUE
srun
--label
-l -K -n {{ total_tasks }} -N {{ num_nodes }}
--cpu_bind=cores
-c $ENV{OMP_NUM_THREADS}
-m plane={{ tasks_per_node }}
/usr/share/lmod/lmod/init/sh
/usr/share/lmod/lmod/init/csh
/usr/share/lmod/lmod/init/perl
/usr/share/lmod/lmod/init/env_modules_python.py
/usr/share/lmod/lmod/libexec/lmod perl
module
module
/usr/share/lmod/lmod/libexec/lmod python
DefApps
cray-python/3.8.5.1
subversion/1.14.0
git/2.31.1
cmake/3.20.2
zlib/1.2.11
cray-libsci/21.06.1.1
PrgEnv-gnu/8.0.0
cray-mpich/8.1.7
cray-hdf5-parallel/1.12.0.6
cray-netcdf-hdf5parallel/4.7.4.6
cray-parallel-netcdf/1.12.1.5
PrgEnv-cray/8.0.0
cray-mpich/8.1.7
cray-hdf5-parallel/1.12.0.6
cray-netcdf-hdf5parallel/4.7.4.6
cray-parallel-netcdf/1.12.1.5
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
$SHELL{dirname $(dirname $(which nc-config))}
$SHELL{dirname $(dirname $(which pnetcdf_version))}
128M
spread
threads
Frontier. AMD EPYC 7A53 64C nodes, 128 hwthreads, 512GB DDR4, 4 MI250X GPUs.
.*frontier.*
Linux
craygnu-mphipcc,craycray-mphipcc,crayamd-mphipcc,craygnu,craycray,crayamd
mpich
cli115
/lustre/orion/proj-shared/cli115
.*
/lustre/orion/cli115/proj-shared/$ENV{USER}/e3sm_scratch
/lustre/orion/cli115/world-shared/e3sm/inputdata
/lustre/orion/cli115/world-shared/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/lustre/orion/cli115/world-shared/e3sm/baselines/frontier/$COMPILER
/lustre/orion/cli115/world-shared/e3sm/tools/cprnc/cprnc
8
1
frontier_slurm
e3sm
56
8
TRUE
srun
-l -K -n {{ total_tasks }} -N {{ num_nodes }}
-c $ENV{OMP_NUM_THREADS}
$ENV{NTASKS_PER_GPU}
$ENV{GPU_BIND_ARGS}
/opt/cray/pe/lmod/lmod/init/sh
/opt/cray/pe/lmod/lmod/init/csh
/opt/cray/pe/lmod/lmod/init/perl
/opt/cray/pe/lmod/lmod/init/env_modules_python.py
/opt/cray/pe/lmod/lmod/libexec/lmod perl
module
module
/opt/cray/pe/lmod/lmod/libexec/lmod python
Core/25.03
PrgEnv-gnu
cpe/25.09
gcc-native/14.2
libunwind
cray-python/3.11.7
subversion
git
cmake
cray-hdf5-parallel/1.14.3.7
cray-netcdf-hdf5parallel/4.9.2.1
cray-parallel-netcdf/1.12.3.19
darshan-runtime
craype-accel-amd-gfx90a
rocm/6.2.4
Core/24.00
cpe/22.12
cce/15.0.1
craype/2.7.20
craype-accel-amd-gfx90a
rocm/5.4.0
Core Core/24.00
PrgEnv-cray PrgEnv-amd/8.3.3
amd amd/5.4.0
cray-libsci/22.12.1.1
craype-accel-amd-gfx90a
rocm/5.4.0
libunwind/1.5.0
libfabric/1.20.1
cray-mpich/8.1.26
cray-python/3.9.13.1
subversion/1.14.1
git/2.36.1
cmake/3.21.3
cray-hdf5-parallel/1.12.2.1
cray-netcdf-hdf5parallel/4.9.0.1
cray-parallel-netcdf/1.12.3.1
darshan-runtime
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0
$ENV{NETCDF_DIR}
$ENV{PNETCDF_DIR}
0
1
2
$ENV{CRAY_LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH}
True
1
$SHELL{which hipcc}
--gpus-per-node=8
--ntasks-per-gpu=$SHELL{echo "`./xmlquery --value MAX_MPITASKS_PER_NODE`/8"|bc}
--gpu-bind=closest
128M
spread
threads
/ccs/proj/cli115/software/frontier/3rdparty/protobuf/21.6/craygnu/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/adios2/2.10.2/craygnu; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/c-blosc2/2.15.2/craygnu; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$H5Z_BLOSC2_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/h5z-blosc2/2.0.0/craygnu; else echo "$H5Z_BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/mgard/1.5.2/craygnu; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/sz/2.1.12.5/craygnu; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/zfp/1.0.1/craygnu; else echo "$ZFP_ROOT"; fi}
$SHELL{if [ -z "$H5Z_ZFP_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/h5z-zfp/1.1.1/craygnu; else echo "$H5Z_ZFP_ROOT"; fi}
/ccs/proj/cli115/software/frontier/3rdparty/protobuf/21.6/craycray/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/adios2/2.10.2/craycray; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/c-blosc2/2.15.2/craycray; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/mgard/1.5.2/craycray; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/sz/2.1.12.5/craycray; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/zfp/1.0.1/craycray; else echo "$ZFP_ROOT"; fi}
/ccs/proj/cli115/software/frontier/3rdparty/protobuf/21.6/crayamd/lib/pkgconfig:$ENV{CRAY_LIBSCI_PREFIX_DIR}/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/adios2/2.10.2/crayamd; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/c-blosc2/2.15.2/crayamd; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/mgard/1.5.2/crayamd; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/sz/2.1.12.5/crayamd; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /ccs/proj/cli115/software/frontier/3rdparty/zfp/1.0.1/crayamd; else echo "$ZFP_ROOT"; fi}
Mac OS/X workstation or laptop
Darwin
gnu
openmpi,mpich
$ENV{HOME}/projects/acme/scratch
$ENV{HOME}/projects/acme/cesm-inputdata
$ENV{HOME}/projects/acme/ptclm-data
$ENV{HOME}/projects/acme/scratch/archive/$CASE
$ENV{HOME}/projects/acme/baselines/$COMPILER
$CCSMROOT/tools/cprnc/build/cprnc
4
e3sm_developer
none
jnjohnson at lbl dot gov
4
2
mpirun
$ENV{HOME}/projects/acme/scratch/$CASE/run
$ENV{HOME}/projects/acme/scratch/$CASE/bld
Linux workstation or laptop
none
LINUX
gnu
openmpi,mpich
$ENV{HOME}/projects/acme/scratch
$ENV{HOME}/projects/acme/cesm-inputdata
$ENV{HOME}/projects/acme/ptclm-data
$ENV{HOME}/projects/acme/scratch/archive/$CASE
$ENV{HOME}/projects/acme/baselines/$COMPILER
$CCSMROOT/tools/cprnc/build/cprnc
4
e3sm_developer
none
jayesh at mcs dot anl dot gov
4
2
mpirun
-np {{ total_tasks }}
$ENV{HOME}/projects/acme/scratch/$CASE/run
$ENV{HOME}/projects/acme/scratch/$CASE/bld
Windows Subsystem for Linux v2, using Ubuntu distribution
none
LINUX
gnu
mpich
$ENV{HOME}/e3sm_scratch
$ENV{HOME}/pt-e3sm-inputdata
$ENV{HOME}/pt-e3sm-inputdata
$ENV{HOME}/e3sm_scratch/archive/$CASE
$ENV{HOME}/e3sm_baselines
$CCSMROOT/tools/cprnc/build/cprnc
make
4
e3sm_developer
none
thorntonpe at ornl dot gov
4
4
mpirun
-np {{ total_tasks }}
$ENV{HOME}/e3sm_scratch/$CASE/run
$ENV{HOME}/e3sm_scratch/$CASE/bld
$ENV{BLASLAPACK_LIBDIR}
Singularity container
singularity
LINUX
gnu
mpich
$ENV{HOME}/projects/e3sm/scratch
$ENV{HOME}/projects/e3sm/cesm-inputdata
$ENV{HOME}/projects/e3sm/ptclm-data
$ENV{HOME}/projects/e3sm/scratch/archive/$CASE
$ENV{HOME}/projects/e3sm/baselines/$COMPILER
$CCSMROOT/tools/cprnc/build/cprnc
make
4
e3sm_developer
none
lukasz at uchicago dot edu
16
16
mpirun
-launcher fork -hosts localhost -np {{ total_tasks }}
$ENV{HOME}/projects/e3sm/scratch/$CASE/run
$ENV{HOME}/projects/e3sm/scratch/$CASE/bld
$SRCROOT
/usr/local/packages/netcdf-serial
/usr/local/packages/cmake/bin:/usr/local/packages/hdf5-serial/bin:/usr/local/packages/netcdf-serial/bin:$ENV{PATH}
/usr/local/packages/szip/lib:/usr/local/packages/hdf5-serial/lib:/usr/local/packages/netcdf-serial/lib
/usr/local/packages/netcdf-parallel
/usr/local/packages/pnetcdf
/usr/local/packages/hdf5-parallel
/usr/local/packages/cmake/bin:/usr/local/packages/mpich/bin:/usr/local/packages/hdf5-parallel/bin:/usr/local/packages/netcdf-parallel/bin:/usr/local/packages/pnetcdf/bin:$ENV{PATH}
/usr/local/packages/mpich/lib:/usr/local/packages/szip/lib:/usr/local/packages/hdf5-parallel/lib:/usr/local/packages/netcdf-parallel/lib:/usr/local/packages/pnetcdf/lib
Linux workstation for Jenkins testing
(melvin|watson|s999964|climate|penn|sems)
LINUX
proxy.sandia.gov:80
gnu,intel
openmpi
/sems-data-store/ACME/timings
.*
$ENV{HOME}/acme/scratch
/sems-data-store/ACME/inputdata
/sems-data-store/ACME/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/sems-data-store/ACME/baselines/$COMPILER
/sems-data-store/ACME/cprnc/build.new/cprnc
32
e3sm_developer
none
jgfouca at sandia dot gov
48
48
mpirun
-np {{ total_tasks }}
--map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread:overload-allowed
/usr/share/Modules/init/python.py
/usr/share/Modules/init/perl.pm
/usr/share/Modules/init/sh
/usr/share/Modules/init/csh
/usr/bin/modulecmd python
/usr/bin/modulecmd perl
module
module
sems-env
acme-env
sems-git
acme-binutils
sems-python/3.5.2
sems-cmake/3.12.2
sems-gcc/7.3.0
sems-intel/16.0.3
sems-netcdf/4.4.1/exo
acme-pfunit/3.2.8/base
acme-openmpi/2.1.5
acme-netcdf/4.7.4/acme
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
1000
$ENV{SEMS_NETCDF_ROOT}
64M
spread
threads
Huge Linux workstation for Sandia climate scientists
mappy
LINUX
proxy.sandia.gov:80
gnu
openmpi
/sems-data-store/ACME/mappy/timings
.*
$ENV{HOME}/acme/scratch
/sems-data-store/ACME/inputdata
/sems-data-store/ACME/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/sems-data-store/ACME/baselines/mappy/$COMPILER
/sems-data-store/ACME/mappy/cprnc/cprnc
64
e3sm_developer
slurm_single_node
jgfouca at sandia dot gov
64
64
srun
--cpu_bind=threads
/projects/sems/install/rhel9-x86_64/sems/lmod/lmod/8.7.24/gcc/11.4.1/base/lnirq74/lmod/lmod/init/env_modules_python.py
/projects/sems/install/rhel9-x86_64/sems/lmod/lmod/8.7.24/gcc/11.4.1/base/lnirq74/lmod/lmod/init/perl
/projects/sems/install/rhel9-x86_64/sems/lmod/lmod/8.7.24/gcc/11.4.1/base/lnirq74/lmod/lmod/init/sh
/projects/sems/install/rhel9-x86_64/sems/lmod/lmod/8.7.24/gcc/11.4.1/base/lnirq74/lmod/lmod/init/csh
/projects/sems/install/rhel9-x86_64/sems/lmod/lmod/8.7.24/gcc/11.4.1/base/lnirq74/lmod/lmod/libexec/lmod python
/projects/sems/install/rhel9-x86_64/sems/lmod/lmod/8.7.24/gcc/11.4.1/base/lnirq74/lmod/lmod/libexec/lmod perl
module
module
sems-git/2.42.0
sems-cmake/3.27.9
sems-gcc/13.2.0
sems-openblas
sems-netcdf-c-serial/4.9.2
sems-netcdf-fortran-serial/4.6.1
sems-openmpi-no-cuda/4.1.6
sems-netcdf-c/4.9.2
sems-netcdf-cxx/4.2
sems-netcdf-fortran/4.6.1
sems-parallel-netcdf/1.12.3
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0
$ENV{NETCDF_C_ROOT}
$ENV{NETCDF_FORTRAN_ROOT}
$ENV{OPENBLAS_ROOT}
64M
spread
threads
OpenBLAS
4000MB
$ENV{PARALLEL_NETCDF_ROOT}
-1
Huge Linux workstation for Sandia climate scientists
^[a-fA-F0-9]{12}$
LINUX
proxy.sandia.gov:80
gnu
openmpi
/tmp
/projects/e3sm/inputdata
/projects/e3sm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/projects/e3sm/baselines/ghci-snl-cpu/$COMPILER
/projects/e3sm/cprnc/cprnc
32
e3sm_developer
none
lbertag at sandia dot gov
32
32
mpirun
--bind-to core
-np {{ total_tasks }}
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0
$ENV{NETCDF_C_ROOT}
$ENV{NETCDF_FORTRAN_ROOT}
$ENV{PARALLEL_NETCDF_ROOT}
64M
spread
threads
Generic
4000MB
Sandia GPU testbed
weaver
LINUX
gnugpu
openmpi
/home/projects/e3sm/timings
.*
$ENV{HOME}/acme/scratch
/home/projects/e3sm/scream/data
/home/projects/e3sm/scream/data/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/home/projects/e3sm/baselines/weaver/$COMPILER
/home/projects/e3sm/cprnc/cprnc
32
e3sm_developer
lsf
jgfouca at sandia dot gov
32
32
mpirun
-np {{ total_tasks }}
/usr/share/Modules/init/sh
/usr/share/Modules/init/python.py
module
/usr/bin/modulecmd python
cuda/10.1.105
ucx/1.6.0
git/2.10.1
python/3.7.3
cmake/3.18.0
perl/5.22.1
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0
/ascldap/users/projects/e3sm/scream/libs/mpfr/install/weaver/lib:/ascldap/users/projects/e3sm/scream/libs/gcc/install/weaver/gcc/8.5.0/lib64:/ascldap/users/projects/e3sm/scream/libs/gcc/install/weaver/gcc/8.5.0/lib:$ENV{LD_LIBRARY_PATH}
/ascldap/users/projects/e3sm/scream/libs/gcc/install/weaver/gcc/8.5.0/bin:/ascldap/users/projects/e3sm/scream/libs/gcc/install/weaver/gcc/8.5.0/libexec/gcc/powerpc64le-unknown-linux-gnu/8.5.0:/ascldap/users/projects/e3sm/scream/libs/openmpi/install/weaver/gcc/8.5.0/cuda/10.1.105/bin:/ascldap/users/projects/e3sm/scream/libs/pnetcdf/install/weaver/gcc/8.5.0/cuda/10.1.105/bin:/ascldap/users/projects/e3sm/scream/libs/netcdf-c/install/weaver/gcc/8.5.0/cuda/10.1.105/bin:/ascldap/users/projects/e3sm/scream/libs/netcdf-fortran/install/weaver/gcc/8.5.0/cuda/10.1.105/bin:/ascldap/users/projects/e3sm/scream/libs/wget/bin:/ascldap/users/jgfouca/perl5/bin:$ENV{PATH}
/ascldap/users/jgfouca/perl5/lib/perl5
/ascldap/users/jgfouca/perl5
/ascldap/users/projects/e3sm/scream/libs/netcdf-c/install/weaver/gcc/8.5.0/cuda/10.1.105
/ascldap/users/projects/e3sm/scream/libs/netcdf-fortran/install/weaver/gcc/8.5.0/cuda/10.1.105
/ascldap/users/projects/e3sm/scream/libs/pnetcdf/install/weaver/gcc/8.5.0/cuda/10.1.105
IBM Power 8 Testbed machine
white
LINUX
gnu
openmpi
$ENV{HOME}/projects/e3sm/scratch
$ENV{HOME}/projects/e3sm/cesm-inputdata
$ENV{HOME}/projects/e3sm/ptclm-data
$ENV{HOME}/projects/e3sm/scratch/archive/$CASE
$ENV{HOME}/projects/e3sm/baselines/$COMPILER
$CCSMROOT/tools/cprnc/build/cprnc
32
e3sm_developer
lsf
mdeakin at sandia dot gov
4
1
mpirun
/usr/share/Modules/init/sh
/usr/share/Modules/init/python.py
module
/usr/bin/modulecmd python
devpack/20181011/openmpi/2.1.2/gcc/7.2.0/cuda/9.2.88
$ENV{HOME}/projects/e3sm/scratch/$CASE/run
$ENV{HOME}/projects/e3sm/scratch/$CASE/bld
$ENV{NETCDF_ROOT}
/ascldap/users/jgfouca/packages/netcdf-fortran-4.4.4-white
$SRCROOT
Skylake Testbed machine
blake
LINUX
intel18
openmpi
$ENV{HOME}/projects/e3sm/scratch
$ENV{HOME}/projects/e3sm/cesm-inputdata
$ENV{HOME}/projects/e3sm/ptclm-data
$ENV{HOME}/projects/e3sm/scratch/archive/$CASE
$ENV{HOME}/projects/e3sm/baselines/$COMPILER
$CCSMROOT/tools/cprnc/build/cprnc
48
e3sm_developer
slurm
mdeakin at sandia dot gov
48
48
mpirun
/usr/share/Modules/init/sh
/usr/share/Modules/init/python.py
module
module
zlib/1.2.11
intel/compilers/18.1.163
openmpi/2.1.2/intel/18.1.163
hdf5/1.10.1/openmpi/2.1.2/intel/18.1.163
netcdf-exo/4.4.1.1/openmpi/2.1.2/intel/18.1.163
$ENV{HOME}/projects/e3sm/scratch/$CASE/run
$ENV{HOME}/projects/e3sm/scratch/$CASE/bld
$ENV{NETCDF_ROOT}
$ENV{NETCDFF_ROOT}
Linux workstation for ANL
compute.*mcs.anl.gov
LINUX
gnu
mpich,openmpi
$ENV{HOME}/acme/scratch
/home/climate1/acme/inputdata
/home/climate1/acme/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/home/climate1/acme/baselines/$COMPILER
/home/climate1/acme/cprnc/build/cprnc
make
32
e3sm_developer
none
jgfouca at sandia dot gov
32
32
mpirun
-l -np {{ total_tasks }}
mpirun
-np {{ total_tasks }}
/software/common/adm/packages/softenv-1.6.2/etc/softenv-load.csh
/software/common/adm/packages/softenv-1.6.2/etc/softenv-load.sh
source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.csh ; soft
source /software/common/adm/packages/softenv-1.6.2/etc/softenv-aliases.sh ; soft
+gcc-8.2.0
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
/soft/apps/packages/climate/cmake/3.18.4/bin:/soft/apps/packages/climate/gmake/bin:$ENV{PATH}
/soft/apps/packages/climate/hdf5/1.8.16-serial/gcc-8.2.0/lib:/soft/apps/packages/climate/szip/2.1/gcc-8.2.0/lib:$ENV{LD_LIBRARY_PATH}
/soft/apps/packages/climate/netcdf/4.4.1c-4.2cxx-4.4.4f-serial/gcc-8.2.0
/soft/apps/packages/climate/hdf5/1.8.16-parallel/mpich-3.3.2/gcc-8.2.0/lib:/soft/apps/packages/climate/szip/2.1/gcc-8.2.0/lib:$ENV{LD_LIBRARY_PATH}
/soft/apps/packages/climate/mpich/3.3.2/gcc-8.2.0/bin:/soft/apps/packages/climate/cmake/3.18.4/bin:/soft/apps/packages/climate/gmake/bin:$ENV{PATH}
/soft/apps/packages/climate/hdf5/1.8.16-parallel/mpich-3.3.2/gcc-8.2.0
/soft/apps/packages/climate/netcdf/4.4.1c-4.2cxx-4.4.4f-parallel/mpich-3.3.2/gcc-8.2.0
/soft/apps/packages/climate/pnetcdf/1.12.0/mpich-3.3.2/gcc-8.2.0
/soft/apps/packages/climate/openmpi/2.1.5/gcc-8.2.0/bin:/soft/apps/packages/climate/cmake/3.18.4/bin:/soft/apps/packages/climate/gmake/bin:$ENV{PATH}
/soft/apps/packages/climate/zlib/1.2.11/gcc-8.2.0-static
/soft/apps/packages/climate/szip/2.1/gcc-8.2.0-static
/soft/apps/packages/climate/hdf5/1.8.12-parallel/openmpi-2.1.5/gcc-8.2.0-static
/soft/apps/packages/climate/netcdf/4.7.4c-4.3.1cxx-4.4.4f-parallel/openmpi-2.1.5/gcc-8.2.0-static-hdf5-1.8.12-pnetcdf-1.12.0
/soft/apps/packages/climate/pnetcdf/1.12.0/openmpi-2.1.5/gcc-8.2.0
64M
/soft/apps/packages/climate/perl5/lib/perl5
ANL CELS General Computing Environment (Linux) workstation (Ubuntu 22.04)
compute-386-(01|02|03|04|05|06|07|08)|compute-240-(10|11|12|13|14|15)
LINUX
gnu
mpich,openmpi
/scratch/$ENV{USER}/e3sm/timings
.*
/scratch/$ENV{USER}/e3sm/scratch
/nfs/gce/projects/climate/inputdata
$DIN_LOC_ROOT/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/nfs/gce/projects/climate/e3sm/baselines/$COMPILER
/nfs/gce/projects/climate/e3sm/cprnc/build/cprnc
make
8
e3sm_developer
none
jayesh at mcs dot anl dot gov
32
32
mpirun
-l -np {{ total_tasks }}
mpirun
--oversubscribe -np {{ total_tasks }}
/nfs/gce/software/custom/linux-ubuntu22.04-x86_64/spack/opt/spack/linux-ubuntu22.04-x86_64/gcc-11.2.0/lmod-8.5.6-hkjjxhp/lmod/8.5.6/init/env_modules_python.py
/nfs/gce/software/custom/linux-ubuntu22.04-x86_64/spack/opt/spack/linux-ubuntu22.04-x86_64/gcc-11.2.0/lmod-8.5.6-hkjjxhp/lmod/lmod/init/perl
/nfs/gce/software/custom/linux-ubuntu22.04-x86_64/spack/opt/spack/linux-ubuntu22.04-x86_64/gcc-11.2.0/lmod-8.5.6-hkjjxhp/lmod/lmod/init/bash
/nfs/gce/software/custom/linux-ubuntu22.04-x86_64/spack/opt/spack/linux-ubuntu22.04-x86_64/gcc-11.2.0/lmod-8.5.6-hkjjxhp/lmod/lmod/init/sh
/nfs/gce/software/custom/linux-ubuntu22.04-x86_64/spack/opt/spack/linux-ubuntu22.04-x86_64/gcc-11.2.0/lmod-8.5.6-hkjjxhp/lmod/lmod/init/csh
/nfs/gce/software/custom/linux-ubuntu22.04-x86_64/spack/opt/spack/linux-ubuntu22.04-x86_64/gcc-11.2.0/lmod-8.5.6-hkjjxhp/lmod/lmod/libexec/lmod python
module
module
module
module
gcc/12.1.0
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/netcdf/4.8.0c-4.3.1cxx-4.5.3f-serial/gcc-12.1.0
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/mpich/4.1.2/gcc-12.1.0/lib:$ENV{LD_LIBRARY_PATH}
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/mpich/4.1.2/gcc-12.1.0/bin:$ENV{PATH}
/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/zlib-1.2.11-p7dmb5p
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/hdf5/1.14.6/mpich-4.1.2/gcc-12.1.0-mt
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/netcdf/4.8.0c-4.3.1cxx-4.5.3f-parallel/mpich-4.1.2/gcc-12.1.0-mt
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/pnetcdf/1.12.3/mpich-4.1.2/gcc-12.1.0
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /nfs/gce/projects/climate/software/moab/devel/mpich-4.1.2/gcc-12.1.0; else echo "$MOAB_ROOT"; fi}
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/openmpi/4.1.6/gcc-12.1.0/lib:$ENV{LD_LIBRARY_PATH}
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/openmpi/4.1.6/gcc-12.1.0/bin:$ENV{PATH}
/nfs/gce/software/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/zlib-1.2.11-p7dmb5p
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/hdf5/1.12.2/openmpi-4.1.6/gcc-12.1.0
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/netcdf/4.8.0c-4.3.1cxx-4.5.3f-parallel/openmpi-4.1.6/gcc-12.1.0
/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/pnetcdf/1.12.3/openmpi-4.1.6/gcc-12.1.0
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /nfs/gce/projects/climate/software/moab/devel/openmpi-4.1.6/gcc-12.1.0; else echo "$MOAB_ROOT"; fi}
64M
/nfs/gce/projects/climate/software/perl5/lib/perl5
$SHELL{if [ -z "$PKG_CONFIG_PATH" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/protobuf/21.6/gcc-12.1.0/lib/pkgconfig; else echo "/nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/protobuf/21.6/gcc-12.1.0/lib/pkgconfig:$PKG_CONFIG_PATH"; fi}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/adios2/2.10.2/mpich-4.1.2/gcc-12.1.0; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/c-blosc2/2.15.2/gcc-12.1.0; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$H5Z_BLOSC2_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/h5z-blosc2/2.0.0/mpich-4.1.2/gcc-12.1.0-mt; else echo "$H5Z_BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$MGARD_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/mgard/1.5.2/gcc-12.1.0; else echo "$MGARD_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/sz/2.1.12.5/gcc-12.1.0; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/zfp/1.0.1/gcc-12.1.0; else echo "$ZFP_ROOT"; fi}
$SHELL{if [ -z "$H5Z_ZFP_ROOT" ]; then echo /nfs/gce/projects/climate/software/linux-ubuntu22.04-x86_64/h5z-zfp/1.1.1/mpich-4.1.2/gcc-12.1.0-mt; else echo "$H5Z_ZFP_ROOT"; fi}
FALSE
SNL cluster
(skybridge|chama)
LINUX
proxy.sandia.gov:80
intel
openmpi
fy210162
/projects/ccsm/timings
.*
/gpfs/$USER/acme_scratch/sandiatoss3
/projects/ccsm/inputdata
/projects/ccsm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/projects/ccsm/ccsm_baselines/$COMPILER
/projects/ccsm/cprnc/build.toss3/cprnc
8
e3sm_integration
slurm
jgfouca at sandia dot gov
16
16
TRUE
mpiexec
--n {{ total_tasks }}
--map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core
/usr/share/lmod/lmod/init/python.py
/usr/share/lmod/lmod/init/perl.pm
/usr/share/lmod/lmod/init/sh
/usr/share/lmod/lmod/init/csh
/usr/share/lmod/lmod/libexec/lmod python
/usr/share/lmod/lmod/libexec/lmod perl
module
module
sems-archive-env
acme-env
sems-archive-git
sems-archive-cmake/3.19.1
gnu/6.3.1
sems-archive-intel/17.0.0
sems-archive-openmpi/1.10.5
acme-netcdf/4.7.4/acme
sems-archive-netcdf/4.4.1/exo
/tscratch/$USER/acme_scratch/sandiatoss3/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
$ENV{SEMS_NETCDF_ROOT}
64M
$ENV{SEMS_NETCDF_ROOT}
SNL cluster
(boca)
LINUX
proxy.sandia.gov:80
intel
openmpi
fy210162
/projects/ccsm/timings
.*
/gpfs/$USER/acme_scratch/boca
/projects/ccsm/inputdata
/projects/ccsm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/projects/ccsm/ccsm_baselines/$COMPILER
/projects/ccsm/cprnc/build.toss3/cprnc
32
e3sm_integration
slurm
jgfouca at sandia dot gov
36
36
TRUE
mpiexec
--n {{ total_tasks }}
--map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core
/usr/share/lmod/lmod/init/python.py
/usr/share/lmod/lmod/init/perl.pm
/usr/share/lmod/lmod/init/sh
/usr/share/lmod/lmod/init/csh
/usr/share/lmod/lmod/libexec/lmod python
/usr/share/lmod/lmod/libexec/lmod perl
module
module
/projects/sems/acme-boca-modulefiles/env-module
acme-boca-env
aue/python/3.11.6
sems-archive-git
sems-archive-cmake/3.19.1
gnu/10.2
sems-archive-intel/21.3.0
sems-archive-openmpi/4.1.4
acme-netcdf/4.7.4/acme
sems-archive-netcdf/4.4.1/exo
/tscratch/$USER/acme_scratch/boca/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
$ENV{SEMS_NETCDF_ROOT}
$ENV{SEMS_NETCDF_ROOT}
$ENV{SEMS_NETCDF_ROOT}/include
$ENV{SEMS_NETCDF_ROOT}/lib
64M
$ENV{SEMS_NETCDF_ROOT}
/projects/cldera/cldera-tools/install-master/intel
SNL cluster
(flight)
LINUX
proxy.sandia.gov:80
intel
openmpi
fy210162
/projects/ccsm/timings
.*
/gpfs/$USER/acme_scratch/flight
/projects/ccsm/inputdata
/projects/ccsm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/projects/ccsm/ccsm_baselines/$COMPILER
/projects/ccsm/cprnc/build.toss3/cprnc
56
e3sm_integration
slurm
jgfouca at sandia dot gov
112
112
TRUE
mpiexec
--n {{ total_tasks }}
--map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core
/usr/share/lmod/lmod/init/python.py
/usr/share/lmod/lmod/init/perl.pm
/usr/share/lmod/lmod/init/sh
/usr/share/lmod/lmod/init/csh
/usr/share/lmod/lmod/libexec/lmod python
/usr/share/lmod/lmod/libexec/lmod perl
module
module
/projects/sems/acme-boca-modulefiles/env-module
acme-boca-env
aue/python/3.11.6
sems-archive-git
sems-archive-cmake/3.19.1
gnu/10.3.1
sems-archive-intel/21.3.0
sems-archive-openmpi/4.1.4
acme-netcdf/4.7.4/acme
sems-archive-netcdf/4.4.1/exo
/tscratch/$USER/acme_scratch/flight/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
$ENV{SEMS_NETCDF_ROOT}
$ENV{SEMS_NETCDF_ROOT}
$ENV{SEMS_NETCDF_ROOT}/include
$ENV{SEMS_NETCDF_ROOT}/lib
64M
$ENV{SEMS_NETCDF_ROOT}
/projects/cldera/cldera-tools/install-master/intel
SNL cluster
ghost-login
LINUX
proxy.sandia.gov:80
intel
openmpi
fy210162
/tscratch/$USER/acme_scratch/ghost
/projects/ccsm/inputdata
/projects/ccsm/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/projects/ccsm/ccsm_baselines/$COMPILER
/projects/ccsm/cprnc/build.toss3/cprnc
8
e3sm_integration
slurm
jgfouca at sandia dot gov
36
36
TRUE
mpiexec
--n {{ total_tasks }}
--map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to core
/usr/share/lmod/lmod/init/python.py
/usr/share/lmod/lmod/init/perl.pm
/usr/share/lmod/lmod/init/sh
/usr/share/lmod/lmod/init/csh
/usr/share/lmod/lmod/libexec/lmod python
/usr/share/lmod/lmod/libexec/lmod perl
module
module
/projects/sems/acme-boca-modulefiles/env-module
acme-boca-env
aue/python/3.11.6
sems-archive-git
sems-archive-cmake/3.19.1
gnu/10.3.1
sems-archive-intel/21.3.0
sems-archive-openmpi/4.1.4
acme-netcdf/4.7.4/acme
sems-archive-netcdf/4.4.1/exo
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
$ENV{SEMS_NETCDF_ROOT}
64M
$ENV{SEMS_NETCDF_ROOT}
ANL/LCRC Linux Cluster
(b\d+|blues.*).lcrc.anl.gov
LINUX
intel,gnu
impi,openmpi,mvapich
condo
/lcrc/group/e3sm
.*
/lcrc/group/e3sm/$USER/scratch/anvil
/lcrc/group/e3sm/public_html/$ENV{USER}
https://web.lcrc.anl.gov/public/e3sm/$ENV{USER}
/lcrc/group/e3sm/data/inputdata
/lcrc/group/e3sm/data/inputdata/atm/datm7
/lcrc/group/e3sm/$USER/archive/$CASE
/lcrc/group/e3sm/baselines/anvil/$COMPILER
/lcrc/group/e3sm/soft/tools/cprnc/cprnc
8
e3sm_integration
slurm
E3SM
36
36
FALSE
srun
-l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit
--cpu_bind=cores
-c $ENV{OMP_NUM_THREADS}
-m plane={{ tasks_per_node }}
/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/sh;export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core
/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/csh;setenv MODULEPATH $MODULEPATH\:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core
/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/env_modules_python.py
export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core;/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/libexec/lmod python
module
module
/lcrc/group/e3sm/soft/modulefiles/anvil
cmake/3.26.3-nszudya
gcc/8.2.0
intel/20.0.4-lednsve
intel-mkl/2020.4.304-voqlapk
mvapich2/2.3.6-verbs-x4iz7lq
netcdf-c/4.4.1-gei7x7w
netcdf-cxx/4.2-db2f5or
netcdf-fortran/4.4.4-b4ldb3a
parallel-netcdf/1.11.0-kj4jsvt
intel-mpi/2019.9.304-i42whlw
netcdf-c/4.4.1-blyisdg
netcdf-cxx/4.2-gkqc6fq
netcdf-fortran/4.4.4-eanrh5t
parallel-netcdf/1.11.0-y3nmmej
openmpi/4.1.1-v3b3npd
netcdf-c/4.4.1-smyuxme
netcdf-cxx/4.2-kfb2aag
netcdf-fortran/4.4.4-mablvyc
parallel-netcdf/1.11.0-x4n5s7k
gcc/8.2.0-xhxgy33
intel-mkl/2020.4.304-d6zw4xa
netcdf/4.4.1-ve2zfkw
netcdf-cxx/4.2-2rkopdl
netcdf-fortran/4.4.4-thtylny
mvapich2/2.2-verbs-ppznoge
parallel-netcdf/1.11.0-c22b2bn
intel-mpi/2019.9.304-rxpzd6p
netcdf-c/4.4.1-fysjgfx
netcdf-cxx/4.2-oaiw2v6
netcdf-fortran/4.4.4-kxgkaop
parallel-netcdf/1.11.0-fce7akl
openmpi/4.1.1-x5n4m36
netcdf-c/4.4.1-mtfptpl
netcdf-cxx/4.2-osp27dq
netcdf-fortran/4.4.4-5yd6dos
parallel-netcdf/1.11.0-a7ohxsg
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0
$SHELL{dirname $(dirname $(which nc-config))}
$SHELL{dirname $(dirname $(which nf-config))}
$SHELL{dirname $(dirname $(which pnetcdf_version))}
/lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
self,sm,ud
16384
0
1
1
1
2
10
64M
granularity=core,balanced
1
cores
ANL LCRC cluster 512-node AMD Epyc 7532 2-sockets 64-cores per node
chr.*
LINUX
intel,gnu,oneapi-ifx
openmpi,impi
e3sm
/lcrc/group/e3sm/PERF_Chrysalis
.*
/lcrc/group/e3sm/$USER/scratch/chrys
/lcrc/group/e3sm/public_html/$ENV{USER}
https://web.lcrc.anl.gov/public/e3sm/$ENV{USER}
/lcrc/group/e3sm/data/inputdata
/lcrc/group/e3sm/data/inputdata/atm/datm7
/lcrc/group/e3sm/$USER/scratch/chrys/archive/$CASE
/lcrc/group/e3sm/baselines/chrys/$COMPILER
/lcrc/group/e3sm/tools/cprnc/cprnc
8
e3sm_integration
4
slurm
E3SM
128
64
FALSE
srun
--mpi=pmi2 -l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit
$SHELL{if [ 64 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}
-c $SHELL{echo 128/ {{ tasks_per_node }} |bc}
-m plane={{ tasks_per_node }}
/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/sh
/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/csh
/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/env_modules_python.py
/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/libexec/lmod python
module
module
subversion/1.14.0-e4smcy3
perl/5.32.0-bsnc6lt
cmake/3.24.2-whgdv7y
intel/20.0.4-kodw73g
intel-mkl/2020.4.304-g2qaxzf
openmpi/4.1.6-2mm63n2
hdf5/1.10.7-4cghwvq
netcdf-c/4.7.4-4qjdadt
netcdf-fortran/4.5.3-qozrykr
parallel-netcdf/1.11.0-icrpxty
intel-mpi/2019.9.304-tkzvizk
hdf5/1.10.7-wczt56s
netcdf-c/4.7.4-ba6agmb
netcdf-fortran/4.5.3-5lvy5p4
parallel-netcdf/1.11.0-b74wv4m
gcc/11.2.0-bgddrif
intel-oneapi-mkl/2022.1.0-w4kgsn4
openmpi/4.1.6-ggebj5o
hdf5/1.10.7-ol6xuae
netcdf-c/4.7.4-pfocec2
netcdf-fortran/4.5.3-va3hoor
parallel-netcdf/1.11.0-d7h4ysd
gcc/11.2.0-bgddrif
intel-oneapi-mkl/2022.1.0-w4kgsn4
gcc/9.2.0-ugetvbp
intel-mkl/2020.4.304-n3b5fye
intel-mpi/2019.9.304-jdih7h5
hdf5/1.8.16-dtbpce3
netcdf-c/4.7.4-seagl7g
netcdf-fortran/4.5.3-ova6t37
parallel-netcdf/1.11.0-ifdodru
intel-oneapi-compilers/2025.2.0-mdlxe55
openmpi/4.1.8-nygtpa3
intel-oneapi-mkl/2025.2.0-bcimxay
hdf5/1.14.6-5cgownf
netcdf-c/4.9.3-mekqsor
netcdf-fortran/4.6.2-cjqpiwp
parallel-netcdf/1.14.1-f2fwvr2
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.05
0.05
0
/lcrc/group/e3sm/soft/perl/chrys/lib/perl5
$SHELL{dirname $(dirname $(which h5dump))}
$SHELL{dirname $(dirname $(which nc-config))}
$SHELL{dirname $(dirname $(which nf-config))}
$SHELL{dirname $(dirname $(which pnetcdf_version))}
^lockedfile,individual
^xpmem
128M
granularity=core,balanced
granularity=thread,balanced
cores
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /lcrc/soft/climate/moab/chrysalis/intel; else echo "$MOAB_ROOT"; fi}
$SHELL{if [ -z "$Albany_ROOT" ]; then echo /lcrc/group/e3sm/soft/albany/2024.03.26/intel/20.0.4; else echo "$Albany_ROOT"; fi}
$SHELL{if [ -z "$Trilinos_ROOT" ]; then echo /lcrc/group/e3sm/soft/trilinos/15.1.1/intel/20.0.4; else echo "$Trilinos_ROOT"; fi}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /lcrc/soft/climate/adios2/2.10.2/openmpi-4.1.6/intel-20.0.4; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /lcrc/soft/climate/c-blosc2/2.15.2/intel-20.0.4; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /lcrc/soft/climate/sz/2.1.12.5/intel-20.0.4; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /lcrc/soft/climate/zfp/1.0.1/intel-20.0.4; else echo "$ZFP_ROOT"; fi}
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /lcrc/soft/climate/moab/chrysalis/gnu; else echo "$MOAB_ROOT"; fi}
$SHELL{if [ -z "$Albany_ROOT" ]; then echo /lcrc/group/e3sm/soft/albany/2024.03.26/gcc/9.2.0; else echo "$Albany_ROOT"; fi}
$SHELL{if [ -z "$Trilinos_ROOT" ]; then echo /lcrc/group/e3sm/soft/trilinos/15.1.1/gcc/9.2.0; else echo "$Trilinos_ROOT"; fi}
$SHELL{if [ -z "$ADIOS2_ROOT" ]; then echo /lcrc/soft/climate/adios2/2.10.2/openmpi-4.1.6/gcc-11.2.0; else echo "$ADIOS2_ROOT"; fi}
$SHELL{if [ -z "$BLOSC2_ROOT" ]; then echo /lcrc/soft/climate/c-blosc2/2.15.2/gcc-11.2.0; else echo "$BLOSC2_ROOT"; fi}
$SHELL{if [ -z "$SZ_ROOT" ]; then echo /lcrc/soft/climate/sz/2.1.12.5/gcc-11.2.0; else echo "$SZ_ROOT"; fi}
$SHELL{if [ -z "$ZFP_ROOT" ]; then echo /lcrc/soft/climate/zfp/1.0.1/gcc-11.2.0; else echo "$ZFP_ROOT"; fi}
ANL/LCRC Linux Cluster
LINUX
pgigpu
mvapich
e3sm
/lcrc/group/e3sm
.*
/lcrc/group/e3sm/$USER/scratch/blues
/lcrc/group/e3sm/data/inputdata
/lcrc/group/e3sm/data/inputdata/atm/datm7
/lcrc/group/e3sm/$USER/archive/$CASE
/lcrc/group/e3sm/baselines/blues/$COMPILER
/lcrc/group/e3sm/soft/tools/cprnc/cprnc
8
e3sm_integration
4
slurm
E3SM
16
16
TRUE
srun
-l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit
--cpu_bind=cores
-c $ENV{OMP_NUM_THREADS}
-m plane=$SHELL{echo 16/$OMP_NUM_THREADS|bc}
/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/sh;export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core:/blues/gpfs/home/software/spack-0.10.1/share/spack/lmod/linux-centos7-x86_64/Core
/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/csh;setenv MODULEPATH $MODULEPATH\:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core\:/blues/gpfs/home/software/spack-0.10.1/share/spack/lmod/linux-centos7-x86_64/Core
/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/init/env_modules_python.py
export MODULEPATH=$MODULEPATH:/software/centos7/spack-latest/share/spack/lmod/linux-centos7-x86_64/Core:/blues/gpfs/home/software/spack-0.10.1/share/spack/lmod/linux-centos7-x86_64/Core;/home/software/spack-0.10.1/opt/spack/linux-centos7-x86_64/gcc-4.8.5/lmod-7.4.9-ic63herzfgw5u3na5mdtvp3nwxy6oj2z/lmod/lmod/libexec/lmod python
module
module
cmake/3.20.3-vedypwm
nvhpc/20.9-5brtudu
cuda/11.1.0-6dvax5z
netcdf-c/4.7.4-ltqliri
netcdf-cxx/4.2-kf5ox4e
netcdf-fortran/4.5.3-6mgyroo
mvapich2/2.3.4-blues-5fwicb5
parallel-netcdf/1.12.1-nyuvwhn
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0
$SHELL{dirname $(dirname $(which nc-config))}
$SHELL{dirname $(dirname $(which nf-config))}
$SHELL{dirname $(dirname $(which pnetcdf_version))}
/lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
0
1
1
2
64M
cores
ANL/LCRC Linux Cluster: 6x 128c EPYC nodes with 8x A100 GPUs
gpulogin.*
LINUX
pgigpu
openmpi
e3sm
/lcrc/group/e3sm
.*
/lcrc/group/e3sm/$USER/scratch/swing
/lcrc/group/e3sm/data/inputdata
/lcrc/group/e3sm/data/inputdata/atm/datm7
/lcrc/group/e3sm/$USER/archive/$CASE
/lcrc/group/e3sm/baselines/swing/$COMPILER
/lcrc/group/e3sm/soft/tools/cprnc/cprnc
8
e3sm_gpu
4
slurm
E3SM
128
16
128
16
TRUE
srun
-l -n {{ total_tasks }} -N {{ num_nodes }} -K
$SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}
-c $SHELL{echo 256/ {{ tasks_per_node }} |bc}
-m plane={{ tasks_per_node }}
/gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/init/sh
/gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/init/csh
/gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/init/env_modules_python.py
/gpfs/fs1/soft/swing/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-9.3.0/lmod-8.3-5tuyfdb/lmod/lmod/libexec/lmod python
module
module
cmake/3.21.1-e5i6eks
nvhpc/20.9-37zsymt
cuda/11.1.1-nkh7mm7
openmpi/4.1.1-r6ebr2e
netcdf-c/4.7.4-zppo53l
netcdf-cxx/4.2-wjm7fye
netcdf-fortran/4.5.3-srsajjs
parallel-netcdf/1.12.1-75szceu
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
0
$SHELL{dirname $(dirname $(which nc-config))}
$SHELL{dirname $(dirname $(which nf-config))}
$SHELL{dirname $(dirname $(which pnetcdf_version))}
/lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
64M
cores
ANL-LCRC, 672-nodes Xeon E5-2695v4 Broadwell 36C/2.10GHz, Intel Omni-Path OPA 100G network, PBS Pro
beboplogin.*
LINUX
gnu
openmpi
e3sm
/lcrc/group/e3sm/$USER/scratch/bebop
/lcrc/group/e3sm/data/inputdata
/lcrc/group/e3sm/data/inputdata/atm/datm7
/lcrc/group/e3sm/$USER/archive/$CASE
/lcrc/group/e3sm/baselines/bebop/$COMPILER
/lcrc/group/e3sm/tools/cprnc/cprnc.bebop
8
e3sm_integration
4
pbspro
E3SM
36
36
TRUE
mpirun
--tag-output -n {{ total_tasks }}
--map-by slot:PE=$ENV{OMP_NUM_THREADS} --bind-to core
/gpfs/fs1/soft/bebop/software/spack-built/linux-rocky8-x86_64/gcc-8.5.0/lmod-8.7.37-rxz3nx5/lmod/lmod/init/sh
/gpfs/fs1/soft/bebop/software/spack-built/linux-rocky8-x86_64/gcc-8.5.0/lmod-8.7.37-rxz3nx5/lmod/lmod/init/csh
/gpfs/fs1/soft/bebop/software/spack-built/linux-rocky8-x86_64/gcc-8.5.0/lmod-8.7.37-rxz3nx5/lmod/lmod/init/env_modules_python.py
/gpfs/fs1/soft/bebop/software/spack-built/linux-rocky8-x86_64/gcc-8.5.0/lmod-8.7.37-rxz3nx5/lmod/lmod/libexec/lmod python
module
module
cmake/3.29.2
gcc/13.2.0-xk53apo
openmpi/5.0.5-ohr7u5x
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0
/lcrc/group/e3sm/soft/bebop/netcdf-c/4.9.3/gcc-13.2.0/openmpi-5.0.5
/lcrc/group/e3sm/soft/bebop/netcdf-fortran/4.6.2/gcc-13.2.0/openmpi-5.0.5
/lcrc/group/e3sm/soft/bebop/pnetcdf/1.14.1/gcc-13.2.0/openmpi-5.0.5
/lcrc/group/e3sm/soft/bebop/pnetcdf/1.14.1/gcc-13.2.0/openmpi-5.0.5/bin:/lcrc/group/e3sm/soft/bebop/netcdf-fortran/4.6.2/gcc-13.2.0/openmpi-5.0.5/bin:/lcrc/group/e3sm/soft/bebop/netcdf-c/4.9.3/gcc-13.2.0/openmpi-5.0.5/bin:/lcrc/group/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
/lcrc/group/e3sm/soft/bebop/pnetcdf/1.14.1/gcc-13.2.0/openmpi-5.0.5/lib:/lcrc/group/e3sm/soft/bebop/netcdf-fortran/4.6.2/gcc-13.2.0/openmpi-5.0.5/lib:/lcrc/group/e3sm/soft/bebop/netcdf-c/4.9.3/gcc-13.2.0/openmpi-5.0.5/lib:$ENV{LD_LIBRARY_PATH}
$SHELL{if [ -z "$LAPACK_ROOT" ]; then echo /lcrc/group/e3sm/soft/bebop/netlib-lapack/3.12.0/gcc-13.2.0; else echo "$LAPACK_ROOT"; fi}
$SHELL{if [ -z "$BLAS_ROOT" ]; then echo /lcrc/group/e3sm/soft/bebop/netlib-lapack/3.12.0/gcc-13.2.0; else echo "$BLAS_ROOT"; fi}
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /lcrc/soft/climate/moab/improv/gnu; else echo "$MOAB_ROOT"; fi}
^lockedfile,individual
128M
spread
cores
ANL LCRC cluster 825-node AMD 7713 2-sockets 128-cores per node
ilogin(1|2|3|4).lcrc.anl.gov
LINUX
gnu
openmpi
e3sm
/lcrc/group/e3sm/$USER/scratch/improv
/lcrc/group/e3sm/data/inputdata
/lcrc/group/e3sm/data/inputdata/atm/datm7
/lcrc/group/e3sm/$USER/scratch/improv/archive/$CASE
/lcrc/group/e3sm/baselines/improv/$COMPILER
/lcrc/group/e3sm/tools/cprnc/cprnc.improv
8
e3sm_integration
8
pbspro
E3SM
128
128
FALSE
mpirun
--tag-output -n {{ total_tasks }}
--map-by ppr:1:core:PE=$ENV{OMP_NUM_THREADS} --bind-to core --oversubscribe
/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/sh
/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/csh
/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/init/env_modules_python.py
/gpfs/fs1/soft/chrysalis/spack/opt/spack/linux-centos8-x86_64/gcc-9.3.0/lmod-8.3-5be73rg/lmod/lmod/libexec/lmod python
module
module
cmake/3.30.5-gcc-12.3.0
gcc/12.3.0
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0
/lcrc/group/e3sm/soft/improv/netcdf-c/4.9.2b/gcc-12.3.0/openmpi-4.1.6
/lcrc/group/e3sm/soft/improv/netcdf-fortran/4.6.1b/gcc-12.3.0/openmpi-4.1.6
/lcrc/group/e3sm/soft/improv/pnetcdf/1.14.1/gcc-12.3.0/openmpi-4.1.6
/lcrc/group/e3sm/soft/improv/pnetcdf/1.14.1/gcc-12.3.0/openmpi-4.1.6/bin:/lcrc/group/e3sm/soft/improv/netcdf-fortran/4.6.1b/gcc-12.3.0/openmpi-4.1.6/bin:/lcrc/group/e3sm/soft/improv/netcdf-c/4.9.2b/gcc-12.3.0/openmpi-4.1.6/bin:/lcrc/group/e3sm/soft/improv/openmpi/4.1.6/gcc-12.3.0/bin:/lcrc/group/e3sm/soft/perl/improv/bin:$ENV{PATH}
$SHELL{lp=/lcrc/group/e3sm/soft/improv/pnetcdf/1.14.1/gcc-12.3.0/openmpi-4.1.6/lib:/lcrc/group/e3sm/soft/improv/netcdf-fortran/4.6.1b/gcc-12.3.0/openmpi-4.1.6/lib:/lcrc/group/e3sm/soft/improv/netcdf-c/4.9.2b/gcc-12.3.0/openmpi-4.1.6/lib:/opt/pbs/lib:/lcrc/group/e3sm/soft/improv/openmpi/4.1.6/gcc-12.3.0/lib; if [ -z "$LD_LIBRARY_PATH" ]; then echo $lp; else echo "$lp:$LD_LIBRARY_PATH"; fi}
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /lcrc/soft/climate/moab/improv/gnu; else echo "$MOAB_ROOT"; fi}
$SHELL{if [ -z "$LAPACK_ROOT" ]; then echo /lcrc/group/e3sm/soft/improv/netlib-lapack/3.12.0/gcc-12.3.0; else echo "$LAPACK_ROOT"; fi}
$SHELL{if [ -z "$BLAS_ROOT" ]; then echo /lcrc/group/e3sm/soft/improv/netlib-lapack/3.12.0/gcc-12.3.0; else echo "$BLAS_ROOT"; fi}
^lockedfile,individual
128M
cores
LLNL Linux Cluster, 112 pes/node, batch system is Slurm
LINUX
oneapi-ifx
mpich
e3smtest
/p/lustre2/$USER/e3sm_scratch/dane
/p/vast1/e3sm/ccsm3data/inputdata
/p/vast1/e3sm/ccsm3data/inputdata/atm/datm7
/p/lustre2/$USER/archive/$CASE
/p/lustre2/$USER/ccsm_baselines/$COMPILER
/usr/workspace/e3sm/apps/cprnc
8
slurm
boutte3 -at- llnl.gov
112
112
srun
-l -n {{ total_tasks }} -N {{ num_nodes }} --kill-on-bad-exit
--cpu_bind=cores
-c $ENV{OMP_NUM_THREADS}
-m plane={{ tasks_per_node }}
/usr/share/lmod/lmod/init/env_modules_python.py
/usr/share/lmod/lmod/init/perl
/usr/share/lmod/lmod/init/sh
/usr/share/lmod/lmod/init/csh
module
module
/usr/share/lmod/lmod/libexec/lmod python
/usr/share/lmod/lmod/libexec/lmod perl
python/3.9.12
git
subversion
cmake/3.26.3
mkl/2022.1.0
intel/2023.2.1-magic
/usr/workspace/e3sm/spack/dane/intel/modules/Core
intel-oneapi-runtime/2023.2.0
gcc-runtime/12.1.1
glibc/2.28
mvapich2/2.3.7
hdf5/1.14.5
netcdf-c/4.9.2
netcdf-fortran/4.6.1
parallel-netcdf/1.14.0
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
128M
$ENV{HDF5_ROOT}
$ENV{NETCDF_C_PATH}
$ENV{NETCDF_FORTRAN_PATH}
$ENV{PNETCDF_PATH}
/usr/tce/packages/intel/intel-2023.2.1/compiler/2023.2.1/linux/bin/intel64:$ENV{PATH}
ANL experimental/evaluation cluster, batch system is cobalt
jlse.*
LINUX
oneapi-ifx,oneapi-ifxgpu,gnu
mpich,impi,openmpi
/gpfs/jlse-fs0/projects/climate/$USER/scratch
/gpfs/jlse-fs0/projects/climate/inputdata
/gpfs/jlse-fs0/projects/climate/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/gpfs/jlse-fs0/projects/climate/baselines/$COMPILER
/gpfs/jlse-fs0/projects/climate/tools/cprnc/cprnc
16
e3sm_developer
4
cobalt_theta
e3sm
112
96
96
112
48
48
FALSE
mpirun
-l -n {{ total_tasks }} -bind-to core
mpirun
--tag-output -n {{ total_tasks }}
--map-by ppr:{{ tasks_per_numa }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread
/usr/share/Modules/init/sh
/usr/share/Modules/init/csh
/usr/share/Modules/init/python.py
module
module
/usr/bin/modulecmd python
/soft/modulefiles
cmake/3.22.1
/soft/restricted/CNDA/modules
oneapi/eng-compiler/2022.10.15.006
cmake
gcc/8.2.0
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
/home/azamat/soft/perl/5.32.0/bin:$ENV{PATH}
/home/azamat/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/oneapi-2020.12.15.004-intel_mpi-2019.4.243
/home/azamat/soft/pnetcdf/1.12.1/oneapi-2020.12.15.004-intel_mpi-2019.4.243
/home/azamat/soft/libs
1
10
omp
spread
unit
icc
icpc
ifort
/home/azamat/soft/openmpi/2.1.6/intel19/bin:$ENV{PATH}
/home/azamat/soft/openmpi/2.1.6/intel19/lib:$ENV{LD_LIBRARY_PATH}
/home/azamat/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/intel19-openmpi2.1.6
/home/azamat/soft/pnetcdf/1.12.1/intel19-openmpi2.1.6
gcc
g++
gfortran
/home/azamat/soft/openmpi/2.1.6/gcc8.2.0/lib:/home/azamat/soft/libs:$ENV{LD_LIBRARY_PATH}
/home/azamat/soft/openmpi/2.1.6/gcc8.2.0/bin:/home/azamat/soft/cmake/3.18.5/bin:$ENV{PATH}
/home/azamat/soft/cmake/3.18.5
/home/azamat/soft/cmake/3.18.5/share/aclocal
/home/azamat/soft/cmake/3.18.5
/home/azamat/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc8.2.0-openmpi2.1.6
/home/azamat/soft/pnetcdf/1.12.1/gcc8.2.0-openmpi2.1.6
opencl
NO_GPU
0
4000MB
0
0
DISABLED
verbose,granularity=thread,balanced
128M
threads
128M
-1
ALCF Polaris 560 nodes, 2.8 GHz AMD EPYC Milan 7543P 32c CPU, 4 NVIDIA A100 GPUs
polaris-*
Linux
gnugpu,gnu,nvidiagpu,nvidia
mpich
E3SMinput
E3SMinput
/grand/E3SMinput/polaris/
.*
/eagle/$PROJECT/$USER/scratch/polaris
/grand/E3SMinput/data
/grand/E3SMinput/data/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/grand/E3SMinput/baselines/polaris/$COMPILER
/grand/E3SMinput/soft/cprnc/cprnc
4
e3sm_developer
4
pbspro
e3sm
32
4
32
32
TRUE
mpiexec
-np {{ total_tasks }} --label
-ppn {{ tasks_per_node }}
--cpu-bind core
-d $SHELL{echo 32/ {{ tasks_per_node }} |bc}
$ENV{GPU_TILE_COMPACT}
/opt/cray/pe/lmod/lmod/init/env_modules_python.py
/opt/cray/pe/lmod/lmod/init/sh
/opt/cray/pe/lmod/lmod/init/csh
/opt/cray/pe/lmod/lmod/libexec/lmod python
module
module
/grand/E3SMinput/soft/modulefiles/polaris
cmake/3.27.9
craype-accel-host
PrgEnv-gnu/8.6.0
gcc-native/13.2
hdf5/1.14.6
netcdf/4.9.3c-4.6.2f
PrgEnv-nvidia/8.6.0
hdf5-nvidia/1.14.6
netcdf-nvidia/4.9.3c-4.6.2f
cuda/12.9
craype-accel-nvidia80
cray-libsci/25.03.0
cray-parallel-netcdf/1.12.3.13
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0
/grand/E3SMinput/soft/perl5/lib/perl5
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
0
$SHELL{if [ -z "$MOAB_ROOT" ]; then echo /grand/E3SMinput/soft/moab/gnu; else echo "$MOAB_ROOT"; fi}
CC
1
/grand/E3SMinput/soft/qsub/set_affinity_gpu_polaris.sh
128M
spread
cores
ANL Sunspot Test and Development System (TDS), batch system is pbspro
uan-.*
LINUX
oneapi-ifx,oneapi-ifxgpu,gnu
mpich
CSC249ADSE15_CNDA
/gila/CSC249ADSE15_CNDA/performance_archive
.*
/lus/gila/projects/CSC249ADSE15_CNDA/$USER/scratch
/lus/gila/projects/CSC249ADSE15_CNDA/inputdata
/lus/gila/projects/CSC249ADSE15_CNDA/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/lus/gila/projects/CSC249ADSE15_CNDA/baselines/$COMPILER
/lus/gila/projects/CSC249ADSE15_CNDA/tools/cprnc/cprnc
16
e3sm_developer
4
pbspro
e3sm
208
96
104
48
FALSE
mpiexec
-np {{ total_tasks }} --label
-ppn {{ tasks_per_node }}
--cpu-bind depth -envall
-d $ENV{OMP_NUM_THREADS}
$ENV{GPU_TILE_COMPACT}
/soft/packaging/lmod/lmod/init/sh
/soft/packaging/lmod/lmod/init/csh
/soft/packaging/lmod/lmod/init/env_modules_python.py
module
module
/soft/packaging/lmod/lmod/libexec/lmod python
cmake
oneapi/eng-compiler/2024.07.30.002
kokkos/4.4.01-omp-sycl
spack cmake
gcc/10.3.0
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
/lus/gila/projects/CSC249ADSE15_CNDA/soft/netcdf/4.9.2c-4.6.1f/oneapi.eng.2024.07.30.002
/lus/gila/projects/CSC249ADSE15_CNDA/soft/pnetcdf/1.14.0/oneapi.eng.2024.07.30.002
/lus/gila/projects/CSC249ADSE15_CNDA/soft/pnetcdf/1.14.0/oneapi.eng.2024.07.30.002/lib:/lus/gila/projects/CSC249ADSE15_CNDA/soft/netcdf/4.9.2c-4.6.1f/oneapi.eng.2024.07.30.002/lib:$ENV{LD_LIBRARY_PATH}
/lus/gila/projects/CSC249ADSE15_CNDA/soft/pnetcdf/1.14.0/oneapi.eng.2024.07.30.002/bin:/lus/gila/projects/CSC249ADSE15_CNDA/soft/netcdf/4.9.2c-4.6.1f/oneapi.eng.2024.07.30.002/bin:$ENV{PATH}
20
1
NO_GPU
0
disable
disable
1
4000MB
0
/soft/tools/mpi_wrapper_utils/gpu_tile_compact.sh
$ENV{KOKKOS_ROOT}
1
0:4,1:4,2:4,3:4,4:4,5:4,6:4,7:4,8:4,9:4,10:4,11:4
0
DISABLED
0
verbose,granularity=thread,balanced
128M
threads
128M
-1
ALCF Aurora, 10624 nodes, 2x52c SPR, 6x2s PVC, 2x512GB DDR5, 2x64GB CPU-HBM, 6x128GB GPU-HBM, Slingshot 11, PBSPro
aurora-uan-.*
LINUX
oneapi-ifxgpu,oneapi-ifx
mpich,mpich1024
E3SM_Dec
/lus/flare/projects/E3SMinput/baselines
.*
/lus/flare/projects/$PROJECT/$USER/scratch
/lus/flare/projects/E3SMinput/data
/lus/flare/projects/E3SMinput/data/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/lus/flare/projects/E3SMinput/baselines/$COMPILER
/lus/flare/projects/E3SMinput/tools/cprnc/cprnc
16
e3sm_integration
4
pbspro
e3sm
102
96
102
12
TRUE
mpiexec
--label -np {{ total_tasks }}
-ppn {{ tasks_per_node }}
-d $ENV{OMP_NUM_THREADS}
--cpu-bind $ENV{CPU_BIND}
--gpu-bind $ENV{GPU_BIND}
--mem-bind $ENV{MEM_BIND}
$ENV{RLIMITS}
/usr/share/lmod/lmod/init/sh
/usr/share/lmod/lmod/init/csh
/usr/share/lmod/lmod/init/env_modules_python.py
module
module
/usr/share/lmod/lmod/libexec/lmod python
/lus/flare/projects/E3SMinput/soft/modulefiles
cmake/3.31.11
oneapi/release/2025.3.1
netcdf-c/4.9.3
netcdf-fortran/4.6.2
mpich-config/collective-tuning/1024
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0
$ENV{NETCDF_C_ROOT}
$ENV{NETCDF_FORTRAN_ROOT}
$ENV{PARALLEL_NETCDF_ROOT}
20
hybrid
disabled
cxi
warn
240
240
1
1
1
1
1
0x001
1
disable
disable
4000MB
0
list:1-8:9-16:17-24:25-32:33-40:41-48:53-60:61-68:69-76:77-84:85-92:93-100
list:0.0:0.1:1.0:1.1:2.0:2.1:3.0:3.1:4.0:4.1:5.0:5.1
list:0:0:0:0:0:0:1:1:1:1:1:1
1
0
DISABLED
0
core
none
local
granularity=core,balanced
128M
--rlimits CORE
-1
-1
ALCF Cray 256 nodes, AMD EPYC Rome 7742 2-socket 64c CPU, 256GB DDR4
crux-*
Linux
gnu
mpich
E3SMinput
E3SMinput
/grand/E3SMinput/crux/
.*
/eagle/$PROJECT/$USER/scratch/crux
/grand/E3SMinput/data
/grand/E3SMinput/data/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/grand/E3SMinput/baselines/crux/$COMPILER
/grand/E3SMinput/soft/crux/cprnc/cprnc
4
e3sm_prod
4
pbspro
e3sm
128
128
TRUE
mpiexec
-np {{ total_tasks }} --label
-ppn {{ tasks_per_node }}
--cpu-bind core
-d $SHELL{echo 128/ {{ tasks_per_node }} |bc}
/opt/cray/pe/lmod/lmod/init/env_modules_python.py
/opt/cray/pe/lmod/lmod/init/sh
/opt/cray/pe/lmod/lmod/init/csh
/opt/cray/pe/lmod/lmod/libexec/lmod python
module
module
/grand/E3SMinput/soft/modulefiles/crux
cmake/3.27.9
craype-accel-host
PrgEnv-gnu/8.6.0
cray-libsci/25.09.0
cray-hdf5-parallel/1.12.2.11
cray-netcdf-hdf5parallel/4.9.0.11
cray-parallel-netcdf/1.12.3.11
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0
/grand/E3SMinput/soft/perl5/lib/perl5
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
$ENV{CRAY_LD_LIBRARY_PATH}:$ENV{LD_LIBRARY_PATH}
hybrid
240
240
128M
spread
cores
PNL cluster, OS is Linux, batch system is SLURM
sooty
LINUX
intel,pgi
mvapich2
/lustre/$USER/cime_output_root
/pic/projects/sooty2/$ENV{USER}/e3sm_inputdata
/pic/projects/sooty2/$ENV{USER}/e3sm_inputdata/atm/datm7
/lustre/$USER/archive/$CASE
/lustre/climate/acme_baselines/$COMPILER
/lustre/climate/acme_baselines/cprnc/cprnc
8
slurm
balwinder.singh -at- pnnl.gov
8
8
FALSE
srun
--mpi=none
--ntasks={{ total_tasks }}
--cpu_bind=sockets --cpu_bind=verbose
--kill-on-bad-exit
/share/apps/modules/Modules/3.2.10/init/perl.pm
/share/apps/modules/Modules/3.2.10/init/python.py
/etc/profile.d/modules.csh
/etc/profile.d/modules.sh
/share/apps/modules/Modules/3.2.10/bin/modulecmd perl
/share/apps/modules/Modules/3.2.10/bin/modulecmd python
module
module
perl/5.20.0
cmake/3.17.1
svn/1.8.13
intel/19.0.5
mkl/2019u5
pgi/14.10
mvapich2/2.3.1
netcdf/4.6.3
/lustre/$USER/csmruns/$CASE/run
/lustre/$USER/csmruns/$CASE/bld
$ENV{MKLROOT}
$ENV{NETCDF_LIB}/../
64M
/share/apps/python/3.7.2/lib/:/share/apps/openssl/1.0.2r/lib:$ENV{LD_LIBRARY_PATH}:/share/apps/gcc/8.1.0/lib:/share/apps/gcc/8.1.0/lib64:
/share/apps/python/3.7.2/bin:$ENV{PATH}
PNNL Intel KNC cluster, OS is Linux, batch system is SLURM
glogin
LINUX
intel
impi,mvapich2
/dtemp/$PROJECT/$USER
/dtemp/st49401/sing201/acme/inputdata/
/dtemp/st49401/sing201/acme/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
$CIME_OUTPUT_ROOT/acme/acme_baselines
$CIME_OUTPUT_ROOT/acme/acme_baselines/cprnc/cprnc
8
slurm
balwinder.singh -at- pnnl.gov
16
16
TRUE
mpirun
-np {{ total_tasks }}
srun
--mpi=none
--ntasks={{ total_tasks }}
--cpu_bind=sockets --cpu_bind=verbose
--kill-on-bad-exit
/opt/lmod/7.8.4/init/env_modules_python.py
/etc/profile.d/modules.csh
/etc/profile.d/modules.sh
/opt/lmod/7.8.4/libexec/lmod python
module
module
python/2.7.9
intel/ips_18
mkl/14.0
impi/4.1.2.040
mvapich2/1.9
netcdf/4.3.0
$CIME_OUTPUT_ROOT/csmruns/$CASE/run
$CIME_OUTPUT_ROOT/csmruns/$CASE/bld
64M
$ENV{NETCDF_ROOT}
$ENV{MLIBHOME}
intel
PNL Haswell cluster, OS is Linux, batch system is SLURM
constance
LINUX
intel,pgi,nag
mvapich2,openmpi,intelmpi,mvapich
/pic/scratch/$USER
/pic/projects/climate/csmdata/
/pic/projects/climate/csmdata/atm/datm7
/pic/scratch/$USER/archive/$CASE
/pic/projects/climate/acme_baselines/$COMPILER
/pic/projects/climate/acme_baselines/cprnc/cprnc
8
slurm
balwinder.singh -at- pnnl.gov
24
24
FALSE
srun
--mpi=none
--ntasks={{ total_tasks }}
--cpu_bind=sockets --cpu_bind=verbose
--kill-on-bad-exit
srun
--ntasks={{ total_tasks }}
--cpu_bind=sockets --cpu_bind=verbose
--kill-on-bad-exit
mpirun
-n {{ total_tasks }}
mpirun
-n {{ total_tasks }}
/share/apps/modules/Modules/3.2.10/init/perl.pm
/share/apps/modules/Modules/3.2.10/init/python.py
/etc/profile.d/modules.csh
/etc/profile.d/modules.sh
/share/apps/modules/Modules/3.2.10/bin/modulecmd perl
/share/apps/modules/Modules/3.2.10/bin/modulecmd python
module
module
perl/5.20.0
cmake/3.17.1
gcc/8.1.0
intel/19.0.5
mkl/2019u5
pgi/14.10
nag/6.0
mkl/15.0.1
mvapich2/2.1
mvapich2/2.3.1
mvapich2/2.1
mvapich2/2.3b
intelmpi/5.0.1.035
openmpi/1.8.3
netcdf/4.6.3
netcdf/4.3.2
netcdf/4.4.1.1
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
/share/apps/gcc/8.1.0/lib:/share/apps/gcc/8.1.0/lib64:$ENV{LD_LIBRARY_PATH}
64M
$ENV{NETCDF_LIB}/../
$ENV{MKLROOT}
$ENV{MKLROOT}
PNL E3SM Intel Xeon Gold 6148(Skylake) nodes, OS is Linux, SLURM
compy
LINUX
intel,pgi
impi,mvapich2
/compyfs
.*
/compyfs/$USER/e3sm_scratch
/compyfs/inputdata
/compyfs/inputdata/atm/datm7
/compyfs/$USER/e3sm_scratch/archive/$CASE
/compyfs/e3sm_baselines/$COMPILER
/compyfs/e3sm_baselines/cprnc/cprnc.intel.v20.0.04/cprnc
8
e3sm_integration
4
slurm
bibi.mathew -at- pnnl.gov
40
40
TRUE
srun
--mpi=none
--ntasks={{ total_tasks }} --nodes={{ num_nodes }}
--kill-on-bad-exit
-l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc}
srun
--mpi=pmi2
--ntasks={{ total_tasks }} --nodes={{ num_nodes }}
--kill-on-bad-exit
-l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc}
/share/apps/modules/init/perl.pm
/share/apps/modules/init/python.py
/etc/profile.d/modules.csh
/etc/profile.d/modules.sh
/share/apps/modules/bin/modulecmd perl
/share/apps/modules/bin/modulecmd python
module
module
cmake/3.19.6
gcc/8.1.0
intel/20.0.0
pgi/19.10
mvapich2/2.3.1
intelmpi/2020
intelmpi/2019u3
python/3.11.5
netcdf/4.6.3
pnetcdf/1.9.0
mkl/2020
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.05
0
$ENV{NETCDF_ROOT}/
$ENV{PNETCDF_ROOT}/
$ENV{MKLROOT}
/share/apps/gcc/8.1.0/lib:/share/apps/gcc/8.1.0/lib64:$ENV{LD_LIBRARY_PATH}
0
1
1
10
64M
cores
EMSL, OS is Linux, SLURM
tlogin
LINUX
intel
impi,mvapich2
/tahoma/emsls60153
.*
/tahoma/emsls60153/$USER/e3sm_scratch
/tahoma/emsls60153/inputdata
/tahoma/emsls60153/inputdata/atm/datm7
/tahoma/emsls60153/$USER/e3sm_scratch/archive/$CASE
/tahoma/emsls60153/e3sm_baselines/$COMPILER
/tahoma/emsls60153/e3sm_baselines/cprnc/cprnc
8
e3sm_integration
4
slurm
balwinder.singh -at- pnnl.gov
36
36
TRUE
srun
--mpi=none
--ntasks={{ total_tasks }} --nodes={{ num_nodes }}
--kill-on-bad-exit
-l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc}
srun
--mpi=pmi2
--ntasks={{ total_tasks }} --nodes={{ num_nodes }}
--kill-on-bad-exit
-l --cpu_bind=cores -c $ENV{OMP_NUM_THREADS} -m plane=$SHELL{echo 40/$OMP_NUM_THREADS|bc}
/opt/lmod/7.8.4/init/env_modules_python.py
/etc/profile.d/modules.csh
/etc/profile.d/modules.sh
/opt/lmod/7.8.4/libexec/lmod python
module
module
cmake/3.19.5-intel
gcc/10.2.0
intel/ips_20_u2
mvapich2~cuda/2.3.5-intel
impi/ips_20_u2
netcdf-c/4.7.4-intel-intel-mpi-2019.10.317
netcdf-fortran/4.5.3-intel-intel-mpi-2019.10.317
mkl/scalapack
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.05
0
$SHELL{dirname $(dirname $(which h5diff))}
$SHELL{dirname $(dirname $(which nc-config))}
$SHELL{dirname $(dirname $(which nf-config))}
$ENV{MKLROOT}
0
1
1
$SHELL{which libpmi2.so}
10
64M
cores
ORNL XK6, os is Linux, 32 pes/node, batch system is PBS
oic5
LINUX
gnu
mpich,openmpi
/home/$USER/models/ACME
/home/zdr/models/ccsm_inputdata
/home/zdr/models/ccsm_inputdata/atm/datm7
/home/$USER/models/ACME/run/archive/$CASE
32
e3sm_developer
pbs
dmricciuto
32
32
/projects/cesm/devtools/mpich-3.0.4-gcc4.8.1/bin/mpirun
-np {{ total_tasks }}
--hostfile $ENV{PBS_NODEFILE}
/home/$USER/models/ACME/run/$CASE/run
/home/$USER/models/ACME/run/$CASE/bld
OR-CONDO, CADES-CCSI, os is Linux, 16 pes/node, batch system is PBS
or-condo
LINUX
gnu,intel
openmpi
/lustre/or-hydra/cades-ccsi/scratch/$USER
/lustre/or-hydra/cades-ccsi/proj-shared/project_acme/ACME_inputdata
/lustre/or-hydra/cades-ccsi/proj-shared/project_acme/ACME_inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/lustre/or-hydra/cades-ccsi/proj-shared/project_acme/baselines/$COMPILER
/lustre/or-hydra/cades-ccsi/proj-shared/tools/cprnc.orcondo
4
e3sm_developer
slurm
yinj -at- ornl.gov
32
32
FALSE
mpirun
-np {{ total_tasks }}
/usr/share/Modules/init/sh
/usr/share/Modules/init/csh
/usr/share/Modules/init/perl.pm
/usr/share/Modules/init/python.py
module
module
/usr/bin/modulecmd perl
/usr/bin/modulecmd python
PE-gnu
mkl/2017
cmake/3.12.0
python/2.7.12
nco/4.6.9
hdf5-parallel/1.8.17
netcdf-hdf5parallel/4.3.3.1
pnetcdf/1.9.0
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
/software/user_tools/current/cades-ccsi/petsc4pf/openmpi-1.10-gcc-5.3
/software/user_tools/current/cades-ccsi/perl5/lib/perl5/
/software/dev_tools/swtree/cs400_centos7.2_pe2016-08/netcdf-hdf5parallel/4.3.3.1/centos7.2_gnu5.3.0
/software/dev_tools/swtree/cs400_centos7.2_pe2016-08/pnetcdf/1.9.0/centos7.2_gnu5.3.0
/software/tools/compilers/intel_2017/mkl/lib/intel64
OCSS, ornl-css, CADES-baseline, os is Linux, 128 pes/node, batch system is slurm
.*baseline.*
LINUX
gnu
openmpi,openmpi-amanzitpls,mpi-serial
/gpfs/wolf2/cades/cli185/scratch/$USER
/gpfs/wolf2/cades/cli185/world-shared/e3sm/inputdata
/gpfs/wolf2/cades/cli185/world-shared/e3sm/inputdata/atm/datm7
/gpfs/wolf2/cades/cli185/world-shared/e3sm/archive/$USER/$CASE
/gpfs/wolf2/cades/cli185/world-shared/e3sm/baselines/$COMPILER
/gpfs/wolf2/cades/cli185/world-shared/e3sm/tools/cprnc.baseline
64
e3sm_developer
128
slurm
yuanf -at- ornl.gov
128
128
FALSE
srun
-n {{ total_tasks }}
srun
-n {{ total_tasks }}
/sw/baseline/nsp/lmod/8.7.37/init/sh
/sw/baseline/nsp/lmod/8.7.37/init/csh
/sw/baseline/nsp/lmod/8.7.37/init/env_modules_python.py
/sw/baseline/nsp/lmod/8.7.37/init/perl
module
/sw/baseline/nsp/lmod/8.7.37/libexec/lmod python
module
module
miniforge3/24.11.3-0
cmake/3.30.5
DefApps
gcc/12.4.0
openmpi/5.0.5
netlib-lapack/3.11.0
hdf5/1.14.5-mpi
netcdf-c/4.9.2-mpi-h5f
netcdf-cxx/4.2-mpi
netcdf-fortran/4.6.1-mpi-h5f
parallel-netcdf/1.12.3-mpi
DefApps
gcc/12.4.0
openmpi/5.0.5
openblas/0.3.28
-a /ccsopen/proj/cli185/ats-dev/modulefiles
ats/tpls-0.98.12+master/cades-baseline/openblas-0.3.28-openmpi-5.0.5-gcc-12.4.0/opt
DefApps
gcc/12.4.0
openmpi
netlib-lapack/3.11.0
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
/ccsopen/proj/cli185/petsc-3.x-noopt/
/ccsopen/home/zdr/opt/perl/bin:$ENV{PATH}/
$ENV{OLCF_HDF5_ROOT}
$ENV{OLCF_NETCDF_C_ROOT}
$ENV{OLCF_NETCDF_FORTRAN_ROOT}
$ENV{OLCF_PARALLEL_NETCDF_ROOT}
$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib64
/ccsopen/proj/cli185/ats-dev/amanzi-tpls/install/tpls-0.98.12/cades-baseline/openblas-0.3.28-openmpi-5.0.5-gcc-12.4.0/opt/petsc-3.20/
/ccsopen/home/zdr/opt/perl/bin:$ENV{PATH}/
/ccsopen/proj/cli185/ats-dev/amanzi-tpls/install/tpls-0.98.12/cades-baseline/openblas-0.3.28-openmpi-5.0.5-gcc-12.4.0/opt
/ccsopen/proj/cli185/ats-dev/amanzi-tpls/install/tpls-0.98.12/cades-baseline/openblas-0.3.28-openmpi-5.0.5-gcc-12.4.0/opt
/ccsopen/proj/cli185/ats-dev/amanzi-tpls/install/tpls-0.98.12/cades-baseline/openblas-0.3.28-openmpi-5.0.5-gcc-12.4.0/opt
/ccsopen/proj/cli185/netcdf-hdf5_mpi-serial/bin:/ccsopen/home/zdr/opt/perl/bin:$ENV{PATH}/
/ccsopen/proj/cli185/netcdf-hdf5_mpi-serial/
/ccsopen/proj/cli185/netcdf-hdf5_mpi-serial/
/ccsopen/proj/cli185/netcdf-hdf5_mpi-serial/
$ENV{OLCF_NETLIB_LAPACK_ROOT}/lib64
Chicoma CPU-only nodes at LANL IC. Each node has 2 AMD EPYC 7H12 64-Core (Milan) 512GB
ch-fe*
Linux
gnu,intel,nvidia,amdclang
mpich
/lustre/scratch5/$ENV{USER}/E3SM/scratch/chicoma-cpu
/lustre/scratch5/$ENV{USER}/inputdata
/lustre/scratch5/$ENV{USER}/inputdata/atm/datm7
/lustre/scratch5/$ENV{USER}/E3SM/archive/$CASE
/lustre/scratch5/$ENV{USER}/E3SM/input_data/ccsm_baselines/$COMPILER
/usr/projects/e3sm/software/chicoma-cpu/cprnc
10
e3sm_developer
4
slurm
e3sm
256
128
TRUE
srun
--label
-n {{ total_tasks }} -N {{ num_nodes }}
-c $SHELL{echo 256/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc}
$SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}
-m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
/usr/share/lmod/lmod/init/perl
/usr/share/lmod/lmod/init/python
/usr/share/lmod/lmod/init/sh
/usr/share/lmod/lmod/init/csh
/usr/share/lmod/lmod/libexec/lmod perl
/usr/share/lmod/lmod/libexec/lmod python
module
module
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
cray-netcdf
cray-hdf5
gcc
gcc-native
intel
intel-oneapi
nvidia
aocc
cudatoolkit
climate-utils
cray-libsci
craype-accel-nvidia80
craype-accel-host
perftools-base
perftools
darshan
PrgEnv-gnu
PrgEnv-intel
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
PrgEnv-gnu/8.5.0
gcc-native/12.3
cray-libsci/23.12.5
PrgEnv-nvidia/8.5.0
nvidia/24.7
cray-libsci/23.12.5
PrgEnv-intel/8.5.0
intel/2023.2.0
intel-mkl/2023.2.0
PrgEnv-aocc/8.4.0
aocc/3.2.0
cray-libsci/23.05.1.4
craype-accel-host
craype/2.7.30
cray-mpich/8.1.28
cray-hdf5-parallel/1.12.2.9
cray-netcdf-hdf5parallel/4.9.0.9
cray-parallel-netcdf/1.12.3.9
cmake/3.29.6
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
1
1
128M
spread
threads
FALSE
/usr/projects/climate/SHARED_CLIMATE/software/chicoma-cpu/perl5-only-switch/lib/perl5
romio_ds_write=disable;romio_ds_read=disable;romio_cb_write=enable;romio_cb_read=enable
software
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
/usr/lib64/gcc/x86_64-suse-linux/12:$ENV{LD_LIBRARY_PATH}
-1
Chicoma GPU nodes at LANL IC. Each GPU node has single AMD EPYC 7713 64-Core (Milan) (256GB) and 4 nvidia A100
ch-fe*
Linux
gnugpu,gnu,nvidiagpu,nvidia
mpich
/lustre/scratch5/$ENV{USER}/E3SM/scratch/chicoma-gpu
/usr/projects/e3sm/inputdata
/usr/projects/e3sm/inputdata/atm/datm7
/lustre/scratch5/$ENV{USER}/E3SM/archive/$CASE
/lustre/scratch5/$ENV{USER}/E3SM/input_data/ccsm_baselines/$COMPILER
/usr/projects/e3sm/software/chicoma-cpu/cprnc
10
e3sm_developer
4
slurm
e3sm
128
256
256
4
64
64
TRUE
srun
--label
-n {{ total_tasks }} -N {{ num_nodes }}
-c $ENV{OMP_NUM_THREADS}
$SHELL{if [ 128 -ge `./xmlquery --value MAX_MPITASKS_PER_NODE` ]; then echo "--cpu_bind=cores"; else echo "--cpu_bind=threads";fi;}
-m plane=$SHELL{echo `./xmlquery --value MAX_MPITASKS_PER_NODE`}
/usr/share/lmod/lmod/init/perl
/usr/share/lmod/lmod/init/python
/usr/share/lmod/lmod/init/sh
/usr/share/lmod/lmod/init/csh
/usr/share/lmod/lmod/libexec/lmod perl
/usr/share/lmod/lmod/libexec/lmod python
module
module
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
cray-netcdf
cray-hdf5
intel
intel-oneapi
nvidia
aocc
cudatoolkit
climate-utils
cray-libsci
craype-accel-nvidia80
craype-accel-host
perftools-base
perftools
darshan
PrgEnv-gnu
PrgEnv-intel
PrgEnv-nvidia
PrgEnv-cray
PrgEnv-aocc
PrgEnv-gnu/8.5.0
gcc/12.2.0
cray-libsci/23.05.1.4
PrgEnv-nvidia/8.4.0
nvidia/22.7
cray-libsci/23.05.1.4
cudatoolkit/22.7_11.7
craype-accel-nvidia80
cudatoolkit/22.7_11.7
craype-accel-nvidia80
gcc-mixed/11.2.0
craype-accel-host
craype-accel-host
craype-accel-host
craype/2.7.21
cray-mpich/8.1.26
cray-hdf5-parallel/1.12.2.3
cray-netcdf-hdf5parallel/4.9.0.3
cray-parallel-netcdf/1.12.3.3
cmake/3.27.7
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
1
1
128M
spread
threads
FALSE
/usr/projects/climate/SHARED_CLIMATE/software/chicoma-cpu/perl5-only-switch/lib/perl5
romio_ds_write=disable;romio_ds_read=disable;romio_cb_write=enable;romio_cb_read=enable
software
MPI_Bcast
$ENV{CRAY_NETCDF_HDF5PARALLEL_PREFIX}
$ENV{CRAY_PARALLEL_NETCDF_PREFIX}
/usr/projects/e3sm/cudatoolkit:$ENV{PKG_CONFIG_PATH}
/opt/cray/pe/gcc/12.2.0/snos/lib64:$ENV{LD_LIBRARY_PATH}
-1
Mesabi batch queue
LINUX
intel
openmpi
/home/reichpb/scratch
/home/reichpb/shared/cesm_inputdata
/home/reichpb/shared/cesm_inputdata/atm/datm7
USERDEFINED_optional_run
USERDEFINED_optional_run/$COMPILER
USERDEFINED_optional_test
2
pbs
chen1718 at umn dot edu
24
24
TRUE
aprun
-n {{ total_tasks }}
-S {{ tasks_per_numa }}
-N $MAX_MPITASKS_PER_NODE
-d $ENV{OMP_NUM_THREADS}
$CASEROOT/run
$CASEROOT/exedir
Itasca batch queue
LINUX
intel
openmpi
/home/reichpb/scratch
/home/reichpb/shared/cesm_inputdata
/home/reichpb/shared/cesm_inputdata/atm/datm7
USERDEFINED_optional_run
USERDEFINED_optional_run/$COMPILER
USERDEFINED_optional_test
2
pbs
chen1718 at umn dot edu
8
8
aprun
-n {{ total_tasks }}
-S {{ tasks_per_numa }}
-N $MAX_MPITASKS_PER_NODE
-d $ENV{OMP_NUM_THREADS}
$CASEROOT/run
$CASEROOT/exedir
/soft/netcdf/fortran-4.4-intel-sp1-update3-parallel/lib
Lawrencium LR3 cluster at LBL, OS is Linux (intel), batch system is SLURM
n000*
LINUX
intel,gnu
openmpi
ac_acme
/global/scratch/$ENV{USER}
/global/scratch/$ENV{USER}/cesm_input_datasets/
/global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7
$CIME_OUTPUT_ROOT/cesm_archive/$CASE
$CIME_OUTPUT_ROOT/cesm_baselines/$COMPILER
$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc
4
slurm
rgknox and glemieux at lbl dot gov
8
8
TRUE
mpirun
-np {{ total_tasks }}
-npernode $MAX_MPITASKS_PER_NODE
mpirun
-np {{ total_tasks }}
-npernode $MAX_MPITASKS_PER_NODE
/etc/profile.d/modules.sh
/etc/profile.d/modules.csh
/usr/Modules/init/perl.pm
/usr/Modules/python.py
module
module
/usr/Modules/bin/modulecmd perl
/usr/Modules/bin/modulecmd python
cmake/3.15.0
perl
xml-libxml/2.0116
python/3.6
intel/2016.4.072
mkl
netcdf/4.4.1.1-intel-s
openmpi
netcdf/4.4.1.1-intel-p
gcc/6.3.0
lapack/3.8.0-gcc
netcdf/5.4.1.1-gcc-s
openmpi/2.0.2-gcc
openmpi/3.0.1-gcc
netcdf/4.4.1.1-gcc-p
openmpi/2.0.2-gcc
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
$ENV{NETCDF_DIR}
$ENV{PNETCDF_DIR}
/global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel
Lawrencium LR6 cluster at LBL, OS is Linux (intel), batch system is SLURM
n000*
LINUX
intel,gnu
openmpi
ac_acme
/global/scratch/$ENV{USER}
/global/scratch/$ENV{USER}/cesm_input_datasets/
/global/scratch/$ENV{USER}/cesm_input_datasets/atm/datm7
$CIME_OUTPUT_ROOT/cesm_archive/$CASE
$CIME_OUTPUT_ROOT/cesm_baselines/$COMPILER
$CIME_OUTPUT_ROOT/cesm_tools/cprnc/cprnc
4
slurm
rgknox and glemieux at lbl dot gov
32
32
TRUE
mpirun
-np {{ total_tasks }}
mpirun
-np {{ total_tasks }}
/etc/profile.d/modules.sh
/etc/profile.d/modules.csh
/usr/Modules/init/perl.pm
/usr/Modules/init/python.py
module
module
/usr/Modules/bin/modulecmd perl
/usr/Modules/bin/modulecmd python
cmake/3.15.0
perl
xml-libxml/2.0116
python/3.6
intel/2016.4.072
mkl
netcdf/4.4.1.1-intel-s
openmpi
netcdf/4.4.1.1-intel-p
gcc/6.3.0
lapack/3.8.0-gcc
netcdf/4.4.1.1-gcc-s
openmpi/2.0.2-gcc
openmpi/3.0.1-gcc
netcdf/4.4.1.1-gcc-p
openmpi/2.0.2-gcc
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
$ENV{NETCDF_DIR}
$ENV{PNETCDF_DIR}
/global/software/sl-6.x86_64/modules/intel/2016.1.150/lapack/3.6.0-intel
FATES development machine at LBNL, System76 Thelio Massive Workstation Pop!_OS 20.04
lobata
LINUX
gnu
openmpi
$ENV{HOME}/scratch/
/data/cesmdataroot/inputdata
/data/cesmdataroot/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
$ENV{HOME}/scratch/ctsm-baselines
/home/glemieux/Repos/cime/tools/cprnc/cprnc
make
16
none
glemieux at lbl dot gov
4
4
FALSE
mpirun
-np {{ total_tasks }}
--map-by ppr:{{ tasks_per_node }}:socket:PE=$ENV{OMP_NUM_THREADS} --bind-to hwthread
/usr/share/modules/init/python.py
/usr/share/modules/init/perl.pm
/usr/share/modules/init/sh
/usr/share/modules/init/csh
/usr/bin/modulecmd python
/usr/bin/modulecmd perl
module
module
hdf5
netcdf-c
netcdf-fortran
esmf
small developer workhorse at lbl climate sciences
LINUX
gnu
openmpi
ngeet
/raid1/lbleco/e3sm/
/home/rgknox/Models/InputDatasets/cesm_input_data/
/home/rgknox/Models/InputDatasets/cesm_input_data/atm/datm7/
/home/rgknox/Models/cesm_archive/$CASE
/home/rgknox/Models/cesm_baselines/$COMPILER
/raid1/lbleco/cesm/cesm_tools/cprnc/cprnc
1
none
rgknox at lbl gov
16
16
FALSE
mpirun
-np {{ total_tasks }}
-npernode $MAX_MPITASKS_PER_NODE
/usr/local/share/cmake-3.21/
$ENV{NETCDF_HOME}
ORNL Summit. Node: 2x POWER9 + 6x Volta V100, 22 cores/socket, 4 HW threads/core.
.*summit.*
LINUX
ibm,ibmgpu,pgi,pgigpu,gnu,gnugpu
spectrum-mpi
cli115
cli115
/gpfs/alpine/proj-shared/cli115
.*
/gpfs/alpine2/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch
/gpfs/alpine2/atm146/world-shared/e3sm/inputdata
/gpfs/alpine2/atm146/world-shared/e3sm/inputdata/atm/datm7
/gpfs/alpine/$PROJECT/proj-shared/$ENV{USER}/archive/$CASE
/gpfs/alpine2/atm146/world-shared/e3sm/baselines/$COMPILER
/gpfs/alpine2/atm146/world-shared/e3sm/tools/cprnc.summit/cprnc
8
e3sm_developer
4
lsf
e3sm
84
18
42
42
84
18
42
42
TRUE
jsrun
-X 1
$SHELL{if [ {{ total_tasks }} -eq 1 ];then echo --nrs 1 --rs_per_host 1;else echo --nrs $NUM_RS --rs_per_host $RS_PER_NODE;fi}
--tasks_per_rs $SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
-d plane:$SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
--cpu_per_rs $ENV{CPU_PER_RS}
--gpu_per_rs $ENV{GPU_PER_RS}
--bind packed:smt:$ENV{OMP_NUM_THREADS}
--latency_priority $ENV{LTC_PRT}
--stdio_mode prepended
$ENV{JSRUN_THREAD_VARS}
$ENV{SMPIARGS}
/sw/summit/lmod/8.4/init/sh
/sw/summit/lmod/8.4/init/csh
/sw/summit/lmod/8.4/init/env_modules_python.py
/sw/summit/lmod/8.4/init/perl
module
/sw/summit/lmod/8.4/libexec/lmod python
module
module
DefApps-2023
python/3.7-anaconda3
subversion/1.14.0
git/2.31.1
cmake/3.20.2
essl/6.3.0
netlib-lapack/3.8.0
nvhpc/21.11
xl/16.1.1-10
gcc/9.1.0
cuda/10.1.243
cuda/11.0.3
cuda/10.1.243
spectrum-mpi/10.4.0.3-20210112
hdf5/1.10.7
netcdf-c/4.8.0
netcdf-fortran/4.4.5
parallel-netcdf/1.12.2
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
$ENV{OLCF_NETCDF_C_ROOT}
$ENV{OLCF_NETCDF_FORTRAN_ROOT}
$ENV{OLCF_NETLIB_LAPACK_ROOT}
Generic
$ENV{OLCF_ESSL_ROOT}
$ENV{OLCF_HDF5_ROOT}
True
$ENV{OLCF_PARALLEL_NETCDF_ROOT}
0
True
-E OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -E OMP_PROC_BIND=spread -E OMP_PLACES=threads -E OMP_STACKSIZE=256M
--smpiargs="-gpu"
2
21
0
cpu-cpu
$SHELL{echo "2*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
$SHELL{echo "(`./xmlquery --value MAX_TASKS_PER_NODE`+41)/42"|bc}
2
21
0
cpu-cpu
$SHELL{echo "2*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
$SHELL{echo "(`./xmlquery --value MAX_TASKS_PER_NODE`+41)/42"|bc}
1
1
mlx5_3:1,mlx5_0:1
mlx5_0:1,mlx5_3:1
2
21
0
cpu-cpu
$SHELL{echo "2*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
$SHELL{echo "(`./xmlquery --value MAX_TASKS_PER_NODE`+41)/42"|bc}
6
7
1
gpu-cpu
$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
6
3
1
gpu-cpu
$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
6
7
1
gpu-cpu
$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
1
1
mlx5_3:1,mlx5_0:1
mlx5_0:1,mlx5_3:1
ORNL Ascent. Node: 2x POWER9 + 6x Volta V100, 22 cores/socket, 4 HW threads/core.
.*ascent.*
LINUX
ibm,ibmgpu,pgi,pgigpu,gnu,gnugpu
spectrum-mpi
cli115
cli115
/gpfs/wolf/proj-shared/$PROJECT
cli115
/gpfs/wolf/$PROJECT/proj-shared/$ENV{USER}/e3sm_scratch
/gpfs/wolf/cli115/world-shared/e3sm/inputdata
/gpfs/wolf/cli115/world-shared/e3sm/inputdata/atm/datm7
/gpfs/wolf/$PROJECT/proj-shared/$ENV{USER}/archive/$CASE
/gpfs/wolf/cli115/world-shared/e3sm/baselines/$COMPILER
/gpfs/wolf/cli115/world-shared/e3sm/tools/cprnc/cprnc
8
e3sm_integration
4
lsf
e3sm
84
18
42
42
84
18
42
42
TRUE
jsrun
-X 1
$SHELL{if [ {{ total_tasks }} -eq 1 ];then echo --nrs 1 --rs_per_host 1;else echo --nrs $NUM_RS --rs_per_host $RS_PER_NODE;fi}
--tasks_per_rs $SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
-d plane:$SHELL{echo "({{ tasks_per_node }} + $RS_PER_NODE - 1)/$RS_PER_NODE"|bc}
--cpu_per_rs $ENV{CPU_PER_RS}
--gpu_per_rs $ENV{GPU_PER_RS}
--bind packed:smt:$ENV{OMP_NUM_THREADS}
--latency_priority $ENV{LTC_PRT}
--stdio_mode prepended
$ENV{JSRUN_THREAD_VARS}
$ENV{SMPIARGS}
/sw/ascent/lmod/lmod/init/sh
/sw/ascent/lmod/lmod/init/csh
/sw/ascent/lmod/lmod/init/env_modules_python.py
/sw/ascent/lmod/lmod/libexec/lmod python
module
module
DefApps
python/3.8-anaconda3
git/2.31.1
subversion
cmake/3.22.2
essl/6.3.0
netlib-lapack/3.9.1
nvhpc/21.11
cuda/10.1.243
xl/16.1.1-10
cuda/11.0.3
gcc/9.1.0
spectrum-mpi/10.4.0.3-20210112
hdf5/1.10.7
netcdf-c/4.8.1
netcdf-fortran/4.4.5
parallel-netcdf/1.12.2
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
9000
/gpfs/wolf/cli115/world-shared/e3sm/soft/perl/5.26.0/bin:$ENV{PATH}
$SHELL{dirname $(dirname $(which nc-config))}
$SHELL{dirname $(dirname $(which nf-config))}
$SHELL{dirname $(dirname $(which pnetcdf_version))}
$ENV{OLCF_NETLIB_LAPACK_ROOT}
Generic
$ENV{OLCF_ESSL_ROOT}
$ENV{OLCF_HDF5_ROOT}
True
0
-E OMP_NUM_THREADS=$ENV{OMP_NUM_THREADS} -E OMP_PROC_BIND=spread -E OMP_PLACES=threads -E OMP_STACKSIZE=256M
--smpiargs="-gpu"
2
21
0
cpu-cpu
$SHELL{echo "2*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
$SHELL{echo "(`./xmlquery --value MAX_TASKS_PER_NODE`+41)/42"|bc}
6
7
1
gpu-cpu
$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
6
3
1
gpu-cpu
$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
pgc++
6
7
1
gpu-cpu
$SHELL{echo "6*((`./xmlquery --value TOTAL_TASKS` + `./xmlquery --value TASKS_PER_NODE` - 1)/`./xmlquery --value TASKS_PER_NODE`)"|bc}
Medium sized linux cluster at BNL, torque scheduler.
LINUX
gnu
openmpi,mpi-serial
/data/$ENV{USER}
/data/Model_Data/cesm_input_datasets/
/data/Model_Data/cesm_input_datasets/atm/datm7
$CIME_OUTPUT_ROOT/cesm_archive/$CASE
$CIME_OUTPUT_ROOT/cesm_baselines
/data/software/cesm_tools/cprnc/cprnc
4
pbs
sserbin@bnl.gov
12
12
12
FALSE
mpirun
-np {{ total_tasks }}
-npernode $MAX_TASKS_PER_NODE
/etc/profile.d/modules.sh
/etc/profile.d/modules.csh
/usr/share/Modules/init/perl.pm
/usr/share/Modules/init/python.py
module
module
/usr/bin/modulecmd perl
/usr/bin/modulecmd python
perl/5.22.1
libxml2/2.9.2
maui/3.3.1
python/2.7.15
python/3.6.2
gcc/5.4.0
gfortran/5.4.0
hdf5/1.8.19fates
netcdf/4.4.1.1-gnu540-fates
openmpi/2.1.1-gnu540
openmpi/2.1.1-gnu540
/data/software/hdf5/1.8.19fates
/data/software/netcdf/4.4.1.1-gnu540-fates
ORNL experimental/evaluation cluster
tulip.*
LINUX
gnu
openmpi
/home/groups/coegroup/e3sm/scratch/$USER
/home/groups/coegroup/e3sm/inputdata2
/home/groups/coegroup/e3sm/inputdata2/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/home/groups/coegroup/e3sm/baselines/$COMPILER
/home/groups/coegroup/e3sm/tools/cprnc/cprnc
16
e3sm_developer
4
slurm
e3sm
64
32
FALSE
mpirun
--tag-output -n {{ total_tasks }}
--map-by ppr:1:core:PE=$ENV{OMP_NUM_THREADS} --bind-to core
/cm/local/apps/environment-modules/current/init/python
/cm/local/apps/environment-modules/current/init/sh
/cm/local/apps/environment-modules/current/init/csh
/cm/local/apps/environment-modules/current/bin/modulecmd python
module
module
gcc
cce
PrgEnv-cray
cray-mvapich2
cmake/3.17.0
/home/users/twhite/share/modulefiles
svn/1.10.6
gcc/8.1.0
blas/gcc/64/3.8.0
lapack/gcc/64/3.8.0
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
/home/groups/coegroup/e3sm/soft/perl5/lib/perl5
/home/groups/coegroup/e3sm/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc/8.2.0
gcc
g++
gfortran
/home/groups/coegroup/e3sm/soft/openmpi/2.1.6/gcc/8.2.0/bin:$ENV{PATH}
/home/groups/coegroup/e3sm/soft/openmpi/2.1.6/gcc/8.2.0/lib:/home/groups/coegroup/e3sm/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc/8.2.0/lib:$ENV{LD_LIBRARY_PATH}
/home/groups/coegroup/e3sm/soft/pnetcdf/1.12.1/gcc/8.2.0/openmpi/2.1.6
128M
threads
RIKEN-CCS Fugaku: Fujitsu A64FX 48 cores/node.
fn01sv.*
LINUX
gnu,fj
fujitsu
hp210190
/data/hp210190/
.*
/data/hp210190/$USER/scratch
/data/hp210190/inputdata
/data/hp210190/inputdata/atm/datm7
/data/hp210190/$USER/scratch/archive/$CASE
/data/hp210190/baselines/$COMPILER
/data/hp210190/tools/cprnc/cprnc
8
e3sm_integration
4
moab
E3SM
48
48
FALSE
mpirun
-n {{ total_tasks }} -std e3sm.log.$LID
/vol0003/hp210190/data/soft/spack-v0.17.0/share/spack/setup-env.sh
/vol0003/hp210190/data/soft/spack-v0.17.0/share/spack/setup-env.csh
spack
spack
--all
gcc @11.2.0%gcc@8.4.1 arch=linux-rhel8-a64fx
fujitsu-mpi @head%gcc@11.2.0 arch=linux-rhel8-a64fx
--loaded;ln -sf /lib64/libhwloc.so.15 /tmp/libhwloc.so.5
--all
netcdf-c @4.8.1 %fj@4.7.0 arch=linux-rhel8-a64fx
netcdf-cxx @4.2 %fj@4.7.0 arch=linux-rhel8-a64fx
netcdf-fortran @4.5.3 %fj@4.7.0 arch=linux-rhel8-a64fx
parallel-netcdf@1.12.2%fj@4.7.0 arch=linux-rhel8-a64fx
netlib-lapack @3.9.1 %fj@4.7.0 arch=linux-rhel8-a64fx
--loaded
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
1000
/data/hp210190/soft/perl5/lib/perl5:/home/hp210190/u02380/perl5/lib/perl5
share_band
/data/hp210190/soft/netcdf/4.4.1c-4.2cxx-4.4.4f/gcc8.3.1
/data/hp210190/soft/spack-v0.16/opt/spack/linux-rhel8-a64fx/gcc-8.3.1/netlib-lapack-3.8.0-jhmofiqoky6ajxmda5caawfhqnrirmm5
/tmp:/data/hp210190/soft/spack-v0.16/opt/spack/linux-rhel8-a64fx/gcc-8.3.1/netlib-lapack-3.8.0-jhmofiqoky6ajxmda5caawfhqnrirmm5/lib64:$ENV{LD_LIBRARY_PATH}
$SHELL{dirname $(dirname $(which nc-config))}
$SHELL{dirname $(dirname $(which nf-config))}
$SHELL{dirname $(dirname $(which pnetcdf_version))}
128M
cores
ERDC XC40, os is CNL, 44 pes/node, batch system is PBS
onyx
CNL
intel
mpich
NPSCA07935242
/p/app/unsupported/RASM/acme
.*
$ENV{WORKDIR}
/p/app/unsupported/RASM/acme/inputdata
/p/app/unsupported/RASM/acme/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/p/app/unsupported/RASM/acme/baselines/$COMPILER
/p/app/unsupported/RASM/tools/cprnc/cprnc
8
e3sm_developer
pbs
rasm
44
44
aprun
-n {{ total_tasks }}
-N $SHELL{if [ `./xmlquery --value MAX_MPITASKS_PER_NODE` -gt `./xmlquery --value TOTAL_TASKS` ];then echo `./xmlquery --value TOTAL_TASKS`;else echo `./xmlquery --value MAX_MPITASKS_PER_NODE`;fi;}
--cc depth -d $SHELL{echo `./xmlquery --value MAX_TASKS_PER_NODE`/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc} -j $SHELL{if [ 64 -ge `./xmlquery --value MAX_TASKS_PER_NODE` ];then echo 1;else echo `./xmlquery --value MAX_TASKS_PER_NODE`/64|bc;fi;}
/opt/modules/default/init/perl.pm
/opt/modules/default/init/python.py
/opt/modules/default/init/sh
/opt/modules/default/init/csh
/opt/modules/default/bin/modulecmd perl
/opt/modules/default/bin/modulecmd python
module
module
PrgEnv-intel
PrgEnv-cray
PrgEnv-gnu
PrgEnv-pgi
intel
cce
gcc
cray-parallel-netcdf
cray-parallel-hdf5
pmi
cray-libsci
cray-mpich2
cray-mpich
cray-netcdf
cray-hdf5
cray-netcdf-hdf5parallel
craype-mic-knl
craype-sandybridge
craype-ivybridge
craype
papi
cray-petsc
cray-libsci
esmf
craype
PrgEnv-intel/6.0.9
intel
intel/19.1.3.304
cray-mpich
cray-mpich/7.7.16
cray-hdf5
cray-hdf5-parallel
cray-netcdf-hdf5parallel
cray-parallel-netcdf
netcdf
cray-netcdf/4.7.4.0
cray-hdf5/1.12.0.0
cray-parallel-netcdf/1.12.1.0
cray-libsci
cmake/intel-19.1.3.304/3.21.0
cray-libsci/20.09.1
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
1
1
/opt/cray/pe/netcdf/4.7.4.0/intel/19.1
64M
spread
threads
FALSE
$ENV{NETCDF_DIR}
yes
yes
NavyDSRC Cray EX, os is CNL, 128 pes/node, batch system is PBS
narwhal
CNL
intel
mpich
NPSCA07935242
/p/work1/projects/RASM/acme
.*
$ENV{WORKDIR}
/p/work1/projects/RASM/acme/inputdata
/p/work1/projects/RASM/acme/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/p/work1/projects/RASM/acme/baselines/$COMPILER
/p/work1/projects/RASM/tools/cprnc/cprnc
8
e3sm_developer
pbs
e3sm
128
128
TRUE
aprun
-n {{ total_tasks }}
-N $SHELL{if [ `./xmlquery --value MAX_MPITASKS_PER_NODE` -gt `./xmlquery --value TOTAL_TASKS` ];then echo `./xmlquery --value TOTAL_TASKS`;else echo `./xmlquery --value MAX_MPITASKS_PER_NODE`;fi;}
--cc depth -d $SHELL{echo `./xmlquery --value MAX_TASKS_PER_NODE`/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc} -j $SHELL{if [ 64 -ge `./xmlquery --value MAX_TASKS_PER_NODE` ];then echo 1;else echo `./xmlquery --value MAX_TASKS_PER_NODE`/64|bc;fi;}
/opt/cray/pe/modules/3.2.11.5/init/perl.pm
/opt/cray/pe/modules/3.2.11.5/init/python.py
/opt/cray/pe/modules/3.2.11.5/init/sh
/opt/cray/pe/modules/3.2.11.5/init/csh
/opt/cray/pe/modules/3.2.11.5/bin/modulecmd perl
/opt/cray/pe/modules/3.2.11.5/bin/modulecmd python
module
module
PrgEnv-intel
PrgEnv-cray
PrgEnv-gnu
PrgEnv-nvidia
PrgEnv-aocc
intel
cray-mpich
cray-hdf5
cray-hdf5-parallel
cray-netcdf
cray-netcdf-hdf5parallel
cray-parallel-netcdf
PrgEnv-intel/8.0.0
intel
intel-classic
intel-classic/2021.3.0
cray-mpich
cray-mpich/8.1.14
cray-netcdf/4.7.4.7
cray-hdf5/1.12.0.7
cray-parallel-netcdf/1.12.1.7
cray-libsci
cray-libsci/21.08.1.2
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
1
1
64M
spread
threads
FALSE
yes
yes
AFRL Cray EX, os is CNL, 128 pes/node, batch system is PBS
warhawk
CNL
intel
mpich
NPSCA07935242
/p/work1/projects/RASM/acme
.*
$ENV{WORKDIR}
/p/work1/projects/RASM/acme/inputdata
/p/work1/projects/RASM/acme/inputdata/atm/datm7
$CIME_OUTPUT_ROOT/archive/$CASE
/p/work1/projects/RASM/acme/baselines/$COMPILER
/p/work1/projects/RASM/tools/cprnc/cprnc
8
e3sm_developer
pbs
rasm
128
128
aprun
-n {{ total_tasks }}
-N $SHELL{if [ `./xmlquery --value MAX_MPITASKS_PER_NODE` -gt `./xmlquery --value TOTAL_TASKS` ];then echo `./xmlquery --value TOTAL_TASKS`;else echo `./xmlquery --value MAX_MPITASKS_PER_NODE`;fi;}
--cc depth -d $SHELL{echo `./xmlquery --value MAX_TASKS_PER_NODE`/`./xmlquery --value MAX_MPITASKS_PER_NODE`|bc} -j $SHELL{if [ 64 -ge `./xmlquery --value MAX_TASKS_PER_NODE` ];then echo 1;else echo `./xmlquery --value MAX_TASKS_PER_NODE`/64|bc;fi;}
/p/app/Modules/4.7.1/init/perl.pm
/p/app/Modules/4.7.1/init/python.py
/p/app/Modules/4.7.1/init/sh
/p/app/Modules/4.7.1/init/csh
/p/app/Modules/4.7.1/bin/modulecmd perl
/p/app/Modules/4.7.1/bin/modulecmd python
module
module
PrgEnv-intel
PrgEnv-cray
PrgEnv-gnu
PrgEnv-nvidia
craype-x86-rome
craype-network-ofi
cray-dsmml
perftools-base
cray-libsci
cce
intel
gcc
cray-mpich
cray-hdf5
cray-hdf5-parallel
cray-netcdf
cray-netcdf-hdf5parallel
cray-parallel-netcdf
PrgEnv-intel/8.0.0
intel
intel-classic/2021.3.0
cray-mpich
cray-mpich/8.1.9
cray-pals/1.0.17
cray-netcdf/4.7.4.4
cray-hdf5/1.12.0.4
cray-parallel-netcdf/1.12.1.4
cray-libsci
cmake/3.21.4
cray-libsci/21.08.1.2
$CIME_OUTPUT_ROOT/$CASE/run
$CIME_OUTPUT_ROOT/$CASE/bld
0.1
1
1
64M
spread
threads
FALSE
yes
yes
${EXEROOT}/e3sm.exe
>> e3sm.log.$LID 2>&1