JURECA Deployment
- Make sure the SSH key from your JURECA account is added to bbpcode and that the SSH config is in place (a quick connectivity check is sketched after the config):
$ cat ~/.ssh/config
Host bbpcode.epfl.ch
HostName bbpcode.epfl.ch
User GASPER_USERNAME
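To confirm the key and config are picked up, a minimal connectivity check can be run (the exact greeting depends on the server setup):
$ ssh -T bbpcode.epfl.ch
# key-based authentication should succeed without a password prompt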
- Install the simulation stack using the script below (a quick check of the result is sketched after the invocation examples):
#!/bin/bash
set -x
# the partition (--cluster or --booster) must be passed as an argument
if [ "$1" == "" ]; then
    echo "Error: pass --cluster or --booster"
    exit 1
fi
module --force purge all
module use /usr/local/software/jureca/OtherStages
system=
while [ "$1" != "" ]; do
case $1 in
-c | --cluster) shift
system=cluster
module load Architecture/Haswell
module load Stages/2018a
module load Intel IntelMPI
;;
-b | --booster ) system=booster
module load Architecture/KNL
module load Stages/2018a
module load Intel IntelMPI
;;
* ) echo "Error : --cluster or --booster"
exit 1
esac
shift
done
set -e
# Deployment directory
DEPLOYMENT_HOME=`pwd`/HBP/jureca-$system/$(date '+%d-%m-%Y')
mkdir -p $DEPLOYMENT_HOME
mkdir -p $DEPLOYMENT_HOME/sources
mkdir -p $DEPLOYMENT_HOME/install
# Clone spack repository and setup environment
cd $DEPLOYMENT_HOME/sources
git clone https://github.com/BlueBrain/spack.git
export SPACK_ROOT=`pwd`/spack
export PATH=$SPACK_ROOT/bin:$PATH
source $SPACK_ROOT/share/spack/setup-env.sh
# Copy configurations
mkdir -p $SPACK_ROOT/etc/spack/defaults/linux/
cp $SPACK_ROOT/sysconfig/jureca-$system/* $SPACK_ROOT/etc/spack/defaults/linux/
# Directory for deployment
export SOFTS_DIR_PATH=$DEPLOYMENT_HOME/install/
mkdir -p $SOFTS_DIR_PATH
spack spec -I neurodamus@hippocampus~coreneuron~syntool ^intel-mpi-external
spack install --dirty --keep-stage -v neurodamus@hippocampus~coreneuron~syntool ^intel-mpi-external
spack spec -I -l py-bluepyopt ^python@2.7.15 ^intel-mpi-external
spack install --dirty --keep-stage -v py-bluepyopt ^python@2.7.15 ^intel-mpi-external
spack module tcl refresh --delete-tree -y
# update the "latest" symbolic link to point at today's deployment
mkdir -p $DEPLOYMENT_HOME/../install
cd $DEPLOYMENT_HOME/../install
rm -f latest
ln -s ../$(date '+%d-%m-%Y')/install/modules/tcl/linux-centos7-x86_64 latest
and invoke it as:
$ bash -x hbp-cluster.sh --booster # for booster partition
# or
$ bash -x hbp-cluster.sh --cluster # for cluster partition
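After the script finishes, it is worth checking that the latest symbolic link points at today's deployment and that the generated TCL module tree is in place; a minimal check, with paths following the layout created by the script above (relative to the directory where it was run):
$ ls -l HBP/jureca-cluster/install/latest   # or HBP/jureca-booster/install/latest
$ ls HBP/jureca-cluster/install/latest      # should list the generated modules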
- Notify users of the newly deployed modules; they can be made visible by extending MODULEPATH (a quick check is sketched after the exports):
export MODULEPATH=$MODULEPATH:/p/project/cvsk25/vsk2514/HBP/jureca-cluster/install/latest
# or
export MODULEPATH=$MODULEPATH:/p/project/cvsk25/vsk2514/HBP/jureca-booster/install/latest
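With MODULEPATH extended, users can confirm that the deployed modules are visible, for example:
$ module avail neurodamus
$ module avail py-bluepyopt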
- Sample job script for the CPU/cluster partition (a submission sketch follows the script):
#!/bin/bash
#SBATCH --nodes=100
#SBATCH --ntasks-per-node=24
#SBATCH --job-name=test-hippo-cluster
#SBATCH --time=6:00:00
#SBATCH --partition=batch
#SBATCH --mail-user=email
#SBATCH --mail-type=ALL
#SBATCH --account=vsk25
module --force purge all
module use /usr/local/software/jureca/OtherStages
module load Architecture/Haswell
module load Stages/2018a
module load Intel IntelMPI imkl
module load HDF5
export MODULEPATH=/p/project/cvsk25/vsk2514/HBP/jureca-cluster/install/latest:$MODULEPATH
# module load py-bluepyopt
module load neurodamus/hippocampus
module list
export I_MPI_DEBUG=5
srun --cpus-per-task=1 special -NFRAME 1024 $HOC_LIBRARY_PATH/init.hoc -mpi
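Assuming the job script above is saved as, e.g., job-cluster.sbatch (a hypothetical file name), it can be submitted and checked with the standard SLURM commands:
$ sbatch job-cluster.sbatch
$ squeue -u $USER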
- Sample job script for the KNL/booster partition (a monitoring sketch follows the script):
#!/bin/bash
#SBATCH --nodes=100
#SBATCH --ntasks-per-node=68
#SBATCH --job-name=test-full
#SBATCH --time=12:00:00
#SBATCH --partition=largebooster
#SBATCH --mail-user=email
#SBATCH --mail-type=ALL
#SBATCH --account=vsk25
module --force purge all
module use /usr/local/software/jureca/OtherStages
module load Architecture/KNL
module load Stages/2018a
module load Intel IntelMPI imkl
module load HDF5
export MODULEPATH=/p/project/cvsk25/vsk2514/HBP/jureca-booster/install/latest:$MODULEPATH
# module load py-bluepyopt
module load neurodamus/hippocampus
module list
export I_MPI_DEBUG=5
srun --cpus-per-task=1 special -NFRAME 1024 $HOC_LIBRARY_PATH/init.hoc -mpi
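Submission works as for the cluster partition; once the job starts, progress can be followed through the default SLURM output file and the accounting database (the job id is printed by sbatch):
$ sbatch job-booster.sbatch   # hypothetical file name for the script above
$ tail -f slurm-<jobid>.out
$ sacct -j <jobid> --format=JobID,State,Elapsed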