diff --git a/README.md b/README.md
index 82aff5fb5673f5e953231ad71ed0b4c77c4efbf8..df1627eae9fa9f0d655ef5d986bb4712fca4bd5c 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,54 @@
 # PyMol_ray_pipeline
-Ray-trace a PyMol session on cheaha
\ No newline at end of file
+Ray-trace a PyMOL session on Cheaha
+
+WARNING: PyMOL licensing issues have not been addressed.
+
+A sample session to render is provided in test_1aon_struct_spectrum.pse,
+which comes from the "[Huge Surfaces](https://pymolwiki.org/index.php/Huge_surfaces)" page on the PyMOLWiki.
+
+This project provides two different ways to run a PyMOL ray-tracing job:
+
+1. a bash shell script that can be submitted with sbatch
+2. a Snakemake pipeline script (coming soon)
+
+## sbatch usage
+
+By default, the job script loads the session "test_1aon_struct_spectrum.pse" and creates the image "test_1aon_struct_spectrum.ray.1200x900.300dpi.png", which takes about 90 minutes to run.
+
+```
+module load Anaconda3/5.3.0
+git clone git@gitlab.rc.uab.edu:CCTS-Informatics-Pipelines/pymol_ray_pipeline.git
+cd pymol_ray_pipeline
+sbatch job.80s_ray_1200x900_dpi300.sh
+```
+
+This should launch a cluster job, which will wait in the queue.
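+
+While the job is waiting or running, you can also check on it directly with standard SLURM commands (a minimal sketch; replace 2107497 with the job ID that sbatch prints):
+
+```
+# list your own pending and running jobs, with state and elapsed time
+squeue -u $USER
+
+# after the job ends, show its final state, exit code, and run time
+sacct -j 2107497 --format=JobID,JobName,State,ExitCode,Elapsed
+```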
+
+You should get two emails from SLURM:
+ * when the job comes off the queue and starts running
+```
+From: slurm@cheaha.rc.uab.edu [mailto:slurm@cheaha.rc.uab.edu]
+Subject: SLURM Job_id=2107497 Name=pymol_24c_draw_3572x2400 Began, Queued time 01:40:38
+```
+ * when the job finishes successfully (COMPLETED, ExitCode 0)
+```
+From: slurm@cheaha.rc.uab.edu [mailto:slurm@cheaha.rc.uab.edu]
+Subject: SLURM Job_id=2107403 Name=pymol_24c_ray_3572x2400 Ended, Run time 01:49:52, COMPLETED, ExitCode 0
+```
+ * or when it fails (FAILED, ExitCode 1)
+```
+From: slurm@cheaha.rc.uab.edu [mailto:slurm@cheaha.rc.uab.edu]
+Subject: SLURM Job_id=2107403 Name=pymol_24c_ray_3572x2400 Failed, Run time 01:49:52, FAILED, ExitCode 1
+```
+
+## Snakemake usage (not complete)
+
+```
+module load snakemakeslurm/4.8.0/1 Anaconda3/5.3.0 dvctools/0.9
+git clone git@gitlab.rc.uab.edu:CCTS-Informatics-Pipelines/pymol_ray_pipeline.git
+cd pymol_ray_pipeline
+snakemakeslurm test_ray
+```
diff --git a/job.80s_ray_1200x900_dpi300.sh b/job.80s_ray_1200x900_dpi300.sh
new file mode 100644
index 0000000000000000000000000000000000000000..df36e040d697c2c4115c66cece2cbe230899f942
--- /dev/null
+++ b/job.80s_ray_1200x900_dpi300.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#SBATCH --job-name pymol_ray_24core_1200x900_300dpi
+#SBATCH --ntasks=1            ## number of PROCESSES
+# ====== entire hi-mem node ======
+#SBATCH --cpus-per-task=24    ## number of CPUs per task
+#SBATCH --mem-per-cpu=10000   ## memory per core used (in MB); to request total memory instead, use --mem=
+##SBATCH --mem-per-cpu=15000  ## memory per core used (in MB); to request total memory instead, use --mem=
+#                             ## runtime in D-HH:MM:SS
+#SBATCH --partition=medium -t 2-02:00:00  ## express(2h), short(12h), medium(2d2h), long(6d6h), interactive(2h)
+#--share                      ## !!!don't share!!!
+#
+##SBATCH --mail-user=BLAZERID@uab.edu
+#SBATCH --mail-type=ALL       ## BEGIN, END, ERROR, ALL
+#
+#SBATCH --error=logs/%j.%N.err.txt   ## file to which STDERR will be written
+#SBATCH --output=logs/%j.%N.out.txt  ## file to which STDOUT will be written
+
+PSE_FILE="test_1aon_struct_spectrum.pse"
+PNG_FILE=$(basename $PSE_FILE .pse).ray.1200x900.300dpi.png
+
+module purge
+module load shared rc-base
+module load Anaconda3/5.3.0
+
+. /share/apps/rc/software/Anaconda3/5.3.0/etc/profile.d/conda.sh
+
+# create the pymol2 conda environment if it does not already exist (first run only, once per user)
+if [ ! -e ~/.conda/envs/pymol2 ]; then
+    echo "# INSTALLING pymol into ~/.conda/envs/pymol2 using Anaconda"
+    conda create --yes --name pymol2 -c schrodinger pymol
+fi
+conda activate pymol2
+
+date
+pymol -c -d "load $PSE_FILE; set antialias, 2; ray 1200,900; png $PNG_FILE, width=4in, height=3in, dpi=300; quit;"
+date
diff --git a/job.80s_ray_192x192_dpi96.sh b/job.80s_ray_192x192_dpi96.sh
new file mode 100644
index 0000000000000000000000000000000000000000..2feba1049bb36699cbb8f3ddddc2da8c7c468e63
--- /dev/null
+++ b/job.80s_ray_192x192_dpi96.sh
@@ -0,0 +1,42 @@
+#!/bin/bash
+#
+# fast-running test script
+#
+#SBATCH --job-name pymol_ray_8core_192x192_96dpi
+#SBATCH --ntasks=1            ## number of PROCESSES
+#SBATCH --cpus-per-task=8     ## number of CPUs per task
+#SBATCH --mem-per-cpu=1000    ## memory per core used (in MB); to request total memory instead, use --mem=
+##SBATCH --mem-per-cpu=15000  ## memory per core used (in MB); to request total memory instead, use --mem=
+#                             ## runtime in D-HH:MM:SS
+#SBATCH --partition=medium -t 2-02:00:00  ## express(2h), short(12h), medium(2d2h), long(6d6h), interactive(2h)
+#--share                      ## !!!don't share!!!
+#
+##SBATCH --mail-user=BLAZERID@uab.edu
+#SBATCH --mail-type=ALL       ## BEGIN, END, ERROR, ALL
+#
+#SBATCH --error=logs/%j.%N.err.txt   ## file to which STDERR will be written
+#SBATCH --output=logs/%j.%N.out.txt  ## file to which STDOUT will be written
+
+PSE_FILE="test_1aon_struct_spectrum.pse"
+PNG_FILE=$(basename $PSE_FILE .pse).ray.192x192.96dpi.png
+
+module purge
+module load shared rc-base
+module load Anaconda3/5.3.0
+
+. /share/apps/rc/software/Anaconda3/5.3.0/etc/profile.d/conda.sh
+
+# create the pymol2 conda environment if it does not already exist (first run only, once per user)
+if [ ! -e ~/.conda/envs/pymol2 ]; then
+    echo "# INSTALLING pymol into ~/.conda/envs/pymol2 using Anaconda"
+    conda create --yes --name pymol2 -c schrodinger pymol
+fi
+conda activate pymol2
+
+date
+pymol -c -d "set max_threads, $SLURM_JOB_CPUS_PER_NODE; load $PSE_FILE; set antialias, 2; ray 192,192; png $PNG_FILE, width=2in, height=2in, dpi=96; quit;"
+date
diff --git a/logs/README.md b/logs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..5bea8d89c350fb56eac63b8ed90f34dd55838961
--- /dev/null
+++ b/logs/README.md
@@ -0,0 +1 @@
+Placeholder README so that the logs/ directory always exists
diff --git a/test_1aon_struct_spectrum.pse b/test_1aon_struct_spectrum.pse
new file mode 100644
index 0000000000000000000000000000000000000000..0304136f838879ab037ddb92fad17d9c33171b0c
Binary files /dev/null and b/test_1aon_struct_spectrum.pse differ