---
- name: config warewulf interface
  lineinfile: dest=/etc/warewulf/provision.conf regexp="network device =" line="network device = {{ private_interface }}"
  #" for vim
- name: enable tftp
  lineinfile: dest=/etc/xinetd.d/tftp regexp="^(\s+)disable(\s+)= yes" line="\1disable\2= no" backrefs=yes
- name: enable httpd access to /srv/warewulf
  command: chcon -t httpd_sys_content_t -R /srv/warewulf
  # target: '/srv/warewulf(/.*)?'
  # setype: httpd_sys_content_t
  # state: present
# No need for this in OHPC-1.3.3
# - name: config warewulf http
#   lineinfile:
#     dest: /etc/httpd/conf.d/warewulf-httpd.conf
#     regexp: "{{ item.regex }}"
#     line: "{{ item.line }}"
#     state: "{{ item.state }}"
#     backrefs: yes
#   with_items:
#     - { regex: '^(.*)cgi-bin>$', line: '\1cgi-bin> \n Require all granted', state: present}
#     - { regex: '(\s+)Allow from all', line: '\1Require all granted', state: present }
#     - { regex: 'Order allow,deny', line: "", state: absent }
- name: reload xinetd
  service: name=xinetd state=restarted
- name: enable mariadb
  service: name=mariadb enabled=yes
- name: restart mariadb
  service: name=mariadb state=restarted
- name: enable httpd
  service: name=httpd enabled=yes
- name: restart httpd
  service: name=httpd state=restarted
- name: exportfs home entry on headnode
  lineinfile: line="/home {{ private_network }}/{{ private_network_mask }}(rw,no_subtree_check,fsid=10,no_root_squash)" dest=/etc/exports state=present
- name: exportfs opt entry on headnode
  lineinfile: line="/opt/ohpc/pub {{ private_network }}/{{ private_network_mask }}(ro,no_subtree_check,fsid=11)" dest=/etc/exports state=present
- name: exportfs software dir entry on headnode
  lineinfile: line="/export {{ private_network }}/{{ private_network_mask }}(rw,no_subtree_check,fsid=12)" dest=/etc/exports state=present
- name: exportfs reload
  command: exportfs -a
- name: restart rpcbind
  service: name=rpcbind state=restarted enabled=yes
- name: restart nfs-server
  service: name=nfs-server state=restarted enabled=yes
- name: initialize database
  command: wwinit database
- name: cluster ssh keys
  command: wwinit ssh_keys
  args:
    chdir: /root/
    creates: /etc/warewulf/vnfs/ssh/ssh_host_key.pub
# need to copy these to localhost first!
- name: copy key back to management machine
  fetch:
    src: /root/.ssh/cluster.pub
    dest: ./cluster_root.pub
    flat: yes
#
# DHCP Server Configuration file.
# see /usr/share/doc/dhcp*/dhcpd.conf.example
# see dhcpd.conf(5) man page
#
{% for host in groups['headnode'] %}
server-identifier {{ hostvars[host]['inventory_hostname'] }};
{% endfor %}
subnet {{ private_network }} netmask {{ private_network_long_netmask }} {
    not authoritative;
    option subnet-mask {{ private_network_long_netmask }};
}
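A quick, hedged way to sanity-check the rendered result on the head node is dhcpd's built-in syntax test; the /etc/dhcp/dhcpd.conf path below is an assumption about where Warewulf writes the generated file:

# hypothetical check; adjust the path if your Warewulf provisioning writes dhcpd.conf elsewhere
dhcpd -t -cf /etc/dhcp/dhcpd.conf && echo "dhcpd.conf parses cleanly"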
---
- name: start and enable munge
  service:
    name: munge
    state: started
    enabled: yes
# General Resource definitions for SLURM#
#########################################
REPO_NAME="os-base"
YUM_CONF="/root/yum-ww.conf"
YUM_CMD="yum -c $CHROOTDIR/$YUM_CONF --tolerant --installroot $CHROOTDIR -y"
sanity_check() {
    if [ ! -x $WAREWULF_PREFIX/bin/cluster-env ]; then
        echo "warewulf-cluster package is recommended on nodes you are building VNFS images on.";
        sleep 2;
    else
        $WAREWULF_PREFIX/bin/cluster-env;
    fi
    if ! rpm -q yum >/dev/null 2>&1; then
        echo "ERROR: Could not query RPM for YUM"
        return 1
    fi
    return 0
}
prechroot() {
    if [ -n "$OS_MIRROR" ]; then
        YUM_MIRROR="$OS_MIRROR"
    fi
    if [[ -z "$YUM_MIRROR" && -z "$INSTALL_ISO" ]]; then
        echo "ERROR: You must define the \$YUM_MIRROR variable in the template"
        cleanup
        exit 1
    fi
    VERSION=`rpm -qf /etc/redhat-release --qf '%{VERSION}\n'`
    mkdir -p $CHROOTDIR
    mkdir -p $CHROOTDIR/etc
    cp -rap /etc/yum.conf /etc/yum.repos.d $CHROOTDIR/etc
    sed -i -e "s/\$releasever/$VERSION/g" `find $CHROOTDIR/etc/yum* -type f`
    YUM_CONF_DIRNAME=`dirname $YUM_CONF`
    mkdir -m 0755 -p $CHROOTDIR/$YUM_CONF_DIRNAME
    > $CHROOTDIR/$YUM_CONF
    echo "[main]" >> $CHROOTDIR/$YUM_CONF
    echo 'cachedir=/var/cache/yum/$basearch/$releasever' >> $CHROOTDIR/$YUM_CONF
    echo "keepcache=0" >> $CHROOTDIR/$YUM_CONF
    echo "debuglevel=2" >> $CHROOTDIR/$YUM_CONF
    echo "logfile=/var/log/yum.log" >> $CHROOTDIR/$YUM_CONF
    echo "exactarch=1" >> $CHROOTDIR/$YUM_CONF
    echo "obsoletes=1" >> $CHROOTDIR/$YUM_CONF
    echo "gpgcheck=0" >> $CHROOTDIR/$YUM_CONF
    echo "plugins=1" >> $CHROOTDIR/$YUM_CONF
    echo "reposdir=0" >> $CHROOTDIR/$YUM_CONF
    echo "" >> $CHROOTDIR/$YUM_CONF
    if [ -z "$INSTALL_ISO" ]; then
        echo "[$REPO_NAME]" >> $CHROOTDIR/$YUM_CONF
        echo 'name=Linux $releasever - $basearch' >> $CHROOTDIR/$YUM_CONF
        echo "baseurl=$YUM_MIRROR" >> $CHROOTDIR/$YUM_CONF
        echo "enabled=1" >> $CHROOTDIR/$YUM_CONF
        echo "gpgcheck=0" >> $CHROOTDIR/$YUM_CONF
    else
        for i in `ls -d $MEDIA_MOUNTPATH.*`; do
            if [ -z "$INSTALLDIRS" ]; then
                if [ -d $i/repodata ]; then
                    # RHEL 6.x
                    INSTALLDIRS="file://$i"
                elif [ -d $i/Server/repodata ]; then
                    # RHEL 5.x
                    INSTALLDIRS="file://$i/Server"
                fi
            else
                INSTALLDIRS="$INSTALLDIRS,file://$i"
            fi
        done
        echo "[$REPO_NAME]" >> $CHROOTDIR/$YUM_CONF
        echo 'name=Linux $releasever - $basearch' >> $CHROOTDIR/$YUM_CONF
        echo "baseurl=$INSTALLDIRS" >> $CHROOTDIR/$YUM_CONF
        echo "enabled=1" >> $CHROOTDIR/$YUM_CONF
        echo "gpgcheck=0" >> $CHROOTDIR/$YUM_CONF
        YUM_MIRROR=$INSTALLDIRS
    fi
    # 03/13/15 karl.w.schulz@intel.com - honor proxy setting if configured on local host
    proxy_host=`grep "^proxy=" /etc/yum.conf`
    if [ $? -eq 0 ]; then
        echo $proxy_host >> $CHROOTDIR/$YUM_CONF
    fi
}
buildchroot() {
    # first install the base package list
    if [ -z "$PKGLIST" ]; then
        echo "ERROR: You must define the \$PKGLIST variable in the template!"
        cleanup
        exit 1
    fi
    $YUM_CMD install $PKGLIST
    if [ $? -ne 0 ]; then
        echo "ERROR: Failed to create chroot"
        return 1
    fi
    # if we have defined additional packages ...
    if [ ${#ADDITIONALPACKAGES[@]} -ne 0 ] ; then
        for PACKAGEGROUP in "${ADDITIONALPACKAGES[@]}"
        do
            $YUM_CMD install $PACKAGEGROUP
            if [ $? -ne 0 ]; then
                echo "ERROR: Failed to add packages from \$PACKAGEGROUP"
                return 1
            fi
        done
    fi
    return 0
}
postchroot() {
    touch $CHROOTDIR/fastboot
    if grep -q rename_device $CHROOTDIR/etc/sysconfig/network-scripts/network-functions; then
        echo "" >> $CHROOTDIR/etc/sysconfig/network-scripts/network-functions
        echo "# This is a kludge added by Warewulf so devices don't get renamed (broke things with IB)" >> $CHROOTDIR/etc/sysconfig/network-scripts/network-functions
        echo "rename_device() { return 0; }" >> $CHROOTDIR/etc/sysconfig/network-scripts/network-functions
    fi
    return 0
}
# vim:filetype=sh:syntax=sh:expandtab:ts=4:sw=4:
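As a rough sketch of how a chroot template like this is consumed: wwmkchroot is pointed at a template name and a target directory, with the mirror passed through the environment. The template name, chroot path, and mirror URL below are assumptions for illustration, not values taken from this repository.

export YUM_MIRROR="http://mirror.centos.org/centos/7/os/x86_64/"   # example mirror, replace with your own
wwmkchroot os-base /opt/ohpc/admin/images/os-base                  # hypothetical template name and chroot path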
# To edit this file start with a cluster line for the new cluster
# Cluster - 'cluster_name':MaxNodesPerJob=50
# Followed by Accounts you want in this fashion (root is created by default)...
# Parent - 'root'
# Account - 'cs':MaxNodesPerJob=5:MaxJobs=4:MaxTRESMins=cpu=20:FairShare=399:MaxWallDuration=40:Description='Computer Science':Organization='LC'
# Any of the options after a ':' can be left out and they can be in any order.
# If you want to add any sub accounts just list the Parent THAT HAS ALREADY
# BEEN CREATED before the account line in this fashion...
# Parent - 'cs'
# Account - 'test':MaxNodesPerJob=1:MaxJobs=1:MaxTRESMins=cpu=1:FairShare=1:MaxWallDuration=1:Description='Test Account':Organization='Test'
# To add users to a account add a line like this after a Parent - 'line'
# User - 'lipari':MaxNodesPerJob=2:MaxJobs=3:MaxTRESMins=cpu=4:FairShare=1:MaxWallDurationPerJob=1
Cluster - 'xcbc-example':Fairshare=1:QOS='normal'
Parent - 'root'
User - 'root':DefaultAccount='root':AdminLevel='Administrator':Fairshare=1
Account - 'test':Description='test users':Organization='xsede':Fairshare=1
Account - 'xcbc-users':Description='xsede users':Organization='xsede':Fairshare=1
Parent - 'test'
User - 'test-user':DefaultAccount='test':Fairshare=1
Parent - 'xcbc-users'
User - 'jecoulte':DefaultAccount='xcbc-users':Fairshare=1
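This flat file uses sacctmgr's bulk-load format; the slurm-server tasks later load it non-interactively with the same command shown here, and sacctmgr can dump the live hierarchy back out for comparison:

sacctmgr -i load /etc/slurm/sacctmgr-heirarchy.cfg    # -i answers yes to prompts; path matches the playbook below
sacctmgr dump xcbc-example file=/tmp/xcbc-example.cfg # export the current hierarchy for this cluster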
---
- name: install OpenHPC base, warewulf, and slurm server
  yum:
    state: latest
    name:
      - "ohpc-base"
      - "ohpc-warewulf"
      - "ohpc-slurm-server"
# - name: yum update
#   yum: name=* state=latest update_cache=yes
- name: add slurm user
  user: name=slurm state=present system=yes
- name: create slurm.conf
  template: src=slurm_conf.j2 dest=/etc/slurm/slurm.conf
- name: create slurmdbd.conf
  template: src=slurmdbd_conf.j2 dest=/etc/slurm/slurmdbd.conf
- name: put the sacctmgr config in /etc/slurm
  template: src=xcbc-example.j2 dest=/etc/slurm/sacctmgr-heirarchy.cfg
- name: set the gres.conf on the master
  copy: src=gres.conf dest=/etc/slurm/gres.conf
- name: Remove innodb log file
  file:
    path: "{{ item }}"
    state: absent
  with_items:
    - /var/lib/mysql/ib_logfile0
    - /var/lib/mysql/ib_logfile1
- name: Update Mariadb setting
  lineinfile:
    dest: /etc/my.cnf
    insertafter: '\[mysqld\]'
    line: "{{ item }}"
    state: present
  with_items:
    - innodb_buffer_pool_size=1024M
    - innodb_log_file_size=64M
    - innodb_lock_wait_timeout=900
- name: restart mariadb
  service:
    name: mariadb
    state: restarted
    enabled: yes
- name: create slurm log folder
  file:
    path: /var/log/slurm
    state: directory
    owner: slurm
    group: slurm
    mode: 0755
- name: initialize slurmdb databases
  mysql_db:
    name: "{{ slurm_acct_db }}"
    state: present
- name: create slurmdb user
  mysql_user:
    name: "{{ slurmdb_sql_user }}"
    password: "{{ slurmdb_sql_pass }}"
    priv: "{{ slurm_acct_db }}.*:ALL"
    state: present
- name: start and enable munge
  service:
    name: munge
    state: started
    enabled: yes
- name: start and enable slurmdbd
  service:
    name: slurmdbd
    state: started
    enabled: yes
- name: insert rhel-xcbc WW template
  copy: src=include-rhel-xcbc dest="{{ template_path }}include-rhel-xcbc"
- name: fix the warewulf wwsh script... ARGH (line 29)
  lineinfile:
    dest: /bin/wwsh
    insertafter: '^\$ENV\{\"PATH\"\}'
    line: "delete @ENV{'PATH', 'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};"
    state: present
- name: fix the warewulf wwnodescan script... ARGH (line 96)
  lineinfile:
    dest: /bin/wwnodescan
    insertafter: '^\$ENV\{\"PATH\"\}'
    line: "delete @ENV{'PATH', 'IFS', 'CDPATH', 'ENV', 'BASH_ENV'};"
    state: present
- name: load sacctmgr config
  command: sacctmgr -i load /etc/slurm/sacctmgr-heirarchy.cfg
- name: start and enable slurmctld
  service:
    name: slurmctld
    state: started
    enabled: yes
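A minimal post-run check on the head node, assuming the play above completed, is to confirm the daemons are running and the cluster was registered by the sacctmgr load:

systemctl status munge slurmdbd slurmctld --no-pager
sacctmgr show cluster   # the cluster_name loaded above should be listed
sinfo                   # partition "low" with node c0 should appear once slurm.conf is in place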
#
# Example slurm.conf file. Please run configurator.html
# (in doc/html) to build a configuration file customized
# for your environment.
#
#
# slurm.conf file generated by configurator.html.
#
# See the slurm.conf man page for more information.
#
ClusterName={{ cluster_name }}
ControlMachine={{ inventory_hostname }}
#ControlAddr=
#BackupController=
#BackupAddr=
#
SlurmUser=slurm
SlurmdUser=root
SlurmctldPort=6817
SlurmdPort=6818
AuthType=auth/munge
#JobCredentialPrivateKey=
#JobCredentialPublicCertificate=
StateSaveLocation=/tmp
SlurmdSpoolDir=/tmp/slurmd
SwitchType=switch/none
MpiDefault=none
SlurmctldPidFile=/var/run/slurmctld.pid
SlurmdPidFile=/var/run/slurmd.pid
ProctrackType=proctrack/pgid
#PluginDir=
#FirstJobId=
ReturnToService=2
#MaxJobCount=
#PlugStackConfig=
#PropagatePrioProcess=
#PropagateResourceLimits=
#PropagateResourceLimitsExcept=
#Prolog=
#Epilog=
#SrunProlog=
#SrunEpilog=
#TaskProlog=
#TaskEpilog=
#TaskPlugin=
#TrackWCKey=no
#TreeWidth=50
#TmpFS=
#UsePAM=
#
# TIMERS
SlurmctldTimeout=300
SlurmdTimeout=300
InactiveLimit=0
MinJobAge=300
KillWait=30
Waittime=0
#
# SCHEDULING
SchedulerType=sched/backfill
#SchedulerAuth=
#SchedulerPort=
#SchedulerRootFilter=
SelectType=select/cons_res
SelectTypeParameters=CR_CPU
FastSchedule=0
#PriorityType=priority/multifactor
#PriorityDecayHalfLife=14-0
#PriorityUsageResetPeriod=14-0
#PriorityWeightFairshare=100000
#PriorityWeightAge=1000
#PriorityWeightPartition=10000
#PriorityWeightJobSize=1000
#PriorityMaxAge=1-0
#
# LOGGING
SlurmctldDebug=3
SlurmctldLogFile=/var/log/slurm/slurmctld.log
SlurmdDebug=3
SlurmdLogFile=/var/log/slurm/slurmd.log
JobCompType=jobcomp/none
#JobCompLoc=
#
# ACCOUNTING
JobAcctGatherType=jobacct_gather/linux
JobAcctGatherFrequency=30
#
AccountingStorageType=accounting_storage/slurmdbd
AccountingStorageHost={{ inventory_hostname }}
#AccountingStorageLoc=/var/log/slurm/slurmacct.log
AccountingStorageLoc={{ slurm_acct_db }}
AcctGatherNodeFreq=30
AccountingStorageEnforce=associations,limits
AccountingStoragePort={{ slurmdb_storage_port }}
#AccountingStoragePass=
#AccountingStorageUser=
#
#GENERAL RESOURCE
GresTypes={{ gres_types|default('""') }}
#
#EXAMPLE CONFIGURATION - copy,comment out, and edit
#
#COMPUTE NODES
NodeName=c0 Sockets=1 CoresPerSocket=1 State=UNKNOWN
#NodeName=compute-1 Sockets=1 CoresPerSocket=1 State=UNKNOWN
#NodeName=gpu-compute-1 Gres=gpu:gtx_TitanX:4 Sockets=2 CoresPerSocket=8 State=UNKNOWN
# PARTITIONS
#PartitionName=high Nodes=compute-[0-1] Default=YES MaxTime=INFINITE State=UP PriorityTier=10
#PartitionName=gpu Nodes=gpu-compute-1 Default=YES MaxTime=INFINITE State=UP PriorityTier=5 AllowGroups=slurmusers
PartitionName=low Nodes=c0 Default=YES MaxTime=2-00:00:00 State=UP
#
# Example slurmdbd.conf file.
#
# See the slurmdbd.conf man page for more information.
#
# Archive info
#ArchiveJobs=yes
#ArchiveDir="/tmp"
#ArchiveSteps=yes
#ArchiveScript=
#JobPurge=12
#StepPurge=1
#
# Authentication info
AuthType=auth/munge
#AuthInfo=/var/run/munge/munge.socket.2
#
# slurmDBD info
DbdAddr=localhost
DbdHost=localhost
DbdPort={{ slurmdb_storage_port }}
SlurmUser=slurm
#MessageTimeout=300
DebugLevel=3
#DefaultQOS=normal,standby
LogFile=/var/log/slurm/slurmdbd.log
PidFile=/var/run/slurmdbd.pid
#PluginDir=/usr/lib/slurm
#PrivateData=accounts,users,usage,jobs
#TrackWCKey=yes
#
# Database info
StorageType=accounting_storage/mysql
StorageHost=localhost
StoragePort={{ slurmdb_port }}
StoragePass={{ slurmdb_sql_pass }}
StorageUser={{ slurmdb_sql_user }}
StorageLoc={{ slurm_acct_db }}
# To edit this file start with a cluster line for the new cluster
# Cluster - 'cluster_name':MaxNodesPerJob=50
# Followed by Accounts you want in this fashion (root is created by default)...
# Parent - 'root'
# Account - 'cs':MaxNodesPerJob=5:MaxJobs=4:MaxTRESMins=cpu=20:FairShare=399:MaxWallDuration=40:Description='Computer Science':Organization='LC'
# Any of the options after a ':' can be left out and they can be in any order.
# If you want to add any sub accounts just list the Parent THAT HAS ALREADY
# BEEN CREATED before the account line in this fashion...
# Parent - 'cs'
# Account - 'test':MaxNodesPerJob=1:MaxJobs=1:MaxTRESMins=cpu=1:FairShare=1:MaxWallDuration=1:Description='Test Account':Organization='Test'
# To add users to a account add a line like this after a Parent - 'line'
# User - 'lipari':MaxNodesPerJob=2:MaxJobs=3:MaxTRESMins=cpu=4:FairShare=1:MaxWallDurationPerJob=1
Cluster - '{{ cluster_name }}':Fairshare=1:QOS='normal'
Parent - 'root'
User - 'root':DefaultAccount='root':AdminLevel='Administrator':Fairshare=1
Account - 'test':Description='test users':Organization='xsede':Fairshare=1
Account - 'xcbc-users':Description='xsede users':Organization='xsede':Fairshare=1
Parent - 'test'
User - 'test-user':DefaultAccount='test':Fairshare=1
Parent - 'xcbc-users'
{% for user in cluster_users %}
User - '{{ user }}':DefaultAccount='xcbc-users':Fairshare=1
{% endfor %}
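For illustration only, with hypothetical values cluster_users: [alice, bob], the loop at the end of this template renders to:

User - 'alice':DefaultAccount='xcbc-users':Fairshare=1
User - 'bob':DefaultAccount='xcbc-users':Fairshare=1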
---
- name: Install Anaconda3 for jupyter.
  shell: |
    source /etc/profile.d/lmod.sh
    export EASYBUILD_PREFIX={{ easybuild_prefix }}
    module load EasyBuild
    eb Anaconda3-5.3.0.eb --try-toolchain-name=dummy -r --force
  become_user: build
  args:
    executable: /bin/bash
- name: Install nb_conda_kernels to manage jupyter kernels
  shell: |
    source /etc/profile.d/lmod.sh
    export EASYBUILD_PREFIX={{ easybuild_prefix }}
    module load Anaconda3
    conda install -y nb_conda_kernels
  become_user: root
  args:
    executable: /bin/bash
- name: install the latest version of libXtst
  yum:
    name: libXt
    state: latest
- name: Create directory
  file:
    path: "{{ matlab_clustershare }}"
    state: directory
    mode: 0755
- name: Download matlab
  get_url:
    url: "{{ matlab_download_url }}"
    dest: "{{ matlab_destination }}"
- name: Extract matlab
  unarchive:
    src: "{{ matlab_destination }}"
    dest: "{{ matlab_clustershare }}"
    remote_src: yes
- name: Create directory
  file:
    path: "{{ matlab_module_path }}/{{ matlab_module_appdir }}"
    state: directory
    mode: 0755
- name: Copy modulefile from template to module path
  template:
    src: "{{ matlab_module_file }}"
    dest: "{{ matlab_module_path }}/{{ matlab_module_appdir }}/{{ matlab_module_file }}"
#%Module
set ver {{ matlab_ver }}
set matlabroot {{ matlab_install_root }}
set url {{ matlab_docs_url }}
set msg "This module adds Matlab $ver to various paths\n\nSee $url for usage examples\n"
proc ModulesHelp { } {
    global msg
    puts stderr $msg
}
module-whatis $msg
setenv MATLAB $matlabroot
setenv MATLABROOT $matlabroot
setenv MATLAB_HOME $matlabroot
setenv MLM_LICENSE_FILE {{ matlab_license_file }}
prepend-path PATH $matlabroot/bin
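Assuming the module directory created by the tasks above is on MODULEPATH (or is added with module use), a user session would look roughly like this; the module name depends on the matlab_module_appdir and matlab_module_file variables:

module use {{ matlab_module_path }}                               # only needed if the path is not already searched
module load {{ matlab_module_appdir }}/{{ matlab_module_file }}   # name depends on the role variables
matlab -nodisplay -r "disp(version); exit"                        # quick smoke test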
- name: Create directory
  file:
    path: "{{ sas_clustershare }}/{{ sas_module_file }}/SASFoundation/{{ sas_module_file }}"
    state: directory
    mode: 0755
- name: Install SAS (Executable file to run xfce4desktop)
  template:
    src: "{{ sas_module_appdir }}"
    dest: "{{ sas_clustershare }}/{{ sas_module_file }}/SASFoundation/{{ sas_module_file }}/{{ sas_module_appdir }}"
    mode: a+x
- name: Create modules directory
  file:
    path: "{{ sas_module_path }}/{{ sas_module_appdir }}"
    state: directory
    mode: 0755
- name: Copy modulefile from template to module path
  template:
    src: "{{ sas_module_file }}"
    dest: "{{ sas_module_path }}/{{ sas_module_appdir }}/{{ sas_module_file }}"
#%Module1.0####################################################################
##
## mymodule modulefile
##
## Sets up the SAS environment
##
set ver 9.4
set name sas
#set loading [module-info mode load]
#set subname [lrange [split $name - ] 0 0 ]
proc ModulesHelp { } {
    global name ver
    puts stderr "\tThis module sets the environment for $name v$ver"
}
module-whatis "Set environment variables to use $name version $ver"
set base /export/apps/$name/$ver
## Add bin directories to the path
prepend-path PATH $base/SASFoundation/9.4
if { [ module-info mode load ] } {
    puts stderr "Note: $name $ver environment loaded."
}
#!/bin/bash
xfce4-terminal
---
- name: Create RegUser on OHPC
  user:
    name: "{{ RegUser_app_user }}"
    comment: "{{ RegUser_app_user_full_name }}"
    shell: /bin/bash
    createhome: yes
    home: /home/{{ RegUser_app_user }}
    state: present
- name: Create Cluster keys for RegUser on OHPC
  script: /usr/bin/cluster-env
  become_user: "{{ RegUser_app_user }}"
- name: WareWulf Sync for RegUser on OHPC
  command: wwsh file resync passwd group shadow
vagrant:$apr1$skdJeq64$2xEe6FBObbYeIsEOMwQaX/
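The line above is a standard Apache htpasswd entry (the $apr1$ prefix marks Apache's MD5 scheme); an equivalent entry for a different user could be generated with something like:

htpasswd -nbm someuser 'CHANGE_ME'   # -n print to stdout, -b read password from the command line, -m use apr1/MD5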
---
- name: Enable the Software Collections repository
  yum:
    name: "centos-release-scl"
    state: present
  when: ansible_distribution == "CentOS"
- name: Enable the Software Collections repository on RHEL 7
  rhsm_repository:
    name: rhel-server-rhscl-7-rpms
    state: enabled
  when: ansible_distribution == "RedHat" and ansible_distribution_major_version == '7'
- name: Add Open OnDemand’s repository hosted by the Ohio Supercomputer Center
  yum:
    name: "{{ ood_rpm_repo }}"
    state: present
- name: Install OnDemand and all of its dependencies
  yum:
    name: "ondemand"
    state: present
- name: Create clusters.d directory
  file:
    path: /etc/ood/config/clusters.d
    state: directory
- name: Add cluster configuration files
  template:
    src: cluster.yml
    dest: /etc/ood/config/clusters.d/{{ cluster_name }}.yml
- name: Create interactive desktop settings directory
  file:
    path: /etc/ood/config/apps/bc_desktop
    state: directory
- name: Add interactive desktop settings
  template:
    src: bc_desktop/cluster.yml
    dest: /etc/ood/config/apps/bc_desktop/{{ cluster_name }}.yml
- name: Create interactive desktop job submit script directory
  file:
    path: /etc/ood/config/apps/bc_desktop/submit
    state: directory
- name: Add interactive desktop job submit script
  template:
    src: bc_desktop/submit.yml.erb
    dest: /etc/ood/config/apps/bc_desktop/submit/submit.yml.erb
- name: Enable reverse proxy
  replace:
    path: /etc/ood/config/ood_portal.yml
    regexp: '{{ item.regexp }}'
    replace: '{{ item.replace }}'
  with_items:
    - { regexp: "^#?host_regex:.*$", replace: "host_regex: '{{ compute_node_glob }}'" }
    - { regexp: "^#?node_uri:.*$", replace: "node_uri: '/node'" }
    - { regexp: "^#?rnode_uri:.*$", replace: "rnode_uri: '/rnode'" }
- name: Stage http authz file for ood
  copy:
    src: htpasswd
    dest: /opt/rh/httpd24/root/etc/httpd/.htpasswd
    owner: root
    group: root
    mode: 0644
- name: Build the updated Apache config
  command: /opt/ood/ood-portal-generator/sbin/update_ood_portal
  ignore_errors: yes
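update_ood_portal only regenerates the Apache vhost configuration; the web server still has to pick it up. A hedged follow-up step (the service names depend on whether this OnDemand release uses the SCL Apache, which the .htpasswd path above suggests):

systemctl try-restart httpd24-httpd httpd24-htcacheclean   # SCL Apache used by older OnDemand releases
# on newer releases that use the system Apache, the equivalent would be:
# systemctl try-restart httpd htcacheclean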
---
title: "HPC Desktop"
cluster: "{{ cluster_name }}"
attributes:
  desktop: "xfce"