Compare revisions

Commits on Source (247)
Showing 844 additions and 472 deletions
---
- name: Setup node for use as a virtual cheaha node
hosts: default
become: true
roles:
- { name: 'cheaha.node', tags: 'cheaha.node' }
- { name: 'nfs_mounts', tags: 'nfs_mounts' }
- { name: 'ldap_config', tags: 'ldap_config' }
- { name: 'slurm_client', tags: 'slurm_client' }
---
- name: Setup node for use as a virtual cheaha node
hosts: all
become: true
roles:
- { name: 'cheaha.node', tags: 'cheaha.node' }
- { name: 'nfs_mounts', tags: 'nfs_mounts', when: enable_nfs_mounts }
- { name: 'ldap_config', tags: 'ldap_config' }
- { name: 'slurm_client', tags: 'slurm_client', when: enable_slurm_client }
- { name: 'ssh_host_keys', tags: 'ssh_host_keys' }
- { name: 'ssh_proxy_config', tags: 'ssh_proxy_config', when: enable_ssh_proxy_config }
- { name: 'ssl_cert', tags: 'ssl_cert', when: enable_ssl_certs }
- { name: 'rsyslog_config', tags: 'rsyslog_config', when: enable_rsyslog_config }
- { name: 'rewrite_map', tags: 'rewrite_map', when: enable_rewrite_map }
- { name: 'fail2ban', tags: 'fail2ban', when: enable_fail2ban }
- { name: 'install_node_exporter', tags: 'install_node_exporter', when: enable_node_exporter }
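The optional roles above are all gated by boolean switches whose defaults appear in the role defaults further down. A minimal sketch of a vars file that flips them at run time (the file name is illustrative; it could equally live in group_vars or be passed with --extra-vars):

# example-node-vars.yml (illustrative file name)
enable_nfs_mounts: true
enable_slurm_client: false
enable_ssh_proxy_config: false
enable_ssl_certs: false
enable_rsyslog_config: true
enable_rewrite_map: false
enable_fail2ban: false
enable_node_exporter: true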
@@ -4,9 +4,88 @@
yum_repo_files: []
pkg_list: []
slurm_version: 18.08.9
enable_slurm_client: false
# NHC related
nhc_download_url: "https://github.com/mej/nhc/releases/download/1.4.3/lbnl-nhc-1.4.3-1.el7.noarch.rpm"
nhc_download_path: "/tmp"
nhc_git_repo: "https://gitlab.rc.uab.edu/rc/nhc.git"
nhc_git_repo_path: "/tmp/nhc"
root_ssh_key: ""
# cheaha.node related
hostname_lookup_table:
- "10.141.255.254 master.cm.cluster master localmaster.cm.cluster localmaster ldapserver.cm.cluster ldapserver"
domain_search_list:
- openstack.internal
- cm.cluster
nameserver_list:
- 10.141.255.254
# ldap_config related
ldap_cert_path: "/etc/openldap/certs"
ldap_uri: "ldap://ldapserver"
# nfs_mounts related
enable_nfs_mounts: true
use_autofs: false
use_fstab: false
mount_points:
- { "src": "master:/gpfs4", "path": "/gpfs4", "opts": "ro,sync,hard", "mode": "0755" }
- { "src": "master:/gpfs5", "path": "/gpfs5", "opts": "ro,sync,hard", "mode": "0755" }
autofs_mounts:
- { "src": "master:/gpfs4/&", "path": "/gpfs4", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs4", "map_name": "gpfs4", key: "*" }
- { "src": "master:/gpfs5/&", "path": "/gpfs5", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs5", "map_name": "gpfs5", key: "*" }
#SSH Host Keys
S3_ENDPOINT: ""
SSH_HOST_KEYS_S3_BUCKET: ""
SSH_HOST_KEYS_S3_OBJECT: ""
# AWS credentials
LTS_ACCESS_KEY: ""
LTS_SECRET_KEY: ""
# ssh proxy
enable_ssh_proxy_config: false
sshpiper_dest_dir: "/opt/sshpiper"
# rsyslog
enable_rsyslog_config: true
rsyslog_target: "*.* @master:514"
# ssl certs
enable_ssl_certs: false
ssl_cert_s3_bucket: ""
ssl_cert_key_location: "/etc/pki/tls/private"
ssl_cert_file_location: "/etc/pki/tls/certs"
ssl_cert_key: ""
ssl_cert_file: ""
ssl_cert_chain_file: ""
ssl_apache_config: ""
apache_service: "httpd"
# rewrite map
enable_rewrite_map: false
target_groups:
- {"name": "gpfs4", "host": "login001", "default": True }
- {"name": "gpfs5", "host": "login002", "default": False }
# account app
account_app_port: 8000
# fail2ban
enable_fail2ban: false
maxretry: 1
findtime: 600
bantime: 1200
fail2ban_white_list: "127.0.0.1/8"
# Node Exporter
enable_node_exporter: false
node_exporter_ver: "1.8.2"
node_exporter_filename: "node_exporter-{{ node_exporter_ver }}.linux-amd64"
node_exporter_user: node_exporter
node_exporter_group: node_exporter
node_exporter_port: 9100
---
# cheaha.node related
hostname_lookup_table:
- "172.20.0.24 cheaha-master02.cm.cluster cheaha-master02"
- "172.20.0.22 cheaha-master01.cm.cluster cheaha-master01"
- "172.20.0.25 master.cm.cluster master localmaster.cm.cluster localmaster ldapserver.cm.cluster ldapserver"
domain_search_list:
- cm.cluster
- rc.uab.edu
- ib.cluster
- drac.cluster
- eth.cluster
- ib-hdr.cluster
nameserver_list:
- 172.20.0.25
bright_openldap_path: "/cm/local/apps/openldap"
ldap_cert_path: "{{bright_openldap_path}}/etc/certs"
ldap_uri: "ldaps://ldapserver"
# proxy_config
target_groups:
- {"name": "gpfs5", "host": "login002", "default": False, "authorized_keys":"/gpfs5/data/user/home/$DOWNSTREAM_USER/.ssh/authorized_keys", "private_key":"/gpfs5/data/user/home/$DOWNSTREAM_USER/.ssh/id_ecdsa"}
- {"name": "gpfs4", "host": "login001", "default": True, "authorized_keys":"/gpfs4/data/user/home/$DOWNSTREAM_USER/.ssh/authorized_keys", "private_key":"/gpfs4/data/user/home/$DOWNSTREAM_USER/.ssh/id_ecdsa"}
@@ -5,8 +5,5 @@
roles:
- { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
- { name: 'install_packages', tags: 'install_packages' }
- { name: 'pam_slurm_adopt', tags: 'pam_slurm_adopt' }
- { name: 'install_nhc', tags: 'install_nhc'}
- name: Setup node for use as a virtual cheaha node
ansible.builtin.import_playbook: cheaha.yml
@@ -6,6 +6,3 @@
- { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
- { name: 'install_packages', tags: 'install_packages' }
- { name: 'install_zsh', tags: 'install_zsh' }
- name: Setup node for use as a virtual cheaha node
ansible.builtin.import_playbook: cheaha.yml
@@ -4,15 +4,24 @@
path: /etc/hosts
line: "{{ item }}"
loop:
- "172.20.0.24 cheaha-master02.cm.cluster cheaha-master02"
- "172.20.0.22 cheaha-master01.cm.cluster cheaha-master01"
- "172.20.0.25 master.cm.cluster master localmaster.cm.cluster localmaster ldapserver.cm.cluster ldapserver"
"{{ hostname_lookup_table }}"
- name: Add proper DNS search to lookup other nodes on the cluster
ansible.builtin.lineinfile:
path: /etc/dhcp/dhclient.conf
insertbefore: BOF
line: 'append domain-name " cm.cluster rc.uab.edu ib.cluster drac.cluster eth.cluster ib-hdr.cluster";'
create: true
state: present
- name: Template resolv.conf
ansible.builtin.template:
src: resolv.conf.j2
dest: /etc/resolv.conf
owner: root
group: root
mode: 0644
backup: true
- name: Disable SELinux
ansible.posix.selinux:
@@ -25,6 +34,7 @@
owner: root
group: root
mode: 0644
when: "'cm.repo' in yum_repo_files"
- name: Add ssh key for root access
ansible.posix.authorized_key:
@@ -35,3 +45,7 @@
- name: Set timezone to America/Chicago
community.general.timezone:
name: America/Chicago
retries: 3
delay: 3
register: result
until: not result.failed
search {{ domain_search_list | join(' ') }}
{% for name_server in nameserver_list %}
nameserver {{ name_server }}
{% endfor %}
---
- name: Install fail2ban
ansible.builtin.package:
name: "{{ item }}"
state: present
loop:
- fail2ban
- fail2ban-firewalld
- name: Configure fail2ban
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
backup: true
loop:
- { src: 'jail.local.j2', dest: '/etc/fail2ban/jail.local' }
- { src: 'sshpiperd_filter.local.j2', dest: '/etc/fail2ban/filter.d/sshpiperd.local' }
- { src: 'sshpiperd_jail.local.j2', dest: '/etc/fail2ban/jail.d/sshpiperd.local' }
- name: Activate the firewalld support for fail2ban
ansible.builtin.command:
cmd: mv /etc/fail2ban/jail.d/00-firewalld.conf /etc/fail2ban/jail.d/00-firewalld.local
removes: /etc/fail2ban/jail.d/00-firewalld.conf  # skip once the stock file has already been renamed, so reruns do not fail
- name: Configure firewalld to allow ssh and sshpiper traffic
ansible.posix.firewalld:
port: "{{ item }}"
zone: public
state: enabled
permanent: true
loop:
- 2222/tcp
- 22/tcp
- name: Enable and start firewalld
ansible.builtin.service:
name: firewalld
enabled: true
state: restarted
- name: Enable and start fail2ban
ansible.builtin.service:
name: fail2ban
enabled: true
state: restarted
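As an optional follow-up, sketched here rather than taken from the role, a task can confirm the sshpiperd jail actually loaded after the restart; fail2ban-client status <jail> is the standard client query:

- name: Check that the sshpiperd jail is active (illustrative verification task)
  ansible.builtin.command:
    cmd: fail2ban-client status sshpiperd
  register: sshpiperd_jail_status
  changed_when: false   # read-only query, never reports a change

- name: Show the jail status
  ansible.builtin.debug:
    var: sshpiperd_jail_status.stdout_lines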
[DEFAULT]
banaction = firewalld
bantime = {{ bantime }}
ignoreip = {{ fail2ban_white_list }}
[sshd]
enabled = true
# Refer to https://github.com/fail2ban/fail2ban/wiki/Developing-Regex-in-Fail2ban for developing regex using fail2ban
#
[INCLUDES]
before = common.conf
[DEFAULT]
_daemon = sshpiperd
__iso_datetime = "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:[+-]\d{2}:\d{2}|Z)"
__pref = time=%(__iso_datetime)s level=(?:debug|error)
[Definition]
# Define the prefix regex for the log lines
prefregex = ^<F-MLFID>%(__prefix_line)s%(__pref)s</F-MLFID>\s+<F-CONTENT>.+</F-CONTENT>$
# Failregex to match the specific failure log lines (prefregex is automatically included)
failregex = ^msg="connection from .*failtoban: ip <HOST> too auth many failures"$
ignoreregex =
mode = normal
maxlines = 1
# This configuration will block the remote host after {{maxretry}} failed SSH login attempts.
[sshpiperd]
enabled = true
filter = sshpiperd
logpath = /var/log/messages
port = 22
maxretry = {{ maxretry }}
backend = auto
findtime = {{ findtime }}
---
- name: Download node_exporter binary
ansible.builtin.get_url:
url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_ver }}/{{ node_exporter_filename }}.tar.gz"
dest: "/tmp/{{ node_exporter_filename }}.tar.gz"
- name: Extract node_exporter
ansible.builtin.unarchive:
src: "/tmp/{{ node_exporter_filename }}.tar.gz"
dest: "/tmp"
remote_src: yes
- name: Create system group {{ node_exporter_group }}
ansible.builtin.group:
name: "{{ node_exporter_group }}"
system: true
state: present
- name: Create system user account {{ node_exporter_user }}
ansible.builtin.user:
name: "{{ node_exporter_user }}"
comment: Prometheus node_exporter system account
group: "{{ node_exporter_group }}"
system: true
home: /var/lib/node_exporter
create_home: false
shell: /sbin/nologin
state: present
- name: Copy node_exporter binary
ansible.builtin.copy:
src: "/tmp/{{ node_exporter_filename }}/node_exporter"
dest: /usr/local/bin/node_exporter
remote_src: yes
owner: root
group: root
mode: 0755
- name: Copy systemd unit file
ansible.builtin.template:
src: node_exporter.service.j2
dest: /etc/systemd/system/node_exporter.service
owner: root
group: root
mode: '0644'
- name: Clean up /tmp
ansible.builtin.file:
path: "/tmp/{{ item }}"
state: absent
loop:
- "{{ node_exporter_filename }}.tar.gz"
- "{{ node_exporter_filename }}"
- name: Restart node_exporter service
ansible.builtin.systemd:
daemon_reload: yes
name: node_exporter
state: restarted
enabled: true
- name: Collect facts about system services
ansible.builtin.service_facts:
- name: Configure firewalld to allow prometheus
ansible.posix.firewalld:
port: "{{ node_exporter_port }}/tcp"
zone: public
state: enabled
permanent: true
when:
- "'firewalld.service' in ansible_facts.services"
- ansible_facts.services["firewalld.service"].state == "running"
- name: Enable and start firewalld
ansible.builtin.service:
name: firewalld
enabled: true
state: restarted
when:
- "'firewalld.service' in ansible_facts.services"
- ansible_facts.services["firewalld.service"].state == "running"
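Before the systemd unit template below, one optional smoke test (a sketch, not part of the role) is to poll the metrics endpoint on the configured node_exporter_port once the service restart has gone through:

- name: Verify node_exporter answers on its metrics endpoint (illustrative check)
  ansible.builtin.uri:
    url: "http://localhost:{{ node_exporter_port }}/metrics"
    status_code: 200
  register: metrics_check
  retries: 3
  delay: 5
  until: metrics_check.status == 200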
[Unit]
Description=Node Exporter
After=network.target
[Service]
User={{ node_exporter_user }}
Group={{ node_exporter_group }}
Type=simple
ExecStart=/usr/local/bin/node_exporter --web.listen-address=:{{ node_exporter_port }} --collector.filesystem.mount-points-exclude "^/(dev|proc|run/user/.+|run/credentials/.+|sys|var/lib/docker/.+)($|/)" --collector.filesystem.fs-types-exclude "^(autofs|binfmt_misc|bpf|cgroup|tmpfs|sunrpc|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
[Install]
WantedBy=multi-user.target
@@ -25,7 +25,7 @@
- name: Copy ldap cert(s) into place
ansible.builtin.copy:
src: "{{ item.src }}"
dest: "/cm/local/apps/openldap/etc/certs/{{ item.src }}"
dest: "{{ ldap_cert_path }}/{{ item.src }}"
owner: ldap
group: ldap
mode: 0440
@@ -33,10 +33,11 @@
- { src: ca.pem }
- { src: ldap.key }
- { src: ldap.pem }
when: ldap_uri | regex_search('^ldaps://')
- name: Copy ldap config into place
ansible.builtin.copy:
src: nslcd.conf
ansible.builtin.template:
src: nslcd.conf.j2
dest: /etc/nslcd.conf
owner: root
group: root
@@ -46,5 +47,6 @@
ansible.builtin.service:
name: "{{ item }}"
enabled: yes
state: restarted
loop:
- nslcd
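Once nslcd has been restarted, a hedged way to confirm LDAP lookups work end to end is the ansible.builtin.getent module; the account name below is purely hypothetical and would be replaced with a known directory user:

- name: Confirm an LDAP account resolves through nslcd (illustrative check)
  ansible.builtin.getent:
    database: passwd
    key: some_ldap_user   # hypothetical account name
  changed_when: false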
# This is the configuration file for the LDAP nameservice
# switch library's nslcd daemon. It configures the mapping
# between NSS names (see /etc/nsswitch.conf) and LDAP
# information in the directory.
# See the manual page nslcd.conf(5) for more information.
# The user and group nslcd should run as.
uid nslcd
gid ldap
# The uri pointing to the LDAP server to use for name lookups.
# Multiple entries may be specified. The address that is used
# here should be resolvable without using LDAP (obviously).
#uri ldap://127.0.0.1/
#uri ldaps://127.0.0.1/
#uri ldapi://%2fvar%2frun%2fldapi_sock/
# Note: %2f encodes the '/' used as directory separator
uri {{ ldap_uri }}
# The LDAP version to use (defaults to 3
# if supported by client library)
#ldap_version 3
# The distinguished name of the search base.
base dc=cm,dc=cluster
# The distinguished name to bind to the server with.
# Optional: default is to bind anonymously.
#binddn cn=proxyuser,dc=example,dc=com
# The credentials to bind with.
# Optional: default is no credentials.
# Note that if you set a bindpw you should check the permissions of this file.
#bindpw secret
# The distinguished name to perform password modifications by root by.
#rootpwmoddn cn=admin,dc=example,dc=com
# The default search scope.
#scope sub
#scope one
#scope base
# Customize certain database lookups.
#base group ou=Groups,dc=example,dc=com
#base passwd ou=People,dc=example,dc=com
#base shadow ou=People,dc=example,dc=com
#scope group onelevel
#scope hosts sub
# Bind/connect timelimit.
#bind_timelimit 30
# Search timelimit.
#timelimit 30
# Idle timelimit. nslcd will close connections if the
# server has not been contacted for the number of seconds.
idle_timelimit 240
# Use StartTLS without verifying the server certificate.
#ssl start_tls
#tls_reqcert never
{% if ldap_uri | regex_search('^ldaps://') %}
ssl on
tls_reqcert demand
# CA certificates for server certificate verification
#tls_cacertdir /etc/ssl/certs
tls_cacertfile {{ ldap_cert_path }}/ca.pem
tls_cert {{ ldap_cert_path }}/ldap.pem
tls_key {{ ldap_cert_path }}/ldap.key
{% endif %}
# Seed the PRNG if /dev/urandom is not provided
#tls_randfile /var/run/egd-pool
# SSL cipher suite
# See man ciphers for syntax
#tls_ciphers TLSv1
# Client certificate and key
# Use these, if your server requires client authentication.
# Mappings for Services for UNIX 3.5
#filter passwd (objectClass=User)
#map passwd uid msSFU30Name
#map passwd userPassword msSFU30Password
#map passwd homeDirectory msSFU30HomeDirectory
#map passwd homeDirectory msSFUHomeDirectory
#filter shadow (objectClass=User)
#map shadow uid msSFU30Name
#map shadow userPassword msSFU30Password
#filter group (objectClass=Group)
#map group member msSFU30PosixMember
# Mappings for Services for UNIX 2.0
#filter passwd (objectClass=User)
#map passwd uid msSFUName
#map passwd userPassword msSFUPassword
#map passwd homeDirectory msSFUHomeDirectory
#map passwd gecos msSFUName
#filter shadow (objectClass=User)
#map shadow uid msSFUName
#map shadow userPassword msSFUPassword
#map shadow shadowLastChange pwdLastSet
#filter group (objectClass=Group)
#map group member posixMember
# Mappings for Active Directory
#pagesize 1000
#referrals off
#idle_timelimit 800
#filter passwd (&(objectClass=user)(!(objectClass=computer))(uidNumber=*)(unixHomeDirectory=*))
#map passwd uid sAMAccountName
#map passwd homeDirectory unixHomeDirectory
#map passwd gecos displayName
#filter shadow (&(objectClass=user)(!(objectClass=computer))(uidNumber=*)(unixHomeDirectory=*))
#map shadow uid sAMAccountName
#map shadow shadowLastChange pwdLastSet
#filter group (objectClass=group)
# Alternative mappings for Active Directory
# (replace the SIDs in the objectSid mappings with the value for your domain)
#pagesize 1000
#referrals off
#idle_timelimit 800
#filter passwd (&(objectClass=user)(objectClass=person)(!(objectClass=computer)))
#map passwd uid cn
#map passwd uidNumber objectSid:S-1-5-21-3623811015-3361044348-30300820
#map passwd gidNumber objectSid:S-1-5-21-3623811015-3361044348-30300820
#map passwd homeDirectory "/home/$cn"
#map passwd gecos displayName
#map passwd loginShell "/bin/bash"
#filter group (|(objectClass=group)(objectClass=person))
#map group gidNumber objectSid:S-1-5-21-3623811015-3361044348-30300820
# Mappings for AIX SecureWay
#filter passwd (objectClass=aixAccount)
#map passwd uid userName
#map passwd userPassword passwordChar
#map passwd uidNumber uid
#map passwd gidNumber gid
#filter group (objectClass=aixAccessGroup)
#map group cn groupName
#map group gidNumber gid
# This comment prevents repeated auto-migration of settings.
---
- name: Create base directories
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
mode: "{{ item.mode }}"
loop:
- { path: /local, mode: '0777' }
- { path: /share, mode: '0755' }
- name: Create mountpoint dirs
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
mode: "{{ item.mode }}"
loop:
"{{ autofs_mounts }}"
- name: Remove unused entry in master map
ansible.builtin.replace:
dest: /etc/auto.master
regexp: '{{ item.regexp }}'
replace: '{{ item.replace }}'
backup: true
loop:
- { regexp: '^(/misc)', replace: '#\1' }
- { regexp: '^(/net)', replace: '#\1' }
- { regexp: '^(\+auto.master)', replace: '#\1' }
- name: Add master map file
ansible.builtin.lineinfile:
path: "/etc/auto.master.d/gpfs.autofs"
line: "{{ item.mount_point }} /etc/auto.{{ item.map_name }}"
create: yes
loop:
"{{ autofs_mounts }}"
- name: Set up autofs map files
ansible.builtin.lineinfile:
path: "/etc/auto.{{ item.map_name }}"
line: "{{ item.key }} -{{ item.opts }} {{ item.src }}"
create: true
loop:
"{{ autofs_mounts }}"
- name: Create symbolic links
ansible.builtin.file:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
group: root
force: yes
state: link
loop:
- { src: /data/rc/apps, dest: /share/apps }
- name: Enable and start autofs service
ansible.builtin.service:
name: autofs
enabled: true
state: restarted
---
- name: Create base directories
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
mode: "{{ item.mode }}"
loop:
"{{ mount_points }}"
- name: Mount the directories
ansible.posix.mount:
src: "{{ item.src }}"
path: "{{ item.path }}"
opts: "{{ item.opts }}"
state: mounted
fstype: nfs
loop:
"{{ mount_points }}"
---
- name: Create base directories
ansible.builtin.file:
path: "{{ item.dir }}"
state: directory
mode: "{{ item.mode }}"
loop:
- { dir: /local, mode: '0777' }
- { dir: /scratch, mode: '0755' }
- { dir: /share, mode: '0755' }
- { dir: /data/rc/apps, mode: '0755' } # this is only required for the symlink to be happy
- { dir: /data/user, mode: '0755' }
- { dir: /data/project, mode: '0755' }
- name: nfs_mounts using fstab
include_tasks: fstab.yml
when: use_fstab
- name: Remove unused entry in master map
ansible.builtin.replace:
dest: /etc/auto.master
regexp: '{{ item.regexp }}'
replace: '{{ item.replace }}'
backup: true
loop:
- { regexp: '^(/misc)', replace: '#\1' }
- { regexp: '^(/net)', replace: '#\1' }
- { regexp: '^(\+auto.master)', replace: '#\1' }
- name: Add master map file
ansible.builtin.lineinfile:
path: "/etc/auto.master.d/gpfs.autofs"
line: "{{ item.mount_point }} /etc/auto.{{ item.map_name }}"
create: yes
loop:
- { mount_point: "/cm/shared", map_name: "cm-share" }
- { mount_point: "/data/project", map_name: "data-project" }
- { mount_point: "/data/user", map_name: "data-user" }
- { mount_point: "/data/rc/apps", map_name: "data-rc-apps" }
- { mount_point: "/-", map_name: "scratch" }
- { mount_point: "/home", map_name: "home" }
- name: Set up autofs map files
ansible.builtin.lineinfile:
path: "/etc/auto.{{ item.map_name }}"
line: "{{ item.key }} -{{ item.opts }} {{ item.src }}"
create: true
loop:
- { map_name: "cm-share", key: "*", src: "gpfs.rc.uab.edu:/data/cm/shared-8.2/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
- { map_name: "data-project", key: "*", src: "gpfs.rc.uab.edu:/data/project/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
- { map_name: "data-user", key: "*", src: "gpfs.rc.uab.edu:/data/user/&", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
- { map_name: "data-rc-apps", key: "*", src: "gpfs.rc.uab.edu:/data/rc/apps/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
- { map_name: "scratch", key: "/scratch", src: "gpfs.rc.uab.edu:/scratch", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
- { map_name: "home", key: "*", src: ":/data/user/home/&", opts: 'fstype=bind' }
- name: Create symbolic links
ansible.builtin.file:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
owner: root
group: root
force: yes
state: link
loop:
- { src: /data/rc/apps, dest: /share/apps }
- name: Enable autofs service
ansible.builtin.service:
name: autofs
enabled: true
- name: nfs_mounts using autofs
include_tasks: autofs.yml
when: use_autofs
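Taken together, the role can be driven purely from vars on a virtual node. A minimal sketch that mirrors the defaults shown earlier and selects the autofs path (where this snippet lives, group_vars or extra-vars, is up to the inventory layout):

enable_nfs_mounts: true
use_autofs: true    # runs autofs.yml
use_fstab: false    # skips fstab.yml
autofs_mounts:
  - { "src": "master:/gpfs5/&", "path": "/gpfs5", "opts": "fstype=nfs,vers=3,_netdev,defaults", "mode": '0755', "mount_point": "/gpfs5", "map_name": "gpfs5", key: "*" }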