Compare revisions
Commits on Source (96)
Showing with 401 additions and 82 deletions
@@ -85,7 +85,7 @@ workflow:
       if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then
         export PKR_VAR_image_name="${BUILD_TARGET}-PR-${CI_MERGE_REQUEST_IID}"
       elif [ $CI_PIPELINE_SOURCE == 'schedule' ]; then
-        export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_DATE}"
+        export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
       fi
       # Ansible var overrides
     - |
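The new ${BUILD_TAG:-${BUILD_DATE}} expression is standard bash parameter expansion: it uses BUILD_TAG when set and non-empty, and falls back to BUILD_DATE otherwise. A minimal sketch (the variable values here are hypothetical):

    BUILD_TARGET="login"; BUILD_DATE="20240601"
    unset BUILD_TAG
    echo "${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"   # -> login-20240601
    BUILD_TAG="v1.2.3"
    echo "${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"   # -> login-v1.2.3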
@@ -113,7 +113,7 @@ workflow:
 build_http_proxy_image:
   stage: build
   environment:
-    name: $ENV
+    name: build
   tags:
     - build
   variables:
@@ -126,7 +126,7 @@ build_http_proxy_image:
 build_ssh_proxy_image:
   stage: build
   environment:
-    name: $ENV
+    name: build
   tags:
     - build
   variables:
@@ -148,7 +148,7 @@ build_ssh_proxy_image:
       if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then
         export PKR_VAR_image_name="${BUILD_TARGET}-PR-${CI_MERGE_REQUEST_IID}"
       elif [ $CI_PIPELINE_SOURCE == 'schedule' ]; then
-        export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_DATE}"
+        export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
       fi
       # packer commands
     - packer init openstack-login
@@ -165,7 +165,7 @@ build_ssh_proxy_image:
 build_login_image:
   stage: build
   environment:
-    name: $ENV
+    name: build
   tags:
     - build
   <<: *build_login_image_template
@@ -173,6 +173,46 @@ build_login_image:
     - if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "login"
       when: always
+
+build_ood_image:
+  stage: build
+  environment:
+    name: build
+  tags:
+    - build
+  script:
+    - *update_ansible_repo
+    - *get_ansible_files
+    # packer vars for job env
+    - export PKR_VAR_flavor="${OOD_BUILD_FLAVOR:-$PKR_VAR_flavor}"
+    - export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
+    - export PKR_VAR_image_date_suffix=false
+    - export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
+    - |
+      if [ $ENV = 'knightly' ] || [ $ENV = 'prod' ]; then
+        curl --header "PRIVATE-TOKEN: ${ANSIBLE_VAR_TOKEN}" \
+          "${CI_API_V4_URL}/projects/2836/repository/files/$ENV/raw?ref=main" \
+          -o CRI_XCBC/group_vars/$ENV
+        sed -i -E "s/(lts_access_key: ).*/\1\"${AWS_ACCESS_KEY_ID}\"/" CRI_XCBC/group_vars/$ENV
+        sed -i -E "s/(lts_secret_key: ).*/\1\"${AWS_SECRET_ACCESS_KEY}\"/" CRI_XCBC/group_vars/$ENV
+        sed -i -E "s/(user_register_app_key: ).*/\1\"${SELF_REG_APP_KEY}\"/" CRI_XCBC/group_vars/$ENV
+        sed -i -E "s/(celery_user_password: ).*/\1\"${CELERY_PASSWD}\"/" CRI_XCBC/group_vars/$ENV
+        sed -i -E "s|(ssh_pub_key: ).*|\1\"{{ lookup('file', '${SSH_PUB_KEY}') }}\"|" CRI_XCBC/group_vars/$ENV
+      fi
+    # packer commands
+    - packer init openstack-ood
+    - packer validate openstack-ood
+    - packer build -machine-readable openstack-ood | tee ood_build.log
+    - export BUILT_OOD_IMAGE_ID=$(grep 'Image:' ood_build.log | awk '{print $4}')
+    - echo BUILT_OOD_IMAGE_ID=${BUILT_OOD_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
+    # set image properties with repo state
+    - openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${CI_COMMIT_SHORT_SHA} ${BUILT_OOD_IMAGE_ID}
+  artifacts:
+    reports:
+      dotenv: image.env
+  rules:
+    - if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "ood"
+      when: always
 deploy_http_proxy_node:
   stage: deploy
   environment:
@@ -206,10 +246,12 @@ deploy_http_proxy_node:
       export cmd="openstack server create"
       cmd+=" -c id -f value --image $HTTP_PROXY_IMAGE_ID"
       cmd+=" --flavor $INSTANCE_FLAVOR"
-      cmd+=" --network $PROXY_NETWORK"
-      cmd+=" --security-group webserver_sec_group"
-      cmd+=" --security-group allow-ssh"
+      for security_group in ${SECURITY_GROUP_LIST[@]};
+      do
+        cmd+=" --security-group $security_group"
+      done
       cmd+=" --user-data user_data.txt"
+      if [ -n "$PROXY_NETWORK" ];then cmd+=" --network $PROXY_NETWORK"; fi
       if [ -n "$HTTP_PROXY_PORT" ];then cmd+=" --port $HTTP_PROXY_PORT"; fi
       cmd+=" --wait $HTTP_PROXY_INSTANCE_NAME"
     - export HTTP_PROXY_INSTANCE_ID=$(bash -c "$cmd")
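The loop appends one --security-group flag per entry in SECURITY_GROUP_LIST, replacing the previously hard-coded webserver_sec_group and allow-ssh groups. A minimal sketch of its effect, assuming SECURITY_GROUP_LIST is a bash array (a space-separated string variable behaves the same here); the values shown are the old hard-coded names, used purely for illustration:

    SECURITY_GROUP_LIST=(webserver_sec_group allow-ssh)
    cmd="openstack server create"
    for security_group in ${SECURITY_GROUP_LIST[@]}; do
      cmd+=" --security-group $security_group"
    done
    echo "$cmd"
    # -> openstack server create --security-group webserver_sec_group --security-group allow-ssh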
@@ -257,9 +299,12 @@ deploy_ssh_proxy_node:
       export cmd="openstack server create"
       cmd+=" -c id -f value --image $SSH_PROXY_IMAGE_ID"
       cmd+=" --flavor $INSTANCE_FLAVOR"
-      cmd+=" --network $PROXY_NETWORK"
-      cmd+=" --security-group allow-ssh"
+      for security_group in ${SECURITY_GROUP_LIST[@]};
+      do
+        cmd+=" --security-group $security_group"
+      done
       cmd+=" --user-data user_data.txt"
+      if [ -n "$PROXY_NETWORK" ];then cmd+=" --network $PROXY_NETWORK"; fi
       if [ -n "$SSH_PROXY_PORT" ];then cmd+=" --port $SSH_PROXY_PORT"; fi
       cmd+=" --wait $SSH_PROXY_INSTANCE_NAME"
     - export SSH_PROXY_INSTANCE_ID=$(bash -c "$cmd")
@@ -300,6 +345,7 @@ deploy_login_node:
       [$ENV]
       127.0.0.1
       EEOF
+      s3cmd get --force -r --access_key=$AWS_ACCESS_KEY_ID --secret_key=$AWS_SECRET_ACCESS_KEY --host=$AWS_HOST --host-bucket=$AWS_HOST s3://cheaha-cloud-ansible-files/ /tmp/${CI_PROJECT_NAME}/ansible/files/
       ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
       rm -rf /tmp/${CI_PROJECT_NAME}
       EOF
@@ -307,9 +353,12 @@ deploy_login_node:
       export cmd="openstack server create"
       cmd+=" -c id -f value --image $LOGIN_IMAGE_ID"
       cmd+=" --flavor $INSTANCE_FLAVOR"
-      cmd+=" --network $INSTANCE_NETWORK"
-      cmd+=" --security-group allow-ssh"
+      for security_group in ${SECURITY_GROUP_LIST[@]};
+      do
+        cmd+=" --security-group $security_group"
+      done
       cmd+=" --user-data user_data.txt"
+      if [ -n "$INSTANCE_NETWORK" ];then cmd+=" --network $INSTANCE_NETWORK"; fi
       if [ -n "$LOGIN_PORT" ];then cmd+=" --port $LOGIN_PORT"; fi
       cmd+=" --wait $LOGIN_INSTANCE_NAME"
     - export LOGIN_INSTANCE_ID=$(bash -c "$cmd")
@@ -324,3 +373,56 @@ deploy_login_node:
     - if: $PIPELINE_TARGET == "deploy" && $LOGIN_IMAGE_ID
       when: always
+
+deploy_ood_node:
+  stage: deploy
+  environment:
+    name: $ENV
+  tags:
+    - build
+  script:
+    - openstack image set --accept $OOD_IMAGE_ID || true
+    - FAILED=false
+    - |
+      cat > user_data.txt <<EOF
+      #!/bin/bash
+      cat >> /etc/NetworkManager/conf.d/90-dns-none.conf<<EEOF
+      [main]
+      dns=none
+      EEOF
+      systemctl reload NetworkManager
+      echo "$DEV_KEY" >> /root/.ssh/authorized_keys
+      ip route replace default via ${DEFAULT_GATEWAY_IP} dev eth0
+      git clone ${CI_REPOSITORY_URL} /tmp/${CI_PROJECT_NAME}
+      cd /tmp/${CI_PROJECT_NAME}
+      git checkout ${CI_COMMIT_REF_NAME}
+      cat >> ansible/hosts<<EEOF
+      [$ENV]
+      127.0.0.1
+      EEOF
+      s3cmd get --force -r --access_key=$AWS_ACCESS_KEY_ID --secret_key=$AWS_SECRET_ACCESS_KEY --host=$AWS_HOST --host-bucket=$AWS_HOST s3://cheaha-cloud-ansible-files/ /tmp/${CI_PROJECT_NAME}/ansible/files/
+      ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
+      rm -rf /tmp/${CI_PROJECT_NAME}
+      EOF
+    - |
+      export cmd="openstack server create"
+      cmd+=" -c id -f value --image $OOD_IMAGE_ID"
+      cmd+=" --flavor $INSTANCE_FLAVOR"
+      for security_group in ${SECURITY_GROUP_LIST[@]};
+      do
+        cmd+=" --security-group $security_group"
+      done
+      cmd+=" --user-data user_data.txt"
+      if [ -n "$INSTANCE_NETWORK" ];then cmd+=" --network $INSTANCE_NETWORK"; fi
+      if [ -n "$OOD_PORT" ];then cmd+=" --port $OOD_PORT"; fi
+      cmd+=" --wait $OOD_INSTANCE_NAME"
+    - export OOD_INSTANCE_ID=$(bash -c "$cmd")
+    - |
+      # Associate the floating IP(s) with the OOD instance
+      for OOD_FLOATING_IP in ${OOD_FLOATING_IP_LIST[@]};
+      do
+        echo "Associating FLOATING_IP $OOD_FLOATING_IP with OOD_INSTANCE_ID $OOD_INSTANCE_ID"
+        openstack server add floating ip $OOD_INSTANCE_ID $OOD_FLOATING_IP
+      done
+  rules:
+    - if: $PIPELINE_TARGET == "deploy" && $OOD_IMAGE_ID
+      when: always
@@ -11,3 +11,6 @@
     - { name: 'ssh_proxy_config', tags: 'ssh_proxy_config', when: enable_ssh_proxy_config }
     - { name: 'ssl_cert', tags: 'ssl_cert', when: enable_ssl_certs }
     - { name: 'rsyslog_config', tags: 'rsyslog_config', when: enable_rsyslog_config }
+    - { name: 'rewrite_map', tags: 'rewrite_map', when: enable_rewrite_map }
+    - { name: 'fail2ban', tags: 'fail2ban', when: enable_fail2ban }
+    - { name: 'install_node_exporter', tags: 'install_node_exporter', when: enable_node_exporter }
@@ -30,9 +30,13 @@
 # nfs_mounts related
 enable_nfs_mounts: true
 use_autofs: false
+use_fstab: false
 mount_points:
   - { "src": "master:/gpfs4", "path": "/gpfs4", "opts": "ro,sync,hard", "mode": "0755" }
   - { "src": "master:/gpfs5", "path": "/gpfs5", "opts": "ro,sync,hard", "mode": "0755" }
+autofs_mounts:
+  - { "src": "master:/gpfs4/&", "path": "/gpfs4", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs4", "map_name": "gpfs4", "key": "*" }
+  - { "src": "master:/gpfs5/&", "path": "/gpfs5", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs5", "map_name": "gpfs5", "key": "*" }
 
 #SSH Host Keys
 S3_ENDPOINT: ""
@@ -46,10 +50,9 @@
 # ssh proxy
 enable_ssh_proxy_config: false
 sshpiper_dest_dir: "/opt/sshpiper"
-fail2ban_cidr_list: "127.0.0.1/8"
 
 # rsyslog
-enable_rsyslog_config: false
+enable_rsyslog_config: true
 rsyslog_target: "*.* @master:514"
 
 # ssl certs
@@ -62,3 +65,27 @@
 ssl_cert_chain_file: ""
 ssl_apache_config: ""
 apache_service: "httpd"
+
+# rewrite map
+enable_rewrite_map: false
+target_groups:
+  - { "name": "gpfs4", "host": "login001", "default": True }
+  - { "name": "gpfs5", "host": "login002", "default": False }
+
+# account app
+account_app_port: 8000
+
+# fail2ban
+enable_fail2ban: false
+maxretry: 1
+findtime: 600
+bantime: 1200
+fail2ban_white_list: "127.0.0.1/8"
+
+# Node Exporter
+enable_node_exporter: false
+node_exporter_ver: "1.8.2"
+node_exporter_filename: "node_exporter-{{ node_exporter_ver }}.linux-amd64"
+node_exporter_user: node_exporter
+node_exporter_group: node_exporter
+node_exporter_port: 9100
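For reference, the vars above compose the upstream node_exporter release URL; a quick bash sketch showing the URL they produce for the pinned 1.8.2 version:

    node_exporter_ver="1.8.2"
    node_exporter_filename="node_exporter-${node_exporter_ver}.linux-amd64"
    echo "https://github.com/prometheus/node_exporter/releases/download/v${node_exporter_ver}/${node_exporter_filename}.tar.gz"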
@@ -5,6 +5,5 @@
   roles:
     - { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
     - { name: 'install_packages', tags: 'install_packages' }
-    - { name: 'pam_slurm_adopt', tags: 'pam_slurm_adopt' }
     - { name: 'install_nhc', tags: 'install_nhc' }
@@ -6,6 +6,3 @@
     - { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
     - { name: 'install_packages', tags: 'install_packages' }
     - { name: 'install_zsh', tags: 'install_zsh' }
-
-- name: Setup node for use as a virtual cheaha node
-  ansible.builtin.import_playbook: cheaha.yml
---
- name: Install fail2ban
  ansible.builtin.package:
    name: "{{ item }}"
    state: present
  loop:
    - fail2ban
    - fail2ban-firewalld

- name: Configure fail2ban
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    backup: true
  loop:
    - { src: 'jail.local.j2', dest: '/etc/fail2ban/jail.local' }
    - { src: 'sshpiperd_filter.local.j2', dest: '/etc/fail2ban/filter.d/sshpiperd.local' }
    - { src: 'sshpiperd_jail.local.j2', dest: '/etc/fail2ban/jail.d/sshpiperd.local' }

- name: Activate the firewalld support for fail2ban
  ansible.builtin.command:
    cmd: mv /etc/fail2ban/jail.d/00-firewalld.conf /etc/fail2ban/jail.d/00-firewalld.local

- name: Configure firewalld to allow ssh and sshpiper traffic
  ansible.posix.firewalld:
    port: "{{ item }}"
    zone: public
    state: enabled
    permanent: true
  loop:
    - 2222/tcp
    - 22/tcp

- name: Enable and start firewalld
  ansible.builtin.service:
    name: firewalld
    enabled: true
    state: restarted

- name: Enable and start fail2ban
  ansible.builtin.service:
    name: fail2ban
    enabled: true
    state: restarted
 [DEFAULT]
 banaction = firewalld
-bantime = 1200
-ignoreip = {{ fail2ban_cidr_list }}
+bantime = {{ bantime }}
+ignoreip = {{ fail2ban_white_list }}
 [sshd]
 enabled = true
# Refer to https://github.com/fail2ban/fail2ban/wiki/Developing-Regex-in-Fail2ban for developing regex using fail2ban
#
[INCLUDES]
before = common.conf
[DEFAULT]
_daemon = sshpiperd
__iso_datetime = "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:[+-]\d{2}:\d{2}|Z)"
__pref = time=%(__iso_datetime)s level=(?:debug|error)
[Definition]
# Define the prefix regex for the log lines
prefregex = ^<F-MLFID>%(__prefix_line)s%(__pref)s</F-MLFID>\s+<F-CONTENT>.+</F-CONTENT>$
# Failregex to match the specific failure log lines (prefregex is automatically included)
failregex = ^msg="connection from .*failtoban: ip <HOST> too auth many failures"$
ignoreregex =
mode = normal
maxlines = 1
# This configuration will block the remote host after {{maxretry}} failed SSH login attempts.
[sshpiperd]
enabled = true
filter = sshpiperd
logpath = /var/log/messages
port = 22
maxretry = {{ maxretry }}
backend = auto
findtime = {{ findtime }}
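Once the jail is live, its state can be checked with the stock fail2ban CLI; a quick verification sketch (jail names taken from the templates above):

    fail2ban-client status sshpiperd   # jail state and currently banned IPs
    fail2ban-client status sshd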
---
- name: Download node_exporter binary
  ansible.builtin.get_url:
    url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_ver }}/{{ node_exporter_filename }}.tar.gz"
    dest: "/tmp/{{ node_exporter_filename }}.tar.gz"

- name: Extract node_exporter
  ansible.builtin.unarchive:
    src: "/tmp/{{ node_exporter_filename }}.tar.gz"
    dest: "/tmp"
    remote_src: yes

- name: Create system group {{ node_exporter_group }}
  ansible.builtin.group:
    name: "{{ node_exporter_group }}"
    system: true
    state: present

- name: Create system user account {{ node_exporter_user }}
  ansible.builtin.user:
    name: "{{ node_exporter_user }}"
    comment: Prometheus node_exporter system account
    group: "{{ node_exporter_group }}"
    system: true
    home: /var/lib/node_exporter
    create_home: false
    shell: /sbin/nologin
    state: present

- name: Copy node_exporter binary
  ansible.builtin.copy:
    src: "/tmp/{{ node_exporter_filename }}/node_exporter"
    dest: /usr/local/bin/node_exporter
    remote_src: yes
    owner: root
    group: root
    mode: '0755'

- name: Copy systemd unit file
  ansible.builtin.template:
    src: node_exporter.service.j2
    dest: /etc/systemd/system/node_exporter.service
    owner: root
    group: root
    mode: '0644'

- name: Clean up /tmp
  ansible.builtin.file:
    path: "/tmp/{{ item }}"
    state: absent
  loop:
    - "{{ node_exporter_filename }}.tar.gz"
    - "{{ node_exporter_filename }}"

- name: Restart node_exporter service
  ansible.builtin.systemd:
    daemon_reload: yes
    name: node_exporter
    state: restarted
    enabled: true

- name: Collect facts about system services
  ansible.builtin.service_facts:

- name: Configure firewalld to allow prometheus
  ansible.posix.firewalld:
    port: "{{ node_exporter_port }}/tcp"
    zone: public
    state: enabled
    permanent: true
  when:
    - "'firewalld.service' in ansible_facts.services"
    - ansible_facts.services["firewalld.service"].state == "running"

- name: Enable and start firewalld
  ansible.builtin.service:
    name: firewalld
    enabled: true
    state: restarted
  when:
    - "'firewalld.service' in ansible_facts.services"
    - ansible_facts.services["firewalld.service"].state == "running"
[Unit]
Description=Node Exporter
After=network.target

[Service]
User={{ node_exporter_user }}
Group={{ node_exporter_group }}
Type=simple
ExecStart=/usr/local/bin/node_exporter --web.listen-address=:{{ node_exporter_port }} --collector.filesystem.mount-points-exclude "^/(dev|proc|run/user/.+|run/credentials/.+|sys|var/lib/docker/.+)($|/)" --collector.filesystem.fs-types-exclude "^(autofs|binfmt_misc|bpf|cgroup|tmpfs|sunrpc|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"

[Install]
WantedBy=multi-user.target
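A minimal smoke test for the deployed service, assuming the default node_exporter_port of 9100 from the group vars:

    systemctl is-active node_exporter
    curl -s "http://localhost:9100/metrics" | head -n 5   # first few Prometheus metrics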
@@ -7,7 +7,6 @@
     - nss-pam-ldapd
     - openldap
     - openldap-clients
-    - openldap-servers
     - sssd-ldap
 
 - name: Update nsswitch.conf to look for ldap
...
 ---
 - name: Create base directories
   ansible.builtin.file:
-    path: "{{ item.dir }}"
+    path: "{{ item.path }}"
     state: directory
     mode: "{{ item.mode }}"
   loop:
-    - { dir: /local, mode: '0777' }
-    - { dir: /scratch, mode: '0755' }
-    - { dir: /share, mode: '0755' }
-    - { dir: /data/rc/apps, mode: '0755' } # this is only required for the symlink to be happy
-    - { dir: /data/user, mode: '0755' }
-    - { dir: /data/project, mode: '0755' }
+    - { path: /local, mode: '0777' }
+    - { path: /share, mode: '0755' }
+
+- name: Create mountpoint dirs
+  ansible.builtin.file:
+    path: "{{ item.path }}"
+    state: directory
+    mode: "{{ item.mode }}"
+  loop:
+    "{{ autofs_mounts }}"
 
 - name: Remove unused entry in master map
   ansible.builtin.replace:
@@ -29,12 +33,7 @@
     line: "{{ item.mount_point }} /etc/auto.{{ item.map_name }}"
     create: yes
   loop:
-    - { mount_point: "/cm/shared", map_name: "cm-share" }
-    - { mount_point: "/data/project", map_name: "data-project" }
-    - { mount_point: "/data/user", map_name: "data-user" }
-    - { mount_point: "/data/rc/apps", map_name: "data-rc-apps" }
-    - { mount_point: "/-", map_name: "scratch" }
-    - { mount_point: "/home", map_name: "home" }
+    "{{ autofs_mounts }}"
 - name: Set up autofs map files
   ansible.builtin.lineinfile:
@@ -42,12 +41,7 @@
     line: "{{ item.key }} -{{ item.opts }} {{ item.src }}"
     create: true
   loop:
-    - { map_name: "cm-share", key: "*", src: "gpfs.rc.uab.edu:/data/cm/shared-8.2/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
-    - { map_name: "data-project", key: "*", src: "gpfs.rc.uab.edu:/data/project/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
-    - { map_name: "data-user", key: "*", src: "gpfs.rc.uab.edu:/data/user/&", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
-    - { map_name: "data-rc-apps", key: "*", src: "gpfs.rc.uab.edu:/data/rc/apps/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
-    - { map_name: "scratch", key: "/scratch", src: "gpfs.rc.uab.edu:/scratch", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
-    - { map_name: "home", key: "*", src: ":/data/user/home/&", opts: 'fstype=bind' }
+    "{{ autofs_mounts }}"
 - name: Create symbolic links
   ansible.builtin.file:
@@ -60,7 +54,8 @@
   loop:
     - { src: /data/rc/apps, dest: /share/apps }
 
-- name: Enable autofs service
+- name: Enable and start autofs service
   ansible.builtin.service:
     name: autofs
     enabled: true
+    state: restarted
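Given the gpfs4 entry in autofs_mounts, the tasks above should yield one master-map entry and one map file; a sanity-check sketch of the expected lines, assuming the master map lives at /etc/auto.master (that path is not shown in this diff):

    grep '^/gpfs4' /etc/auto.master   # expect: /gpfs4 /etc/auto.gpfs4
    cat /etc/auto.gpfs4               # expect: * -fstype=nfs,vers=3,_netdev,default master:/gpfs4/&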
 ---
 - name: nfs_mounts using fstab
   include_tasks: fstab.yml
-  when: not use_autofs
+  when: use_fstab
 
 - name: nfs_mounts using autofs
   include_tasks: autofs.yml
...
---
- name: Add apache rewritemap script config
  ansible.builtin.template:
    src: rewrite_map_config_py.j2
    mode: '600'
    owner: root
    group: root
    dest: /var/www/rewrite_map_config.py

- name: Replace OOD rewrite condition regex in Apache configuration
  ansible.builtin.replace:
    path: /etc/httpd/conf.d/front-end.conf
    regexp: "RewriteCond %{HTTP:REMOTE_USER} '\\^\\(\\.\\+\\)\\$'"
    replace: |
      RewriteCond %{HTTP:REMOTE_USER} '([a-zA-Z0-9_.+-]+)@uab.edu$' [OR]
      RewriteCond %{HTTP:REMOTE_USER} 'urn:mace:incommon:uab.edu!https://uabgrid.uab.edu/shibboleth!(.+)$'

- name: Replace account app port in Apache configuration
  ansible.builtin.replace:
    path: /etc/httpd/conf.d/front-end.conf
    regexp: "account-app:8000"
    replace: "account-app:{{ account_app_port }}"

- name: Restart httpd services
  ansible.builtin.service:
    name: httpd
    enabled: true
    state: restarted
DEBUG = False
target_groups = {
{% for group in target_groups %}
"{{ group.name }}": "{{ group.host }}",
{% endfor %}
}
{% for group in target_groups %}
{% if group.default %}
default_hostname = "{{ group.host }}"
{% endif %}
{% endfor %}
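With the default target_groups from the group vars (gpfs4 -> login001 as the default, gpfs5 -> login002), the template should render to roughly the following; written here as a heredoc sketch for illustration only:

    cat > /var/www/rewrite_map_config.py <<'EOF'
    DEBUG = False
    target_groups = {
    "gpfs4": "login001",
    "gpfs5": "login002",
    }
    default_hostname = "login001"
    EOF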
@@ -19,6 +19,7 @@
     state: present
     uid: 450
     group: slurm
+    create_home: false
 
 - name: Copy munge key
   ansible.builtin.copy:
@@ -28,6 +29,19 @@
     group: root
     mode: 0400
+- name: Create symbolic links for Slurm config files
+  ansible.builtin.file:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+    state: link
+    force: yes  # force creation of the symlinks even if source files do not exist yet
+  loop:
+    - { src: "/cm/shared/apps/slurm/var/etc/cgroup.conf", dest: "/etc/slurm/cgroup.conf" }
+    - { src: "/cm/shared/apps/slurm/var/etc/gres.conf", dest: "/etc/slurm/gres.conf" }
+    - { src: "/cm/shared/apps/slurm/var/etc/slurm.conf", dest: "/etc/slurm/slurm.conf" }
+    - { src: "/cm/shared/apps/slurm/var/etc/slurmdbd.conf", dest: "/etc/slurm/slurmdbd.conf" }
+    - { src: "/cm/shared/apps/slurm/var/etc/job_submit.lua", dest: "/etc/slurm/job_submit.lua" }
+
 - name: Enable services
   ansible.builtin.service:
     name: "{{ item }}"
...
@@ -22,6 +22,7 @@
     aws_secret_key: "{{ LTS_SECRET_KEY }}"
   vars:
     ansible_python_interpreter: /usr/bin/python3
+  when: SSH_HOST_KEYS_S3_BUCKET | length > 0 and SSH_HOST_KEYS_S3_OBJECT | length > 0
 
 - name: Unpack SSH host keys to /etc/ssh
   ansible.builtin.unarchive:
@@ -31,6 +32,7 @@
     owner: root
     remote_src: yes
   become: true
+  when: SSH_HOST_KEYS_S3_BUCKET | length > 0 and SSH_HOST_KEYS_S3_OBJECT | length > 0
 
 - name: Remove the temporary folder after put in place
   ansible.builtin.file:
...
@@ -10,45 +10,3 @@
     name: sshpiperd
     enabled: true
     state: restarted
-
-- name: Install firewalld
-  ansible.builtin.package:
-    name: firewalld
-    state: present
-
-- name: Configure firewalld
-  ansible.posix.firewalld:
-    port: 2222/tcp
-    zone: public
-    state: enabled
-    permanent: true
-
-- name: Enable and start firewalld
-  ansible.builtin.service:
-    name: firewalld
-    enabled: true
-    state: restarted
-
-- name: Install fail2ban
-  ansible.builtin.package:
-    name: "{{ item }}"
-    state: present
-  loop:
-    - fail2ban
-    - fail2ban-firewalld
-
-- name: Configure fail2ban
-  ansible.builtin.template:
-    src: jail.local.j2
-    dest: "/etc/fail2ban/jail.local"
-    backup: true
-
-- name: Activate the firewall support
-  ansible.builtin.command:
-    cmd: mv /etc/fail2ban/jail.d/00-firewalld.conf /etc/fail2ban/jail.d/00-firewalld.local
-
-- name: Enable and start fail2ban
-  ansible.builtin.service:
-    name: fail2ban
-    enabled: true
-    state: restarted
@@ -24,5 +24,11 @@ pipes:
       host: "{{ group.host }}"
       ignore_hostkey: true
       private_key: "{{ group.private_key }}"
+  - from:
+      - username: ".*"
+        username_regex_match: true
+    to:
+      host: "{{ group.host }}"
+      ignore_hostkey: true
 {% endif %}
 {% endfor %}