
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (130)
Showing 923 additions and 52 deletions
......@@ -26,10 +26,12 @@ variables:
INSTANCE_FLAVOR: "m1.medium-ruffner"
HTTP_PROXY_INSTANCE_NAME: "http-proxy"
SSH_PROXY_INSTANCE_NAME: "ssh-proxy"
NUM_IMAGES_TO_KEEP: 5
stages:
- build
- deploy
- cleanup
workflow:
rules:
......@@ -78,32 +80,25 @@ workflow:
- *update_ansible_repo
- *get_ansible_files
# packer vars for job env
- image_tag=${BUILD_TARGET}${BUILD_TAG:+-${BUILD_TAG}}-${ENV}
- export PKR_VAR_flavor="${PROXY_BUILD_FLAVOR:-$PKR_VAR_flavor}"
- export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
- export PKR_VAR_image_date_suffix=false
- export PKR_VAR_image_tags="[\"${image_tag}\"]"
- export PKR_VAR_image_name=${image_tag}-${BUILD_DATE}
- |
if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then
export PKR_VAR_image_name="${BUILD_TARGET}-PR-${CI_MERGE_REQUEST_IID}"
elif [ $CI_PIPELINE_SOURCE == 'schedule' ]; then
export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_DATE}"
export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
fi
# Ansible var overrides
- |
if [ -n "${PROXY_ENABLE_VAR}" ]; then
sed -i -E "s/(${PROXY_ENABLE_VAR}: ).*/\1true/" $EXT_REPO_DIR/group_vars/all
fi
- 'sed -i -E "s|(s3_endpoint: ).*|\1\"${S3_ENDPOINT}\"|" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s/(lts_access_key: ).*/\1\"${AWS_ACCESS_KEY_ID}\"/" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s/(lts_secret_key: ).*/\1\"${AWS_SECRET_ACCESS_KEY}\"/" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s/(s3_shibboleth_bucket_name: ).*/\1\"${S3_SHIBBOLETH_BUCKET_NAME}\"/" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s/(s3_shibboleth_object_name: ).*/\1\"${S3_SHIBBOLETH_OBJECT_NAME}\"/" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s|(ssh_pub_key: ).*|\1\"{{ lookup(''file'', ''${SSH_PUB_KEY}'') }}\"|" $EXT_REPO_DIR/group_vars/all'
# packer commands
- packer init openstack-proxy
- packer validate openstack-proxy
- packer build -machine-readable openstack-proxy | tee proxy_build.log
- export BUILT_PROXY_IMAGE_ID=$(grep 'Image:' proxy_build.log | awk '{print $4}')
- echo BUILT_PROXY_IMAGE_ID=${BUILT_PROXY_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
- echo image_tag=${image_tag} | tee -a $CI_PROJECT_DIR/image.env
# set image properties with repo state
- openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${PACKER_IMAGE_HEAD} ${BUILT_PROXY_IMAGE_ID}
artifacts:
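A quick sketch of how the image_tag and PKR_VAR_image_name expansions above resolve, using hypothetical values rather than real pipeline variables:

# hypothetical values for illustration only
BUILD_TARGET=http-proxy ENV=dev BUILD_DATE=2024-01-31 BUILD_TAG=v2
echo "${BUILD_TARGET}${BUILD_TAG:+-${BUILD_TAG}}-${ENV}"   # http-proxy-v2-dev      (image_tag)
echo "${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"         # http-proxy-v2          (scheduled image name)
unset BUILD_TAG
echo "${BUILD_TARGET}${BUILD_TAG:+-${BUILD_TAG}}-${ENV}"   # http-proxy-dev         (tag suffix dropped)
echo "${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"         # http-proxy-2024-01-31  (name falls back to BUILD_DATE)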
......@@ -113,7 +108,7 @@ workflow:
build_http_proxy_image:
stage: build
environment:
name: $ENV
name: build
tags:
- build
variables:
......@@ -126,7 +121,7 @@ build_http_proxy_image:
build_ssh_proxy_image:
stage: build
environment:
name: $ENV
name: build
tags:
- build
variables:
......@@ -141,14 +136,17 @@ build_ssh_proxy_image:
- *update_ansible_repo
- *get_ansible_files
# packer vars for job env
- image_tag=${BUILD_TARGET}${BUILD_TAG:+-${BUILD_TAG}}-${ENV}
- export PKR_VAR_flavor="${PROXY_BUILD_FLAVOR:-$PKR_VAR_flavor}"
- export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
- export PKR_VAR_image_date_suffix=false
- export PKR_VAR_image_tags="[\"${image_tag}\"]"
- export PKR_VAR_image_name=${image_tag}-${BUILD_DATE}
- |
if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then
export PKR_VAR_image_name="${BUILD_TARGET}-PR-${CI_MERGE_REQUEST_IID}"
elif [ $CI_PIPELINE_SOURCE == 'schedule' ]; then
export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_DATE}"
export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
fi
# packer commands
- packer init openstack-login
......@@ -156,6 +154,7 @@ build_ssh_proxy_image:
- packer build -machine-readable openstack-login | tee login_build.log
- export BUILT_LOGIN_IMAGE_ID=$(grep 'Image:' login_build.log | awk '{print $4}')
- echo BUILT_LOGIN_IMAGE_ID=${BUILT_LOGIN_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
- echo image_tag=${image_tag} | tee -a $CI_PROJECT_DIR/image.env
# set image properties with repo state
- openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${CI_COMMIT_SHORT_SHA} ${BUILT_LOGIN_IMAGE_ID}
artifacts:
......@@ -165,7 +164,7 @@ build_ssh_proxy_image:
build_login_image:
stage: build
environment:
name: $ENV
name: build
tags:
- build
<<: *build_login_image_template
......@@ -173,6 +172,44 @@ build_login_image:
- if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "login"
when: always
build_ood_image:
stage: build
environment:
name: build
tags:
- build
script:
- *update_ansible_repo
- *get_ansible_files
# packer vars for job env
- image_tag=${BUILD_TARGET}${BUILD_TAG:+-${BUILD_TAG}}-${ENV}
- export PKR_VAR_flavor="${OOD_BUILD_FLAVOR:-$PKR_VAR_flavor}"
- export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
- export PKR_VAR_image_date_suffix=false
- export PKR_VAR_image_tags="[\"${image_tag}\"]"
- export PKR_VAR_image_name=${image_tag}-${BUILD_DATE}
- |
if [ $ENV = 'knightly' ] || [ $ENV = 'prod' ]; then
curl --header "PRIVATE-TOKEN: ${ANSIBLE_VAR_TOKEN}" \
"${CI_API_V4_URL}/projects/2836/repository/files/$ENV/raw?ref=main" \
-o CRI_XCBC/group_vars/$ENV
fi
# packer commands
- packer init openstack-ood
- packer validate openstack-ood
- packer build -machine-readable openstack-ood | tee ood_build.log
- export BUILT_OOD_IMAGE_ID=$(grep 'Image:' ood_build.log | awk '{print $4}')
- echo BUILT_OOD_IMAGE_ID=${BUILT_OOD_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
- echo image_tag=${image_tag} | tee -a $CI_PROJECT_DIR/image.env
# set image properties with repo state
- openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${CI_COMMIT_SHORT_SHA} ${BUILT_OOD_IMAGE_ID}
artifacts:
reports:
dotenv: image.env
rules:
- if: $BUILD_TARGET == "ood"
when: always
deploy_http_proxy_node:
stage: deploy
environment:
......@@ -206,10 +243,12 @@ deploy_http_proxy_node:
export cmd="openstack server create"
cmd+=" -c id -f value --image $HTTP_PROXY_IMAGE_ID"
cmd+=" --flavor $INSTANCE_FLAVOR"
cmd+=" --network $PROXY_NETWORK"
cmd+=" --security-group webserver_sec_group"
cmd+=" --security-group allow-ssh"
for security_group in ${SECURITY_GROUP_LIST[@]};
do
cmd+=" --security-group $security_group"
done
cmd+=" --user-data user_data.txt"
if [ -n "$PROXY_NETWORK" ];then cmd+=" --network $PROXY_NETWORK"; fi
if [ -n "$HTTP_PROXY_PORT" ];then cmd+=" --port $HTTP_PROXY_PORT"; fi
cmd+=" --wait $HTTP_PROXY_INSTANCE_NAME"
- export HTTP_PROXY_INSTANCE_ID=$(bash -c "$cmd")
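The deploy jobs build the server-create command as a string and run it with bash -c so optional flags can be appended conditionally; a self-contained sketch of that pattern follows, with hypothetical values and the openstack call replaced by echo:

SECURITY_GROUP_LIST=(webserver_sec_group allow-ssh)   # hypothetical groups
PROXY_NETWORK=""                                      # empty, so --network is omitted
HTTP_PROXY_PORT="port-uuid-1234"                      # hypothetical port ID
cmd="echo openstack server create -c id -f value"     # echo instead of a real API call
for security_group in "${SECURITY_GROUP_LIST[@]}"; do
  cmd+=" --security-group $security_group"
done
if [ -n "$PROXY_NETWORK" ]; then cmd+=" --network $PROXY_NETWORK"; fi
if [ -n "$HTTP_PROXY_PORT" ]; then cmd+=" --port $HTTP_PROXY_PORT"; fi
bash -c "$cmd"   # prints the assembled command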
......@@ -257,9 +296,12 @@ deploy_ssh_proxy_node:
export cmd="openstack server create"
cmd+=" -c id -f value --image $SSH_PROXY_IMAGE_ID"
cmd+=" --flavor $INSTANCE_FLAVOR"
cmd+=" --network $PROXY_NETWORK"
cmd+=" --security-group allow-ssh"
for security_group in ${SECURITY_GROUP_LIST[@]};
do
cmd+=" --security-group $security_group"
done
cmd+=" --user-data user_data.txt"
if [ -n "$PROXY_NETWORK" ];then cmd+=" --network $PROXY_NETWORK"; fi
if [ -n "$SSH_PROXY_PORT" ];then cmd+=" --port $SSH_PROXY_PORT"; fi
cmd+=" --wait $SSH_PROXY_INSTANCE_NAME"
- export SSH_PROXY_INSTANCE_ID=$(bash -c "$cmd")
......@@ -300,6 +342,7 @@ deploy_login_node:
[$ENV]
127.0.0.1
EEOF
s3cmd get --force -r --access_key=$AWS_ACCESS_KEY_ID --secret_key=$AWS_SECRET_ACCESS_KEY --host=$AWS_HOST --host-bucket=$AWS_HOST s3://cheaha-cloud-ansible-files/ /tmp/${CI_PROJECT_NAME}/ansible/files/
ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
rm -rf /tmp/${CI_PROJECT_NAME}
EOF
......@@ -307,9 +350,12 @@ deploy_login_node:
export cmd="openstack server create"
cmd+=" -c id -f value --image $LOGIN_IMAGE_ID"
cmd+=" --flavor $INSTANCE_FLAVOR"
cmd+=" --network $INSTANCE_NETWORK"
cmd+=" --security-group allow-ssh"
for security_group in ${SECURITY_GROUP_LIST[@]};
do
cmd+=" --security-group $security_group"
done
cmd+=" --user-data user_data.txt"
if [ -n "$INSTANCE_NETWORK" ];then cmd+=" --network $INSTANCE_NETWORK"; fi
if [ -n "$LOGIN_PORT" ];then cmd+=" --port $LOGIN_PORT"; fi
cmd+=" --wait $LOGIN_INSTANCE_NAME"
- export LOGIN_INSTANCE_ID=$(bash -c "$cmd")
......@@ -324,3 +370,101 @@ deploy_login_node:
- if: $PIPELINE_TARGET == "deploy" && $LOGIN_IMAGE_ID
when: always
deploy_ood_node:
stage: deploy
environment:
name: $ENV
tags:
- build
before_script:
- |
for OOD_FLOATING_IP in ${OOD_FLOATING_IP_LIST[@]}; do
OOD_FIXED_IP=$(
openstack floating ip list \
--floating-ip-address $OOD_FLOATING_IP -c "Fixed IP Address" -f value)
CURRENT_INSTANCE_ID=$(
openstack server list \
--name $OOD_INSTANCE_NAME --ip $OOD_FIXED_IP -c ID -f value)
openstack server remove floating ip $CURRENT_INSTANCE_ID $OOD_FLOATING_IP
done
- |
if [ -n "$OOD_PORT" ];then
openstack server remove port $CURRENT_INSTANCE_ID $OOD_PORT
fi
script:
- OOD_IMAGE_ID="${BUILT_OOD_IMAGE_ID:-$OOD_IMAGE_ID}"
- openstack image set --accept $OOD_IMAGE_ID || true
- FAILED=false
- |
cat > user_data.txt <<EOF
#!/bin/bash
cat >> /etc/NetworkManager/conf.d/90-dns-none.conf<<EEOF
[main]
dns=none
EEOF
systemctl reload NetworkManager
echo "$DEV_KEY" >> /root/.ssh/authorized_keys
ip route replace default via ${DEFAULT_GATEWAY_IP} dev eth0
git clone ${CI_REPOSITORY_URL} /tmp/${CI_PROJECT_NAME}
cd /tmp/${CI_PROJECT_NAME}
git checkout ${CI_COMMIT_REF_NAME}
cat >> ansible/hosts<<EEOF
[$ENV]
127.0.0.1
EEOF
s3cmd get --force -r --access_key=$AWS_ACCESS_KEY_ID --secret_key=$AWS_SECRET_ACCESS_KEY --host=$AWS_HOST --host-bucket=$AWS_HOST s3://cheaha-cloud-ansible-files/ /tmp/${CI_PROJECT_NAME}/ansible/files/
ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
rm -rf /tmp/${CI_PROJECT_NAME}
EOF
- |
export cmd="openstack server create"
cmd+=" -c id -f value --image $OOD_IMAGE_ID"
cmd+=" --flavor $INSTANCE_FLAVOR"
for security_group in ${SECURITY_GROUP_LIST[@]};
do
cmd+=" --security-group $security_group"
done
cmd+=" --user-data user_data.txt"
if [ -n "$INSTANCE_NETWORK" ];then cmd+=" --network $INSTANCE_NETWORK"; fi
if [ -n "$OOD_PORT" ];then cmd+=" --port $OOD_PORT"; fi
cmd+=" --wait $OOD_INSTANCE_NAME"
- export OOD_INSTANCE_ID=$(bash -c "$cmd")
- |
# Associate the floating IP(s) with the OOD instance
for OOD_FLOATING_IP in ${OOD_FLOATING_IP_LIST[@]};
do
echo "Associating FLOATING_IP $OOD_FLOATING_IP with OOD_INSTANCE_ID $OOD_INSTANCE_ID"
openstack server add floating ip $OOD_INSTANCE_ID $OOD_FLOATING_IP
done
rules:
- if: $DEPLOY_TARGET == "ood"
when: always
cleanup_img:
stage: cleanup
environment:
name: $ENV
tags:
- build
script:
- |
OS_PROJECT_ID=$(
openstack application credential \
show $OS_APPLICATION_CREDENTIAL_ID -f value -c project_id
)
- |
IMAGES_TO_DELETE=($(
openstack image list --tag ${image_tag} \
--sort created_at:desc -f value -c ID \
--property owner=$OS_PROJECT_ID | tail -n +$((NUM_IMAGES_TO_KEEP+1))
))
- |
for img in ${IMAGES_TO_DELETE[@]}; do
echo "Deleting image $img"
openstack image delete ${img}
done
rules:
- if: $NUM_IMAGES_TO_KEEP =~ /^(-\d+|0)$/
when: never
- if: $BUILD_TARGET
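cleanup_img keeps the NUM_IMAGES_TO_KEEP newest images per tag (the preceding rule skips the job entirely when NUM_IMAGES_TO_KEEP is zero or negative): the image list is sorted created_at:desc and tail -n +$((NUM_IMAGES_TO_KEEP+1)) drops the first N entries. A stand-in sketch of that arithmetic, with no OpenStack calls:

NUM_IMAGES_TO_KEEP=5
# stand-in for: openstack image list --tag "$image_tag" --sort created_at:desc -f value -c ID
printf 'img-%02d\n' {1..8} > /tmp/image_ids.txt         # img-01 is the newest
IMAGES_TO_DELETE=($(tail -n +$((NUM_IMAGES_TO_KEEP+1)) /tmp/image_ids.txt))
echo "${IMAGES_TO_DELETE[@]}"                           # img-06 img-07 img-08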
[defaults]
# change the default callback, you can only have one 'stdout' type enabled at a time.
#stdout_callback = skippy
stdout_callback = yaml
## Ansible ships with some plugins that require whitelisting,
## this is done to avoid running all of a type by default.
## These setting lists those that you want enabled for your system.
## Custom plugins should not need this unless plugin author specifies it.
# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
callbacks_enabled = timer, debug, profile_roles, profile_tasks, minimal
# Force color
force_color = true
......@@ -11,3 +11,8 @@
- { name: 'ssh_proxy_config', tags: 'ssh_proxy_config', when: enable_ssh_proxy_config }
- { name: 'ssl_cert', tags: 'ssl_cert', when: enable_ssl_certs }
- { name: 'rsyslog_config', tags: 'rsyslog_config', when: enable_rsyslog_config }
- { name: 'rewrite_map', tags: 'rewrite_map', when: enable_rewrite_map }
- { name: 'fail2ban', tags: 'fail2ban', when: enable_fail2ban }
- { name: 'install_node_exporter', tags: 'install_node_exporter', when: enable_node_exporter }
- { name: 'ood_config', tags: 'ood_config', when: enable_ood_config }
......@@ -34,6 +34,9 @@
mount_points:
- { "src": "master:/gpfs4", "path": "/gpfs4", "opts": "ro,sync,hard", "mode": "0755" }
- { "src": "master:/gpfs5", "path": "/gpfs5", "opts": "ro,sync,hard", "mode": "0755" }
autofs_mounts:
- { "src": "master:/gpfs4/&", "path": "/gpfs4", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs4", "map_name": "gpfs4", key: "*" }
- { "src": "master:/gpfs5/&", "path": "/gpfs5", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs5", "map_name": "gpfs5", key: "*" }
#SSH Host Keys
S3_ENDPOINT: ""
......@@ -47,10 +50,9 @@
# ssh proxy
enable_ssh_proxy_config: false
sshpiper_dest_dir: "/opt/sshpiper"
fail2ban_cidr_list: "127.0.0.1/8"
# rsyslog
enable_rsyslog_config: false
enable_rsyslog_config: true
rsyslog_target: "*.* @master:514"
# ssl certs
......@@ -63,3 +65,42 @@
ssl_cert_chain_file: ""
ssl_apache_config: ""
apache_service: "httpd"
# rewrite map
enable_rewrite_map: false
target_groups:
- {"name": "gpfs4", "host": "login001", "default": True }
- {"name": "gpfs5", "host": "login002", "default": False }
# account app
account_app_port: 8000
# fail2ban
enable_fail2ban: false
maxretry: 1
findtime: 600
bantime: 1200
fail2ban_white_list: "127.0.0.1/8"
# Node Exporter
enable_node_exporter: false
node_exporter_ver: "1.8.2"
node_exporter_filename: "node_exporter-{{ node_exporter_ver }}.linux-amd64"
node_exporter_user: node_exporter
node_exporter_group: node_exporter
node_exporter_port: 9100
# CentOS Repo
centos_base_url: "http://vault.centos.org"
# ood_config
enable_ood_config: false
ood_internal_ip: OOD_INTERNAL_IP
ood_hostname: ood-gpfs5
login_hostname: login001
ood_domain: https://rc.uab.edu
account_app: account
account_app_port: 8000
account_app_bind_address: ["0.0.0.0:{{account_app_port}}"]
ood_user_regex: "([^!]+?)(@uab.edu)?$"
cluster_name: CoD
......@@ -5,6 +5,5 @@
roles:
- { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
- { name: 'install_packages', tags: 'install_packages' }
- { name: 'pam_slurm_adopt', tags: 'pam_slurm_adopt' }
- { name: 'install_nhc', tags: 'install_nhc'}
......@@ -6,6 +6,3 @@
- { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
- { name: 'install_packages', tags: 'install_packages' }
- { name: 'install_zsh', tags: 'install_zsh' }
- name: Setup node for use as a virtual cheaha node
ansible.builtin.import_playbook: cheaha.yml
---
- name: Install fail2ban
ansible.builtin.package:
name: "{{ item }}"
state: present
loop:
- fail2ban
- fail2ban-firewalld
- name: Configure fail2ban
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
backup: true
loop:
- { src: 'jail.local.j2', dest: '/etc/fail2ban/jail.local' }
- { src: 'sshpiperd_filter.local.j2', dest: '/etc/fail2ban/filter.d/sshpiperd.local' }
- { src: 'sshpiperd_jail.local.j2', dest: '/etc/fail2ban/jail.d/sshpiperd.local' }
- name: Activate the firewalld support for fail2ban
ansible.builtin.command:
cmd: mv /etc/fail2ban/jail.d/00-firewalld.conf /etc/fail2ban/jail.d/00-firewalld.local
- name: Configure firewalld to allow ssh and sshpiper traffic
ansible.posix.firewalld:
port: "{{ item }}"
zone: public
state: enabled
permanent: true
loop:
- 2222/tcp
- 22/tcp
- name: Enable and start firewalld
ansible.builtin.service:
name: firewalld
enabled: true
state: restarted
- name: Enable and start fail2ban
ansible.builtin.service:
name: fail2ban
enabled: true
state: restarted
[DEFAULT]
banaction = firewalld
bantime = 1200
ignoreip = {{ fail2ban_cidr_list }}
bantime = {{ bantime }}
ignoreip = {{ fail2ban_white_list }}
[sshd]
enabled = true
# Refer to https://github.com/fail2ban/fail2ban/wiki/Developing-Regex-in-Fail2ban for developing regex using fail2ban
#
[INCLUDES]
before = common.conf
[DEFAULT]
_daemon = sshpiperd
__iso_datetime = "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:[+-]\d{2}:\d{2}|Z)"
__pref = time=%(__iso_datetime)s level=(?:debug|error)
[Definition]
# Define the prefix regex for the log lines
prefregex = ^<F-MLFID>%(__prefix_line)s%(__pref)s</F-MLFID>\s+<F-CONTENT>.+</F-CONTENT>$
# Failregex to match the specific failure log lines (prefregex is automatically included)
failregex = ^msg="connection from .*failtoban: ip <HOST> too auth many failures"$
ignoreregex =
mode = normal
maxlines = 1
# This configuration will block the remote host after {{maxretry}} failed SSH login attempts.
[sshpiperd]
enabled = true
filter = sshpiperd
logpath = /var/log/messages
port = 22
maxretry = {{ maxretry }}
backend = auto
findtime = {{ findtime }}
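Before enabling the jail above, the sshpiperd filter can be exercised against the log it will watch using fail2ban's bundled fail2ban-regex helper (log path as configured in the jail; flags illustrative):

fail2ban-regex /var/log/messages /etc/fail2ban/filter.d/sshpiperd.local --print-all-matched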
......@@ -15,6 +15,6 @@
ansible.builtin.replace:
path: "{{ item }}"
regexp: '^#baseurl=http://mirror.centos.org'
replace: 'baseurl=http://vault.centos.org'
replace: 'baseurl={{ centos_base_url }}'
backup: yes
with_items: "{{ repo_files.stdout_lines }}"
---
- name: Download node_exporter binary
ansible.builtin.get_url:
url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_ver }}/{{ node_exporter_filename }}.tar.gz"
dest: "/tmp/{{ node_exporter_filename }}.tar.gz"
- name: Extract node_exporter
ansible.builtin.unarchive:
src: "/tmp/{{ node_exporter_filename }}.tar.gz"
dest: "/tmp"
remote_src: yes
- name: Create system group {{ node_exporter_group }}
ansible.builtin.group:
name: "{{ node_exporter_group }}"
system: true
state: present
- name: Create system user account {{ node_exporter_user }}
ansible.builtin.user:
name: "{{ node_exporter_user }}"
comment: Prometheus node_exporter system account
group: "{{ node_exporter_group }}"
system: true
home: /var/lib/node_exporter
create_home: false
shell: /sbin/nologin
state: present
- name: Copy node_exporter binary
ansible.builtin.copy:
src: "/tmp/{{ node_exporter_filename }}/node_exporter"
dest: /usr/local/bin/node_exporter
remote_src: yes
owner: root
group: root
mode: 0755
- name: Copy systemd unit file
ansible.builtin.template:
src: node_exporter.service.j2
dest: /etc/systemd/system/node_exporter.service
owner: root
group: root
mode: '0644'
- name: Clean up /tmp
ansible.builtin.file:
path: "/tmp/{{ item }}"
state: absent
loop:
- "{{ node_exporter_filename }}.tar.gz"
- "{{ node_exporter_filename }}"
- name: Restart node_exporter service
ansible.builtin.systemd:
daemon_reload: yes
name: node_exporter
state: restarted
enabled: true
- name: Collect facts about system services
ansible.builtin.service_facts:
- name: Configure firewalld to allow prometheus
ansible.posix.firewalld:
port: "{{ node_exporter_port }}/tcp"
zone: public
state: enabled
permanent: true
when:
- "'firewalld.service' in ansible_facts.services"
- ansible_facts.services["firewalld.service"].state == "running"
- name: Enable and start firewalld
ansible.builtin.service:
name: firewalld
enabled: true
state: restarted
when:
- "'firewalld.service' in ansible_facts.services"
- ansible_facts.services["firewalld.service"].state == "running"
[Unit]
Description=Node Exporter
After=network.target
[Service]
User={{ node_exporter_user }}
Group={{ node_exporter_group }}
Type=simple
ExecStart=/usr/local/bin/node_exporter --web.listen-address=:{{ node_exporter_port }} --collector.filesystem.mount-points-exclude "^/(dev|proc|run/user/.+|run/credentials/.+|sys|var/lib/docker/.+)($|/)" --collector.filesystem.fs-types-exclude "^(autofs|binfmt_misc|bpf|cgroup|tmpfs|sunrpc|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
[Install]
WantedBy=multi-user.target
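After the role runs, the exporter can be spot-checked on the node itself (assuming the node_exporter_port of 9100 set in the group vars above):

systemctl status node_exporter --no-pager
curl -s http://localhost:9100/metrics | head -n 5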
......@@ -7,7 +7,6 @@
- nss-pam-ldapd
- openldap
- openldap-clients
- openldap-servers
- sssd-ldap
- name: Update nsswitch.conf to look for ldap
......
---
- name: Create base directories
ansible.builtin.file:
path: "{{ item.dir }}"
path: "{{ item.path }}"
state: directory
mode: "{{ item.mode }}"
loop:
- { dir: /local, mode: '0777' }
- { dir: /scratch, mode: '0755' }
- { dir: /share, mode: '0755' }
- { dir: /data/rc/apps, mode: '0755' } # this is only required for the symlink to be happy
- { dir: /data/user, mode: '0755' }
- { dir: /data/project, mode: '0755' }
- { path: /local, mode: '0777' }
- { path: /share, mode: '0755' }
- name: Create mountpoint dirs
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
mode: "{{ item.mode }}"
loop:
"{{ autofs_mounts }}"
- name: Remove unused entry in master map
ansible.builtin.replace:
......@@ -29,12 +33,7 @@
line: "{{ item.mount_point }} /etc/auto.{{ item.map_name }}"
create: yes
loop:
- { mount_point: "/cm/shared", map_name: "cm-share" }
- { mount_point: "/data/project", map_name: "data-project" }
- { mount_point: "/data/user", map_name: "data-user" }
- { mount_point: "/data/rc/apps", map_name: "data-rc-apps" }
- { mount_point: "/-", map_name: "scratch" }
- { mount_point: "/home", map_name: "home" }
"{{ autofs_mounts }}"
- name: Set up autofs map files
ansible.builtin.lineinfile:
......@@ -42,12 +41,7 @@
line: "{{ item.key }} -{{ item.opts }} {{ item.src }}"
create: true
loop:
- { map_name: "cm-share", key: "*", src: "gpfs.rc.uab.edu:/data/cm/shared-8.2/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
- { map_name: "data-project", key: "*", src: "gpfs.rc.uab.edu:/data/project/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
- { map_name: "data-user", key: "*", src: "gpfs.rc.uab.edu:/data/user/&", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
- { map_name: "data-rc-apps", key: "*", src: "gpfs.rc.uab.edu:/data/rc/apps/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
- { map_name: "scratch", key: "/scratch", src: "gpfs.rc.uab.edu:/scratch", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
- { map_name: "home", key: "*", src: ":/data/user/home/&", opts: 'fstype=bind' }
"{{ autofs_mounts }}"
- name: Create symbolic links
ansible.builtin.file:
......@@ -60,7 +54,8 @@
loop:
- { src: /data/rc/apps, dest: /share/apps }
- name: Enable autofs service
- name: Enable and start autofs service
ansible.builtin.service:
name: autofs
enabled: true
state: restarted
......@@ -13,6 +13,6 @@
path: "{{ item.path }}"
opts: "{{ item.opts }}"
state: mounted
fstype: nfs
fstype: "{{ item.fstype | default('nfs') }}"
loop:
"{{ mount_points }}"
const fs = require('fs');
const http = require('http');
const path = require('path');
const WebSocket = require('ws');
const express = require('express');
const pty = require('node-pty');
const hbs = require('hbs');
const dotenv = require('dotenv');
const Tokens = require('csrf');
const url = require('url');
const yaml = require('js-yaml');
const glob = require('glob');
const port = 3000;
const host_path_rx = '/ssh/([^\\/\\?]+)([^\\?]+)?(\\?.*)?$';
const helpers = require('./utils/helpers');
const pingInterval = 30000;
// Read in environment variables
dotenv.config({path: '.env.local'});
if (process.env.NODE_ENV === 'production') {
dotenv.config({path: '/etc/ood/config/apps/shell/env'});
}
// Keep app backwards compatible
if (fs.existsSync('.env')) {
console.warn('[DEPRECATION] The file \'.env\' is being deprecated. Please move this file to \'/etc/ood/config/apps/shell/env\'.');
dotenv.config({path: '.env'});
}
// Load color themes
var color_themes = {dark: [], light: []};
glob.sync('./color_themes/light/*').forEach(f => color_themes.light.push(require(path.resolve(f))));
glob.sync('./color_themes/dark/*').forEach(f => color_themes.dark.push(require(path.resolve(f))));
color_themes.json_array = JSON.stringify([...color_themes.light, ...color_themes.dark]);
const tokens = new Tokens({});
const secret = tokens.secretSync();
// Create all your routes
var router = express.Router();
router.get(['/', '/ssh'], function (req, res) {
res.redirect(req.baseUrl + '/ssh/default');
});
router.get('/ssh*', function (req, res) {
var theHost, theDir;
[theHost, theDir] = host_and_dir_from_url(req.url);
res.render('index',
{
baseURI: req.baseUrl,
csrfToken: tokens.create(secret),
host: theHost,
dir: theDir,
colorThemes: color_themes,
siteTitle: (process.env.OOD_DASHBOARD_TITLE || "Open OnDemand"),
});
});
router.use(express.static(path.join(__dirname, 'public')));
// Setup app
var app = express();
// Setup template engine
app.set('view engine', 'hbs');
app.set('views', path.join(__dirname, 'views'));
// Mount the routes at the base URI
app.use(process.env.PASSENGER_BASE_URI || '/', router);
// Setup websocket server
const server = new http.createServer(app);
const wss = new WebSocket.Server({ noServer: true });
let host_allowlist = new Set;
if (process.env.OOD_SSHHOST_ALLOWLIST){
host_allowlist = new Set(process.env.OOD_SSHHOST_ALLOWLIST.split(':'));
}
let default_sshhost, first_available_host;
glob.sync(path.join((process.env.OOD_CLUSTERS || '/etc/ood/config/clusters.d'), '*.y*ml'))
.map(yml => {
try {
return yaml.safeLoad(fs.readFileSync(yml));
} catch(err) { /** just keep going. dashboard should have an alert about it */}
})
.filter(config => (config && config.v2 && config.v2.login && config.v2.login.host) && ! (config.v2 && config.v2.metadata && config.v2.metadata.hidden))
.forEach((config) => {
let host = config.v2.login.host; //Already did checking above
let isDefault = config.v2.login.default;
host_allowlist.add(host);
if (isDefault) default_sshhost = host;
if (!first_available_host) first_available_host = host;
});
default_sshhost = process.env.OOD_DEFAULT_SSHHOST || process.env.DEFAULT_SSHHOST || default_sshhost || first_available_host;
if (default_sshhost) host_allowlist.add(default_sshhost);
function host_and_dir_from_url(url){
let match = url.match(host_path_rx),
hostname = null,
directory = null;
if (match) {
hostname = match[1] === "default" ? default_sshhost : match[1];
directory = match[2] ? decodeURIComponent(match[2]) : null;
}
return [hostname, directory];
}
function heartbeat() {
this.isAlive = true;
}
wss.on('connection', function connection (ws, req) {
var dir,
term,
args,
host,
cmd = process.env.OOD_SSH_WRAPPER || 'ssh';
ws.isAlive = true;
ws.on('pong', heartbeat);
console.log('Connection established');
[host, dir] = host_and_dir_from_url(req.url);
args = dir ? [host, '-t', 'cd \'' + dir.replace(/\'/g, "'\\''") + '\' ; exec ${SHELL} -l'] : [host];
process.env.LANG = 'en_US.UTF-8'; // this patch (from b996d36) lost when removing wetty (2c8a022)
term = pty.spawn(cmd, args, {
name: 'xterm-16color',
cols: 80,
rows: 30
});
console.log('Opened terminal: ' + term.pid);
term.on('data', function (data) {
ws.send(data, function (error) {
if (error) console.log('Send error: ' + error.message);
});
});
term.on('error', function (error) {
ws.close();
});
term.on('close', function () {
ws.close();
});
ws.on('message', function (msg) {
msg = JSON.parse(msg);
if (msg.input) term.write(msg.input);
if (msg.resize) term.resize(parseInt(msg.resize.cols), parseInt(msg.resize.rows));
});
ws.on('close', function () {
term.end();
console.log('Closed terminal: ' + term.pid);
});
});
const interval = setInterval(function ping() {
wss.clients.forEach(function each(ws) {
if (ws.isAlive === false) return ws.terminate();
ws.isAlive = false;
ws.ping();
});
}, pingInterval);
function custom_server_origin(default_value = null){
var custom_origin = null;
if(process.env.OOD_SHELL_ORIGIN_CHECK) {
// if ENV is set, do not use default!
if(process.env.OOD_SHELL_ORIGIN_CHECK.startsWith('http')){
custom_origin = process.env.OOD_SHELL_ORIGIN_CHECK;
}
}
else {
custom_origin = default_value;
}
return custom_origin;
}
function default_server_origin(headers){
var origin = null;
if (headers['x-forwarded-proto'] && headers['x-forwarded-host']){
origin = headers['x-forwarded-proto'] + "://" + headers['x-forwarded-host']
}
return origin;
}
server.on('upgrade', function upgrade(request, socket, head) {
const requestToken = new URLSearchParams(url.parse(request.url).search).get('csrf'),
client_origin = request.headers['origin'],
server_origin = custom_server_origin(default_server_origin(request.headers));
var host, dir;
[host, dir] = host_and_dir_from_url(request.url);
if (client_origin &&
client_origin.startsWith('http') &&
server_origin && client_origin !== server_origin) {
socket.write([
'HTTP/1.1 401 Unauthorized',
'Content-Type: text/html; charset=UTF-8',
'Content-Encoding: UTF-8',
'Connection: close',
'X-OOD-Failure-Reason: invalid origin',
].join('\r\n') + '\r\n\r\n');
socket.destroy();
} else if (!tokens.verify(secret, requestToken)) {
socket.write([
'HTTP/1.1 401 Unauthorized',
'Content-Type: text/html; charset=UTF-8',
'Content-Encoding: UTF-8',
'Connection: close',
'X-OOD-Failure-Reason: bad csrf token',
].join('\r\n') + '\r\n\r\n');
socket.destroy();
} else if (!helpers.hostInAllowList(host_allowlist, host)) { // host not in allowlist
socket.write([
'HTTP/1.1 401 Unauthorized',
'Content-Type: text/html; charset=UTF-8',
'Content-Encoding: UTF-8',
'Connection: close',
'X-OOD-Failure-Reason: host not specified in allowlist or cluster configs',
].join('\r\n') + '\r\n\r\n');
socket.destroy();
} else {
wss.handleUpgrade(request, socket, head, function done(ws) {
wss.emit('connection', ws, request);
});
}
});
server.listen(port, function () {
console.log('Listening on ' + port);
});
---
- name: Configure OOD to run behind the proxy
ansible.builtin.template:
src: ood_proxy.conf.j2
dest: /opt/rh/httpd24/root/etc/httpd/conf.d/ood-proxy.conf
- name: Patch shell app with shell timeout fix
ansible.builtin.copy:
src: shell-app.js
dest: /var/www/ood/apps/sys/shell/app.js
- name: Create a directory if it does not exist
ansible.builtin.file:
path: /etc/ood/config/apps/shell
state: directory
mode: "0755"
- name: Shell app configuration env
ansible.builtin.template:
src: shell_app.env.j2
dest: /etc/ood/config/apps/shell/env
- name: Point shell app to login node
ansible.builtin.replace:
path: /etc/ood/config/clusters.d/{{ cluster_name }}.yml
regexp: '^(\s+host:).*'
replace: '\1 "{{ login_hostname }}"'
backup: yes
- name: Modify account app binding to listen to http-proxy
ansible.builtin.lineinfile:
path: /var/www/ood/register/{{account_app}}/{{account_app}}.ini
regexp: '^(bind\s=).*'
line: "bind = {{ account_app_bind_address }}"
- name: Restart httpd24-httpd
ansible.builtin.service:
name: httpd24-httpd
state: restarted
#
# Open OnDemand Portal
#
# Generated using ood-portal-generator version 0.8.0
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# !! !!
# !! DO NOT EDIT THIS FILE !!
# !! !!
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
#
# This file is auto-generated by ood-portal-generator and will be over-written
# in future updates.
#
# 1. To modify this file, first update the global configuration file:
#
# /etc/ood/config/ood_portal.yml
#
# You can find more information about the ood-portal-generator configuration
# at:
#
# https://osc.github.io/ood-documentation/master/infrastructure/ood-portal-generator.html
#
# 2. Then build/install the updated Apache config with:
#
# sudo /opt/ood/ood-portal-generator/sbin/update_ood_portal
#
# 3. Finally, restart Apache to have the changes take effect:
#
# # For CentOS 6
# sudo service httpd24-httpd condrestart
# sudo service httpd24-htcacheclean condrestart
#
# # For CentOS 7
# sudo systemctl try-restart httpd24-httpd.service httpd24-htcacheclean.service
#
# The Open OnDemand portal VirtualHost
#
<VirtualHost {{ ood_internal_ip }}:80>
ServerName {{ ood_hostname }}
ErrorLog "logs/{{ ood_hostname }}_error.log"
CustomLog "logs/{{ ood_hostname }}_access.log" combined
RewriteEngine On
RewriteCond %{HTTP_HOST} !^(rc.uab.edu(:80)?)?$ [NC]
RewriteRule ^(.*) http://rc.uab.edu:80$1 [R=301,NE,L]
# Lua configuration
#
LuaRoot "/opt/ood/mod_ood_proxy/lib"
LogLevel lua_module:info
# Log authenticated user requests (requires min log level: info)
LuaHookLog logger.lua logger
# Authenticated-user to system-user mapping configuration
#
SetEnv OOD_USER_MAP_CMD "/opt/ood/ood_auth_map/bin/user_auth.py"
SetEnv OOD_USER_ENV "REMOTE_USER"
SetEnv OOD_MAP_FAIL_URI "/account"
# Per-user Nginx (PUN) configuration
# NB: Apache will need sudo privs to control the PUNs
#
SetEnv OOD_PUN_STAGE_CMD "sudo /opt/ood/nginx_stage/sbin/nginx_stage"
#
# Below is used for sub-uri's this Open OnDemand portal supports
#
# Serve up publicly available assets from local file system:
#
# http://{{ ood_hostname }}:80/public/favicon.ico
# #=> /var/www/ood/public/favicon.ico
#
Alias "/public" "/var/www/ood/public"
<Directory "/var/www/ood/public">
Options Indexes FollowSymLinks
AllowOverride None
Require all granted
</Directory>
# Reverse proxy traffic to backend webserver through IP sockets:
#
# http://{{ ood_hostname }}:80/node/HOST/PORT/index.html
# #=> http://HOST:PORT/node/HOST/PORT/index.html
#
<LocationMatch "^/node/(?<host>c\d+)/(?<port>\d+)">
RewriteCond %{IS_SUBREQ} ^false$
RewriteCond %{HTTP:Proxy-user} "([^!]+?)(@uab.edu)?$"
RewriteRule . - [E=REMOTE_USER:%1]
# ProxyPassReverse implementation
Header edit Location "^[^/]+//[^/]+" ""
# ProxyPassReverseCookieDomain implementation
Header edit* Set-Cookie ";\s*(?i)Domain[^;]*" ""
# ProxyPassReverseCookiePath implementation
Header edit* Set-Cookie ";\s*(?i)Path[^;]*" ""
Header edit Set-Cookie "^([^;]+)" "$1; Path=/node/%{MATCH_HOST}e/%{MATCH_PORT}e"
LuaHookFixups node_proxy.lua node_proxy_handler
</LocationMatch>
# Reverse "relative" proxy traffic to backend webserver through IP sockets:
#
# http://{{ ood_hostname }}:80/rnode/HOST/PORT/index.html
# #=> http://HOST:PORT/index.html
#
<LocationMatch "^/rnode/(?<host>c\d+)/(?<port>\d+)(?<uri>/.*|)">
RewriteCond %{IS_SUBREQ} ^false$
RewriteCond %{HTTP:Proxy-user} "([^!]+?)(@uab.edu)?$"
RewriteRule . - [E=REMOTE_USER:%1]
# ProxyPassReverse implementation
Header edit Location "^([^/]+//[^/]+)|(?=/)" "/rnode/%{MATCH_HOST}e/%{MATCH_PORT}e"
# ProxyPassReverseCookieDomain implementation
Header edit* Set-Cookie ";\s*(?i)Domain[^;]*" ""
# ProxyPassReverseCookiePath implementation
Header edit* Set-Cookie ";\s*(?i)Path[^;]*" ""
Header edit Set-Cookie "^([^;]+)" "$1; Path=/rnode/%{MATCH_HOST}e/%{MATCH_PORT}e"
LuaHookFixups node_proxy.lua node_proxy_handler
</LocationMatch>
# Reverse proxy traffic to backend PUNs through Unix domain sockets:
#
# http://{{ ood_hostname }}:80/pun/dev/app/simulations/1
# #=> unix:/path/to/socket|http://localhost/pun/dev/app/simulations/1
#
SetEnv OOD_PUN_URI "/pun"
<Location "/pun">
RewriteCond %{IS_SUBREQ} ^false$
RewriteCond %{HTTP:Proxy-user} "([^!]+?)(@uab.edu)?$"
RewriteRule . - [E=REMOTE_USER:%1]
ProxyPassReverse "http://localhost/pun"
# ProxyPassReverseCookieDomain implementation (strip domain)
Header edit* Set-Cookie ";\s*(?i)Domain[^;]*" ""
# ProxyPassReverseCookiePath implementation (less restrictive)
Header edit* Set-Cookie ";\s*(?i)Path\s*=(?-i)(?!\s*/pun)[^;]*" "; Path=/pun"
SetEnv OOD_PUN_SOCKET_ROOT "/var/run/ondemand-nginx"
SetEnv OOD_PUN_MAX_RETRIES "5"
LuaHookFixups pun_proxy.lua pun_proxy_handler
</Location>
# Control backend PUN for authenticated user:
# NB: See mod_ood_proxy for more details.
#
# http://{{ ood_hostname }}:80/nginx/stop
# #=> stops the authenticated user's PUN
#
SetEnv OOD_NGINX_URI "/nginx"
<Location "/nginx">
RewriteCond %{IS_SUBREQ} ^false$
RewriteCond %{HTTP:Proxy-user} "([^!]+?)(@uab.edu)?$"
RewriteRule . - [E=REMOTE_USER:%1]
LuaHookFixups nginx.lua nginx_handler
</Location>
# Redirect root URI to specified URI
#
# http://{{ ood_hostname }}:80/
# #=> http://{{ ood_hostname }}:80/pun/sys/dashboard
#
RedirectMatch ^/$ "/pun/sys/dashboard"
# Redirect logout URI to specified redirect URI
#
# http://{{ ood_hostname }}:80/logout
# #=> http://{{ ood_hostname }}:80/pun/sys/dashboard/logout
#
Redirect "/logout" "/pun/sys/dashboard/logout"
# Register and/or unregister the mapping of an authenticated-user to a system-user
# NB: This is not needed for regular expression mapping
#
# http://{{ ood_hostname }}:80/account
# #=> /var/www/ood/register/
#
Alias "/account" "/var/www/ood/register"
<Directory "/var/www/ood/register">
Options Indexes FollowSymLinks
AllowOverride None
RewriteCond %{IS_SUBREQ} ^false$
RewriteCond %{HTTP:Proxy-user} "([^!]+?)(@uab.edu)?$"
RewriteRule . - [E=REMOTE_USER:%1]
</Directory>
</VirtualHost>
OOD_SHELL_ORIGIN_CHECK='{{ ood_domain }}'