diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d1cb167c73afd956b16a837a07c41372d24b0f61..db544ce6283844bfeda75d2da5150af33fa3e613 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -8,13 +8,12 @@ variables:
   ANSIBLE_REMOTE_TMP: "/tmp"
   AWS_DEFAULT_REGION: "bhm"
   AWS_HOST: "s3.lts.rc.uab.edu"
-  FF_SCRIPT_SECTIONS: "true"
   OS_AUTH_TYPE: "v3applicationcredential"
   OS_AUTH_URL: "https://keystone.cloud.rc.uab.edu:5000/v3"
   OS_IDENTITY_API_VERSION: "3"
   OS_INTERFACE: "public"
   OS_REGION_NAME: "bhm1"
-  OOD_INSTANCE_NETWORK: "knightly-network"
+  PROXY_NETWORK: "proxy-net"
   PKR_VAR_flavor: "m1.medium-ruffner"
   PKR_VAR_source_image: "CentOS-7-x86_64-GenericCloud-2009"
   PKR_VAR_floating_ip_network: "uab-campus"
@@ -22,26 +21,20 @@ variables:
   PKR_VAR_skip_create_image: "false"
   PKR_VAR_ssh_username: "centos"
   PKR_VAR_networks: '["8cf2f12e-905d-46d9-bc70-b0897c65f75a"]'
-  PKR_VAR_image_membership: '["cf6fa1e53d4c40a49f4e0e469c440359"]'
   GIT_AUTHOR_NAME: "Gitlab runner"
   GIT_AUTHOR_EMAIL: "gitlab@runner"
-  NUM_SERVER_TO_KEEP: 1
-  NUM_IMAGE_TO_KEEP: 30
-  TIMESTAMP_REGEXP: '[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{6}'
-  PKR_VAR_root_ssh_key: "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBAFqqWgmYpEaGtHBeTu27ntVJpYjwq/x5aBefrvfhk8Z9lE3cuZ26vJ9n/9tGE4Zn2Pew1mpZgi6PzfJ3vMt8yA= root@master"
-  DEV_KEY: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCpncAcYosVHt7HsUcE2XOYDuCi4HQnmFJv279LOcpZgXtZ6o0BM1fe5FgJS0X1ohBXQUFRuYJuJSW/GSmC1K8T+wCrKjZLJdMbqrubHV27diUZfdoVkoJy1vcAQF5nEcoTC7MpAFbBomdn2rsrpgQe8DGiURV7+soqybXV1OsIR3FFf6npnUaskHYT/oVtG9eBOnscyBxoVgbxzlmyoBLXED/sHKFw4nQSF/glYKEFiDu6TRTsBBEGvv23Qo/66QpQiFJ6TNfApNiyY9L1X+Dy8EWU6lozmNgwGDjXQ70Lr6xHnA0QGVALJlHXa6QjpgtpC5Nefsdvtf1hpfFo2VutpbSB+aq9jk3gWNN+XkhrWN5PiwP7YYJNw/WozyfL+IhwjfHZGxkuws+wGR6ZKxlX9W9Vrsq9ncYNKuhy2SdsR6s2XECQtrEQ6ZlX5jRt6Yh5M9ls5fMsWEqknDPmr1Ui6wV7NxprYngo9fLSdYO/ETIO3S6PB0aEHOZOyGitGaM06EmNpvjQn/QkkaVgt/O8wKL1o1AVzXhDMAFvtG6ejppV6kuTUHXFgSGZF6N9fnP91HuytyzC09F+NMWcmnRdrgXlHapjuuL3zzi+XLCQvk8+aYTzBKx1nU2FPMDRZ9sInGmqdTuM002E7qVbaCy4OxcWaAS/L2UVhGnHr+egYw== louistw@uab.edu"
+  INSTANCE_FLAVOR: "m1.medium-ruffner"
+  HTTP_PROXY_INSTANCE_NAME: "http-proxy"
+  SSH_PROXY_INSTANCE_NAME: "ssh-proxy"
 
 stages:
-  - pre-build
   - build
-  - test
   - deploy
-  - cleanup
 
 workflow:
   rules:
-    - if: $CI_PIPELINE_SOURCE == 'merge_request_event'
-    - if: $CI_PIPELINE_SOURCE == 'schedule'
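+    # run pipelines only for manual (web) and scheduled triggers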
+    - if: $CI_PIPELINE_SOURCE == "web"
+    - if: $CI_PIPELINE_SOURCE == "schedule"
 
 .get_build_date: &get_build_date
   - export BUILD_DATE=$(TZ=America/Chicago date +%Y-%m-%dT%H%M%S)
@@ -50,356 +43,333 @@ workflow:
 .update_ansible_repo: &update_ansible_repo
   - *get_build_date
   - |
-    if [ ! -d $CI_PROJECT_DIR/CRI_XCBC ]; then
-      git clone https://github.com/uabrc/CRI_XCBC.git
-      cd CRI_XCBC
-      git remote add upstream https://github.com/jprorama/CRI_XCBC.git
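+    # derive the local checkout dir name from the target repo URL (e.g. CRI_XCBC)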
+    export EXT_REPO_DIR=$(basename -s .git $EXT_PR_TARGET_REPO)
+    if [ ! -d $CI_PROJECT_DIR/$EXT_REPO_DIR ]; then
+      git clone ${EXT_PR_TARGET_REPO} ${EXT_REPO_DIR}
+      cd ${EXT_REPO_DIR}
+      git remote add upstream ${EXT_PR_SRC_REPO}
       cd ..
     fi
-  - cd CRI_XCBC
+  - cd ${EXT_REPO_DIR}
   - git config user.name "${GIT_AUTHOR_NAME}"
   - git config user.email "${GIT_AUTHOR_EMAIL}"
-  - git fetch origin uab-prod
-  - git fetch upstream dev
-  - git checkout uab-prod
-  - git merge origin/uab-prod
+  - git checkout ${EXT_PR_TARGET_BRANCH}
+  - git fetch origin ${EXT_PR_TARGET_BRANCH}
+  - git merge origin/${EXT_PR_TARGET_BRANCH}
   - git checkout -b integration
-  - git merge upstream/dev
-  - export CRI_XCBC_HEAD=$(git rev-parse --short HEAD)
-  - export CRI_XCBC_dev=$(git rev-parse --short upstream/dev)
-  - export CRI_XCBC_prod=$(git rev-parse --short origin/uab-prod)
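+  # merge the external PR source branch into the local integration branch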
+  - git fetch upstream ${EXT_PR_SRC_BRANCH}
+  - git merge upstream/${EXT_PR_SRC_BRANCH}
+  # export vars into job artifacts
+  - export EXT_REPO_HEAD=$(git rev-parse --short HEAD)
+  - export EXT_PR_SRC_BRANCH_SHA=$(git rev-parse --short upstream/${EXT_PR_SRC_BRANCH})
+  - export EXT_PR_TARGET_BRANCH_SHA=$(git rev-parse --short origin/${EXT_PR_TARGET_BRANCH})
   - cd ..
   - export PACKER_IMAGE_HEAD=$(git rev-parse --short HEAD)
-  - echo CRI_XCBC_HEAD=${CRI_XCBC_HEAD} | tee -a $CI_PROJECT_DIR/image.env
-  - echo CRI_XCBC_dev=${CRI_XCBC_dev} | tee -a $CI_PROJECT_DIR/image.env
-  - echo CRI_XCBC_prod=${CRI_XCBC_prod} | tee -a $CI_PROJECT_DIR/image.env
+  - echo EXT_REPO_HEAD=${EXT_REPO_HEAD} | tee -a $CI_PROJECT_DIR/image.env
+  - echo EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} | tee -a $CI_PROJECT_DIR/image.env
+  - echo EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} | tee -a $CI_PROJECT_DIR/image.env
   - echo PACKER_IMAGE_HEAD=${PACKER_IMAGE_HEAD} | tee -a $CI_PROJECT_DIR/image.env
 
 .get_ansible_files: &get_ansible_files
   - s3cmd get --force -r --host=$AWS_HOST --host-bucket=$AWS_HOST s3://cheaha-cloud-ansible-files/ ansible/files/
 
-build_docker_image:
-  image: docker:20.10.17
-  stage: pre-build
-  services:
-    - docker:20.10.16-dind
-  tags:
-    - dind
-  before_script:
-    - *get_build_date
-    - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
-  script:
-    - docker build -t $CI_REGISTRY_IMAGE:$BUILD_DATE -t $CI_REGISTRY_IMAGE:latest .
-    - >
-      docker run --rm $CI_REGISTRY_IMAGE bash -c
-      'ansible --version &&
-      openstack --version &&
-      packer version &&
-      s3cmd --version &&
-      terraform --version'
-    - docker push --all-tags $CI_REGISTRY_IMAGE
-  rules:
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
-      changes:
-        - Dockerfile
-      allow_failure: true
-
-build_ood_image:
-  stage: build
-  tags:
-    - build
+.build_proxy_image_template: &build_proxy_image_template
   script:
     - *update_ansible_repo
     - *get_ansible_files
-    - >
-      curl --header "PRIVATE-TOKEN: ${ANSIBLE_VAR_TOKEN}"
-      "${CI_API_V4_URL}/projects/2836/repository/files/knightly/raw?ref=main"
-      -o CRI_XCBC/group_vars/knightly
-    - 'sed -i -E "s/(lts_access_key: ).*/\1\"${AWS_ACCESS_KEY_ID}\"/" CRI_XCBC/group_vars/knightly'
-    - 'sed -i -E "s/(lts_secret_key: ).*/\1\"${AWS_SECRET_ACCESS_KEY}\"/" CRI_XCBC/group_vars/knightly'
-    - 'sed -i -E "s/(user_register_app_key: ).*/\1\"${SELF_REG_APP_KEY}\"/" CRI_XCBC/group_vars/knightly'
-    - 'sed -i -E "s/(celery_user_password: ).*/\1\"${CELERY_PASSWD}\"/" CRI_XCBC/group_vars/knightly'
-    - 'sed -i -E "s|(ssh_pub_key: ).*|\1\"{{ lookup(''file'', ''${SSH_PUB_KEY}'') }}\"|" CRI_XCBC/group_vars/knightly'
-    - export PKR_VAR_flavor="${OOD_BUILD_FLAVOR:-$PKR_VAR_flavor}"
-    - packer init openstack-ood
-    - packer validate openstack-ood
+    # packer vars for job env
+    - export PKR_VAR_flavor="${PROXY_BUILD_FLAVOR:-$PKR_VAR_flavor}"
+    - export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
+    - export PKR_VAR_image_date_suffix=false
     - |
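+      # tag MR-built images by PR number; scheduled builds by BUILD_TAG or date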
       if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then
-        export PKR_VAR_image_name="ood-PR-${CI_MERGE_REQUEST_IID}"
-        echo INSTANCE_FLAVOR="${PKR_VAR_flavor}" | tee -a $CI_PROJECT_DIR/image.env
-        echo OOD_INSTANCE_NAME="ood-PR-${CI_MERGE_REQUEST_IID}" | tee -a $CI_PROJECT_DIR/image.env
-        export FLOATING_IP=$(openstack floating ip create uab-campus -f value -c floating_ip_address)
-        echo FLOATING_IP=$FLOATING_IP | tee -a $CI_PROJECT_DIR/image.env
-        sed -i -E "s/(ood_servername: ).*/\1\"$CI_COMMIT_REF_SLUG.$FLOATING_IP.nip.io\"/" CRI_XCBC/group_vars/knightly
+        export PKR_VAR_image_name="${BUILD_TARGET}-PR-${CI_MERGE_REQUEST_IID}"
       elif [ $CI_PIPELINE_SOURCE == 'schedule' ]; then
-        export PKR_VAR_image_name="ood-${BUILD_DATE}"
-        echo INSTANCE_FLAVOR="${OOD_INSTANCE_FLAVOR:-cpu16-64g}" | tee -a $CI_PROJECT_DIR/image.env
-        echo OOD_INSTANCE_NAME="ood-knightly" | tee -a $CI_PROJECT_DIR/image.env
-        echo FLOATING_IP=$TEST_IP | tee -a $CI_PROJECT_DIR/image.env
+        export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
       fi
-    - >
-      PKR_VAR_build_instance_name="ood-${CRI_XCBC_HEAD}"
-      PKR_VAR_image_date_suffix=false
-      packer build -machine-readable openstack-ood | tee ood_build.log
-    - export BUILT_OOD_IMAGE_ID=$(grep 'Image:' ood_build.log | awk '{print $4}')
-    - echo BUILT_OOD_IMAGE_ID=${BUILT_OOD_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
-    - openstack image set --property CRI_XCBC_prod=${CRI_XCBC_prod} --property CRI_XCBC_dev=${CRI_XCBC_dev} --property PACKER_IMAGE_HEAD=${PACKER_IMAGE_HEAD} ${BUILT_OOD_IMAGE_ID}
+    # Ansible var overrides
+    - |
+      if [ -n "${PROXY_ENABLE_VAR}" ]; then
+        sed -i -E "s/(${PROXY_ENABLE_VAR}: ).*/\1true/" $EXT_REPO_DIR/group_vars/all
+      fi
+    - 'sed -i -E "s|(s3_endpoint: ).*|\1\"${S3_ENDPOINT}\"|" $EXT_REPO_DIR/group_vars/all'
+    - 'sed -i -E "s/(lts_access_key: ).*/\1\"${AWS_ACCESS_KEY_ID}\"/" $EXT_REPO_DIR/group_vars/all'
+    - 'sed -i -E "s/(lts_secret_key: ).*/\1\"${AWS_SECRET_ACCESS_KEY}\"/" $EXT_REPO_DIR/group_vars/all'
+    - 'sed -i -E "s/(s3_shibboleth_bucket_name: ).*/\1\"${S3_SHIBBOLETH_BUCKET_NAME}\"/" $EXT_REPO_DIR/group_vars/all'
+    - 'sed -i -E "s/(s3_shibboleth_object_name: ).*/\1\"${S3_SHIBBOLETH_OBJECT_NAME}\"/" $EXT_REPO_DIR/group_vars/all'
+    - 'sed -i -E "s|(ssh_pub_key: ).*|\1\"{{ lookup(''file'', ''${SSH_PUB_KEY}'') }}\"|" $EXT_REPO_DIR/group_vars/all'
+    # packer commands
+    - packer init openstack-proxy
+    - packer validate openstack-proxy
+    - packer build -machine-readable openstack-proxy | tee proxy_build.log
+    - export BUILT_PROXY_IMAGE_ID=$(grep 'Image:' proxy_build.log | awk '{print $4}')
+    - echo BUILT_PROXY_IMAGE_ID=${BUILT_PROXY_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
+    # set image properties with repo state
+    - openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${PACKER_IMAGE_HEAD} ${BUILT_PROXY_IMAGE_ID}
   artifacts:
     reports:
       dotenv: image.env
 
-test_ood_image:
-  stage: test
-  needs: [build_ood_image]
+build_http_proxy_image:
+  stage: build
   environment:
-    name: knightly
+    name: build
   tags:
     - build
-  script:
-    - openstack image set --accept $BUILT_OOD_IMAGE_ID
-    - FAILED=false
-    - |
-      eval $(ssh-agent -s)
-      chmod 400 "$SSH_PRIV_KEY"
-      ssh-add "$SSH_PRIV_KEY"
-      mkdir ~/.ssh
-      chmod 700 ~/.ssh
-    - OLD_INSTANCE_IP=$(openstack floating ip list --floating-ip-address $CHEAHA_IP -c "Fixed IP Address" -f value)
-    - echo $OLD_INSTANCE_IP
-    - |
-      if [ ! -z $OLD_INSTANCE_IP ]; then
-        export OLD_INSTANCE_ID=$(openstack server list --name $OOD_INSTANCE_NAME --ip $OLD_INSTANCE_IP -c ID -f value)
-      fi
-    - echo OLD_INSTANCE_ID=$OLD_INSTANCE_ID | tee -a instance.env
-    - |
-      cat > user_data.txt << OEOF
-      #!/bin/bash
-      echo "Starting user_data: \$(date)"
-      cat > /etc/resolv.conf << EOF
-      search openstack.internal cm.cluster rc.uab.edu ib.cluster drac.cluster eth.cluster ib-hdr.cluster
-      nameserver 172.20.0.25
-      EOF
-      echo "$DEV_KEY" >> /root/.ssh/authorized_keys
-      mkdir -p /run/shibboleth
-      chown shibd:shibd /run/shibboleth
-      echo "Installing s3cmd: \$(date)"
-      pip3 install s3cmd
-      echo "Downloading hostkey via s3cmd: \$(date)"
-      s3cmd get --force -r --access_key=$AWS_ACCESS_KEY_ID --secret_key=$AWS_SECRET_ACCESS_KEY --host=$AWS_HOST --host-bucket=$AWS_HOST s3://knightly-key/ /etc/ssh/
-      echo "Download completed: \$(date)"
-      OEOF
-    - >
-      export NEW_INSTANCE_ID=$(openstack server create
-      -c id -f value --image $BUILT_OOD_IMAGE_ID
-      --network $OOD_INSTANCE_NETWORK
-      --security-group ood-https-ports
-      --security-group node-exporter
-      --security-group allow-ssh
-      --user-data user_data.txt
-      --flavor $INSTANCE_FLAVOR
-      --wait
-      $OOD_INSTANCE_NAME)
-    - echo NEW_INSTANCE_ID=$NEW_INSTANCE_ID | tee -a instance.env
-    - openstack server add floating ip $NEW_INSTANCE_ID $FLOATING_IP
-    - >
-      curl --retry 10 --retry-delay 20 --retry-connrefused https://knightly.rc.uab.edu/Shibboleth.sso/Metadata --resolve knightly.rc.uab.edu:443:$FLOATING_IP -kf
-      || FAILED=true
-    - |
-      cp "$SSH_KNOWN_HOSTS" ~/.ssh/known_hosts
-      chmod 644 ~/.ssh/known_hosts
-      until ssh acctsvc@$FLOATING_IP hostname; do sleep 5; done
-      ssh acctsvc@$FLOATING_IP '[ $(mount | grep "etc/auto" | wc -l) -eq 6 ]' || FAILED=true
-    - |
-      if [ "$FAILED" = true ]; then
-        if [ "${DELETE_WHEN_FAILED-true}" = true ]; then
-          openstack server delete $NEW_INSTANCE_ID
-          echo "DELETE_BUILT_IMAGE=true" | tee -a instance.env
-        fi
-        false
-      fi
-    - openstack server remove floating ip $NEW_INSTANCE_ID $FLOATING_IP
-  artifacts:
-    reports:
-      dotenv: instance.env
+  variables:
+    PROXY_ENABLE_VAR: "enable_http_proxy"
+  <<: *build_proxy_image_template
   rules:
-    - if: $CI_PIPELINE_SOURCE == "schedule"
+    - if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "http-proxy"
       when: always
 
-test_ood_image_mr:
-  stage: test
-  needs: [build_ood_image]
+build_ssh_proxy_image:
+  stage: build
+  environment:
+    name: build
   tags:
     - build
+  variables:
+    PROXY_ENABLE_VAR: "enable_ssh_proxy"
+  <<: *build_proxy_image_template
+  rules:
+    - if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "ssh-proxy"
+      when: always
+
+.build_login_image_template: &build_login_image_template
   script:
-    - export OOD_INSTANCE_NETWORK="cicd-net"
-    - FAILED=false
-    - |
-      eval $(ssh-agent -s)
-      chmod 400 "$SSH_PRIV_KEY"
-      ssh-add "$SSH_PRIV_KEY"
-      mkdir ~/.ssh
-      chmod 700 ~/.ssh
-    - |
-      cat > user_data.txt << OEOF
-      #!/bin/bash
-      cat > /etc/resolv.conf << EOF
-      search openstack.internal cm.cluster rc.uab.edu ib.cluster drac.cluster eth.cluster ib-hdr.cluster
-      nameserver 172.20.0.25
-      EOF
-      echo "$DEV_KEY" >> /root/.ssh/authorized_keys
-      mkdir -p /run/shibboleth
-      chown shibd:shibd /run/shibboleth
-      OEOF
-    - >
-      export NEW_INSTANCE_ID=$(openstack server create
-      -c id -f value --image $BUILT_OOD_IMAGE_ID
-      --network $OOD_INSTANCE_NETWORK
-      --security-group ood-https-ports
-      --security-group allow-ssh
-      --user-data user_data.txt
-      --flavor $INSTANCE_FLAVOR
-      --wait
-      $OOD_INSTANCE_NAME)
-    - echo NEW_INSTANCE_ID=$NEW_INSTANCE_ID | tee -a instance.env
-    - openstack server add floating ip $NEW_INSTANCE_ID $FLOATING_IP
-    - >
-      curl --retry 10 --retry-delay 20 --retry-connrefused https://knightly.rc.uab.edu/Shibboleth.sso/Metadata --resolve knightly.rc.uab.edu:443:$FLOATING_IP -kf
-      || FAILED=true
-    - ssh -o StrictHostKeyChecking=no acctsvc@$FLOATING_IP '[ $(mount | grep "etc/auto" | wc -l) -eq 6 ]' || FAILED=true
+    - *update_ansible_repo
+    - *get_ansible_files
+    # packer vars for job env
+    - export PKR_VAR_flavor="${PROXY_BUILD_FLAVOR:-$PKR_VAR_flavor}"
+    - export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
+    - export PKR_VAR_image_date_suffix=false
     - |
-      if [ "$FAILED" = true ]; then
-        if [ "${DELETE_WHEN_FAILED-true}" = true ]; then
-          openstack server delete $NEW_INSTANCE_ID
-          openstack image delete $BUILT_OOD_IMAGE_ID
-        fi
-        false
+      if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then
+        export PKR_VAR_image_name="${BUILD_TARGET}-PR-${CI_MERGE_REQUEST_IID}"
+      elif [ $CI_PIPELINE_SOURCE == 'schedule' ]; then
+        export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
       fi
+    # packer commands
+    - packer init openstack-login
+    - packer validate openstack-login
+    - packer build -machine-readable openstack-login | tee login_build.log
+    - export BUILT_LOGIN_IMAGE_ID=$(grep 'Image:' login_build.log | awk '{print $4}')
+    - echo BUILT_LOGIN_IMAGE_ID=${BUILT_LOGIN_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
+    # set image properties with repo state
+    - openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${CI_COMMIT_SHORT_SHA} ${BUILT_LOGIN_IMAGE_ID}
   artifacts:
     reports:
-      dotenv: instance.env
-  rules:
-    - if: $CI_MERGE_REQUEST_ID
-
-deploy_review:
-  stage: deploy
-  script:
-    - echo "Deploy Review App"
-  environment:
-    name: review/$CI_COMMIT_REF_SLUG
-    url: https://$CI_COMMIT_REF_SLUG.$FLOATING_IP.nip.io
-    on_stop: stop_review
-    auto_stop_in: 2 days
-  tags:
-    - build
-  rules:
-    - if: $CI_MERGE_REQUEST_ID
+      dotenv: image.env
 
-stop_review:
-  stage: deploy
-  script:
-    - openstack server delete $NEW_INSTANCE_ID
-    - openstack image delete $BUILT_OOD_IMAGE_ID
-    - openstack floating ip delete $FLOATING_IP
+build_login_image:
+  stage: build
   environment:
-    name: review/$CI_COMMIT_REF_SLUG
-    action: stop
+    name: build
   tags:
     - build
+  <<: *build_login_image_template
   rules:
-    - if: $CI_MERGE_REQUEST_ID
-      when: manual
+    - if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "login"
+      when: always
 
-deploy_knightly:
-  stage: deploy
+build_ood_image:
+  stage: build
   environment:
-    name: knightly
+    name: build
   tags:
     - build
   script:
+    - *update_ansible_repo
+    - *get_ansible_files
+    # packer vars for job env
+    - export PKR_VAR_flavor="${OOD_BUILD_FLAVOR:-$PKR_VAR_flavor}"
+    - export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
+    - export PKR_VAR_image_date_suffix=false
+    - export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
     - |
-      if [ ! -z $OLD_INSTANCE_ID ]; then
-        openstack server remove floating ip $OLD_INSTANCE_ID $CAMPUS_IP
-        openstack server remove floating ip $OLD_INSTANCE_ID $CHEAHA_IP
-      fi
-    - |
-      if [ ! -z $NEW_INSTANCE_ID ]; then
-        openstack server add floating ip $NEW_INSTANCE_ID $CAMPUS_IP
-        openstack server add floating ip $NEW_INSTANCE_ID $CHEAHA_IP
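+      # fetch env-specific group_vars and inject secrets for knightly/prod builds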
+      if [ "$ENV" = 'knightly' ] || [ "$ENV" = 'prod' ]; then
+        curl --header "PRIVATE-TOKEN: ${ANSIBLE_VAR_TOKEN}" \
+        "${CI_API_V4_URL}/projects/2836/repository/files/$ENV/raw?ref=main" \
+        -o CRI_XCBC/group_vars/$ENV
+        sed -i -E "s/(lts_access_key: ).*/\1\"${AWS_ACCESS_KEY_ID}\"/" CRI_XCBC/group_vars/$ENV
+        sed -i -E "s/(lts_secret_key: ).*/\1\"${AWS_SECRET_ACCESS_KEY}\"/" CRI_XCBC/group_vars/$ENV
+        sed -i -E "s/(user_register_app_key: ).*/\1\"${SELF_REG_APP_KEY}\"/" CRI_XCBC/group_vars/$ENV
+        sed -i -E "s/(celery_user_password: ).*/\1\"${CELERY_PASSWD}\"/" CRI_XCBC/group_vars/$ENV
+        sed -i -E "s|(ssh_pub_key: ).*|\1\"{{ lookup('file', '${SSH_PUB_KEY}') }}\"|" CRI_XCBC/group_vars/$ENV
       fi
-  only:
-    - schedules
+    # packer commands
+    - packer init openstack-ood
+    - packer validate openstack-ood
+    - packer build -machine-readable openstack-ood | tee ood_build.log
+    - export BUILT_OOD_IMAGE_ID=$(grep 'Image:' ood_build.log | awk '{print $4}')
+    - echo BUILT_OOD_IMAGE_ID=${BUILT_OOD_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
+    # set image properties with repo state
+    - openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${CI_COMMIT_SHORT_SHA} ${BUILT_OOD_IMAGE_ID}
+  artifacts:
+    reports:
+      dotenv: image.env
+  rules:
+    - if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "ood"
+      when: always
 
-deploy_cheaha:
+deploy_http_proxy_node:
   stage: deploy
   environment:
-    name: cheaha
+    name: $ENV
   tags:
     - build
   script:
-    - echo "Job placeholder to deploy to Cheaha"
-  when: manual
-  only:
-    - main
-
-cleanup_knightly:
-  stage: cleanup
-  environment:
-    name: knightly
-  tags:
-    - build
-  script:
-    - >
-      SERVER_TO_BE_DELETE=($(openstack server list --name $OOD_INSTANCE_NAME --sort-column Image --sort-descending -f value -c ID
-      | awk -v NSTK=$NUM_SERVER_TO_KEEP -v OID=$OLD_INSTANCE_ID '$0 != OID {count++}
-      $0 != OID && count>NSTK {print}'))
+    - openstack image set --accept $HTTP_PROXY_IMAGE_ID || true
+    - FAILED=false
     - |
-      for svr in ${SERVER_TO_BE_DELETE[@]}; do
-        echo "Deleting server $svr"
-        openstack server delete ${svr}
+      cat > user_data.txt <<EOF
+      #!/bin/bash
+      cat >> /etc/NetworkManager/conf.d/90-dns-none.conf<<EEOF
+      [main]
+      dns=none
+      EEOF
+      systemctl reload NetworkManager
+      echo "$DEV_KEY" >> /root/.ssh/authorized_keys
+      ip route replace default via ${DEFAULT_GATEWAY_IP} dev eth0
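+      # clone this repo on the new instance and run the playbook locally on first boot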
+      git clone ${CI_REPOSITORY_URL} /tmp/${CI_PROJECT_NAME}
+      cd /tmp/${CI_PROJECT_NAME}
+      git checkout ${CI_COMMIT_REF_NAME}
+      cat >> ansible/hosts<<EEOF
+      [$ENV]
+      127.0.0.1
+      EEOF
+      ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
+      rm -rf /tmp/${CI_PROJECT_NAME}
+      EOF
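+    # assemble the server create command; network and port flags are optional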
+    - |
+      export cmd="openstack server create"
+      cmd+=" -c id -f value --image $HTTP_PROXY_IMAGE_ID"
+      cmd+=" --flavor $INSTANCE_FLAVOR"
+      for security_group in ${SECURITY_GROUP_LIST[@]};
+      do
+        cmd+=" --security-group $security_group"
+      done
+      cmd+=" --user-data user_data.txt"
+      if [ -n "$PROXY_NETWORK" ];then cmd+=" --network $PROXY_NETWORK"; fi
+      if [ -n "$HTTP_PROXY_PORT" ];then cmd+=" --port $HTTP_PROXY_PORT"; fi
+      cmd+=" --wait $HTTP_PROXY_INSTANCE_NAME"
+    - export HTTP_PROXY_INSTANCE_ID=$(bash -c "$cmd")
+    - |
+      # Associate the floating IP(s) with the HTTP Proxy instance
+      for HTTP_PROXY_FLOATING_IP in ${HTTP_PROXY_FLOATING_IP_LIST[@]};
+      do
+        echo "Associating FLOATING_IP $HTTP_PROXY_FLOATING_IP with HTTP_PROXY_INSTANCE_ID $HTTP_PROXY_INSTANCE_ID"
+        openstack server add floating ip $HTTP_PROXY_INSTANCE_ID $HTTP_PROXY_FLOATING_IP
       done
   rules:
-    - if: $CI_PIPELINE_SOURCE == "schedule"
+    - if: $PIPELINE_TARGET == "deploy" && $HTTP_PROXY_IMAGE_ID
       when: always
 
-cleanup_integration:
-  stage: cleanup
+deploy_ssh_proxy_node:
+  stage: deploy
+  environment:
+    name: $ENV
   tags:
     - build
   script:
-    - OS_PROJECT_ID=$(openstack application credential show $OS_APPLICATION_CREDENTIAL_ID -f value -c project_id)
-    - openstack image list --sort-column Name --sort-descending -f value -c Name -c ID --property owner=$OS_PROJECT_ID > images.txt
+    - openstack image set --accept $SSH_PROXY_IMAGE_ID || true
+    - FAILED=false
     - |
-      if [ "${DELETE_BUILT_IMAGE-false}" = true ]; then
-        openstack image delete $BUILT_OOD_IMAGE_ID
-      fi
-    - >
-      OOD_IMAGE_TO_BE_DELETE=($(cat images.txt
-      | awk -v NITK=$NUM_IMAGE_TO_KEEP -v REGEX=ood-$TIMESTAMP_REGEX
-      '{if ($0 ~ REGEX) result[count++] = $1}
-      END {for(i=NITK;i<count;i++) print result[i]}'))
+      cat > user_data.txt <<EOF
+      #!/bin/bash
+      cat >> /etc/NetworkManager/conf.d/90-dns-none.conf<<EEOF
+      [main]
+      dns=none
+      EEOF
+      systemctl reload NetworkManager
+      echo "$DEV_KEY" >> /root/.ssh/authorized_keys
+      ip route replace default via ${DEFAULT_GATEWAY_IP} dev eth0
+      git clone ${CI_REPOSITORY_URL} /tmp/${CI_PROJECT_NAME}
+      cd /tmp/${CI_PROJECT_NAME}
+      git checkout ${CI_COMMIT_REF_NAME}
+      cat >> ansible/hosts<<EEOF
+      [$ENV]
+      127.0.0.1
+      EEOF
+      ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
+      rm -rf /tmp/${CI_PROJECT_NAME}
+      EOF
     - |
-      for img in ${OOD_IMAGE_TO_BE_DELETE[@]}; do
-        echo "Deleting image $img"
-        openstack image delete ${img}
+      export cmd="openstack server create"
+      cmd+=" -c id -f value --image $SSH_PROXY_IMAGE_ID"
+      cmd+=" --flavor $INSTANCE_FLAVOR"
+      for security_group in ${SECURITY_GROUP_LIST[@]};
+      do
+        cmd+=" --security-group $security_group"
+      done
+      cmd+=" --user-data user_data.txt"
+      if [ -n "$PROXY_NETWORK" ];then cmd+=" --network $PROXY_NETWORK"; fi
+      if [ -n "$SSH_PROXY_PORT" ];then cmd+=" --port $SSH_PROXY_PORT"; fi
+      cmd+=" --wait $SSH_PROXY_INSTANCE_NAME"
+    - export SSH_PROXY_INSTANCE_ID=$(bash -c "$cmd")
+    - |
+      # Associate the floating IP(s) with the SSH Proxy instance
+      for SSH_PROXY_FLOATING_IP in ${SSH_PROXY_FLOATING_IP_LIST[@]};
+      do
+        echo "Associating FLOATING_IP $SSH_PROXY_FLOATING_IP with SSH_PROXY_INSTANCE_ID $SSH_PROXY_INSTANCE_ID"
+        openstack server add floating ip $SSH_PROXY_INSTANCE_ID $SSH_PROXY_FLOATING_IP
       done
   rules:
-    - if: $CI_PIPELINE_SOURCE == "schedule"
+    - if: $PIPELINE_TARGET == "deploy" && $SSH_PROXY_IMAGE_ID
       when: always
 
-cleanup_mr:
-  stage: cleanup
+deploy_login_node:
+  stage: deploy
+  environment:
+    name: $ENV
   tags:
     - build
   script:
-    - OS_PROJECT_ID=$(openstack application credential show $OS_APPLICATION_CREDENTIAL_ID -f value -c project_id)
-    - >
-      IMAGE_TO_BE_DELETE=($(openstack image list --sort-column Name --sort-descending -f value -c Name -c ID --property owner=$OS_PROJECT_ID
-      | awk -v REGEX="(ood|base|compute|gpu)-PR-$CI_MERGE_REQUEST_IID" '{if ($0 ~ REGEX) print $1}'))
+    - openstack image set --accept $LOGIN_IMAGE_ID || true
+    - FAILED=false
     - |
-      for img in ${IMAGE_TO_BE_DELETE[@]}; do
-        echo "Deleting image $img"
-        openstack image delete ${img}
+      cat > user_data.txt <<EOF
+      #!/bin/bash
+      cat >> /etc/NetworkManager/conf.d/90-dns-none.conf<<EEOF
+      [main]
+      dns=none
+      EEOF
+      systemctl reload NetworkManager
+      echo "$DEV_KEY" >> /root/.ssh/authorized_keys
+      ip route replace default via ${DEFAULT_GATEWAY_IP} dev eth0
+      git clone ${CI_REPOSITORY_URL} /tmp/${CI_PROJECT_NAME}
+      cd /tmp/${CI_PROJECT_NAME}
+      git checkout ${CI_COMMIT_REF_NAME}
+      cat >> ansible/hosts<<EEOF
+      [$ENV]
+      127.0.0.1
+      EEOF
+      s3cmd get --force -r --access_key=$AWS_ACCESS_KEY_ID --secret_key=$AWS_SECRET_ACCESS_KEY --host=$AWS_HOST --host-bucket=$AWS_HOST s3://cheaha-cloud-ansible-files/ /tmp/${CI_PROJECT_NAME}/ansible/files/
+      ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
+      rm -rf /tmp/${CI_PROJECT_NAME}
+      EOF
+    - |
+      export cmd="openstack server create"
+      cmd+=" -c id -f value --image $LOGIN_IMAGE_ID"
+      cmd+=" --flavor $INSTANCE_FLAVOR"
+      for security_group in ${SECURITY_GROUP_LIST[@]};
+      do
+        cmd+=" --security-group $security_group"
+      done
+      cmd+=" --user-data user_data.txt"
+      if [ -n "$INSTANCE_NETWORK" ];then cmd+=" --network $INSTANCE_NETWORK"; fi
+      if [ -n "$LOGIN_PORT" ];then cmd+=" --port $LOGIN_PORT"; fi
+      cmd+=" --wait $LOGIN_INSTANCE_NAME"
+    - export LOGIN_INSTANCE_ID=$(bash -c "$cmd")
+    - |
+      # Associate the floating IP(s) with the Login instance
+      for LOGIN_FLOATING_IP in ${LOGIN_FLOATING_IP_LIST[@]};
+      do
+        echo "Associating FLOATING_IP $LOGIN_FLOATING_IP with LOGIN_INSTANCE_ID $LOGIN_INSTANCE_ID"
+        openstack server add floating ip $LOGIN_INSTANCE_ID $LOGIN_FLOATING_IP
       done
   rules:
-    - if: $CI_PIPELINE_SOURCE == "merge_request_event"
+    - if: $PIPELINE_TARGET == "deploy" && $LOGIN_IMAGE_ID
       when: always
+
diff --git a/ansible/cheaha.yml b/ansible/cheaha.yml
deleted file mode 100644
index bfb1af113f50383b600441092d4549aef7a82901..0000000000000000000000000000000000000000
--- a/ansible/cheaha.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Setup node for use as a virtual cheaha node
-  hosts: default
-  become: true
-  roles:
-    - { name: 'cheaha.node', tags: 'cheaha.node' }
-    - { name: 'nfs_mounts', tags: 'nfs_mounts' }
-    - { name: 'ldap_config', tags: 'ldap_config' }
-    - { name: 'slurm_client', tags: 'slurm_client' }
diff --git a/ansible/cluster.yml b/ansible/cluster.yml
new file mode 100644
index 0000000000000000000000000000000000000000..9664bf9e22996377bec93b4d4eb4155830a1d0d7
--- /dev/null
+++ b/ansible/cluster.yml
@@ -0,0 +1,16 @@
+---
+- name: Setup node for use as a virtual cheaha node
+  hosts: all
+  become: true
+  roles:
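+    # optional roles are gated by enable_* flags defined in group_vars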
+    - { name: 'cheaha.node', tags: 'cheaha.node' }
+    - { name: 'nfs_mounts', tags: 'nfs_mounts', when: enable_nfs_mounts }
+    - { name: 'ldap_config', tags: 'ldap_config' }
+    - { name: 'slurm_client', tags: 'slurm_client', when: enable_slurm_client }
+    - { name: 'ssh_host_keys', tags: 'ssh_host_keys' }
+    - { name: 'ssh_proxy_config', tags: 'ssh_proxy_config', when: enable_ssh_proxy_config }
+    - { name: 'ssl_cert', tags: 'ssl_cert', when: enable_ssl_certs }
+    - { name: 'rsyslog_config', tags: 'rsyslog_config', when: enable_rsyslog_config }
+    - { name: 'rewrite_map', tags: 'rewrite_map', when: enable_rewrite_map }
+    - { name: 'fail2ban', tags: 'fail2ban', when: enable_fail2ban }
+    - { name: 'install_node_exporter', tags: 'install_node_exporter', when: enable_node_exporter }
diff --git a/ansible/group_vars/all b/ansible/group_vars/all
index e55be3c6b5c2ad77a3195681900b9b84a6a25585..46267cd8eb7d645ece328c8f74f82fb3d536fa77 100644
--- a/ansible/group_vars/all
+++ b/ansible/group_vars/all
@@ -4,9 +4,88 @@
   yum_repo_files: []
   pkg_list: []
   slurm_version: 18.08.9
-  
+  enable_slurm_client: false
+
 # NHC related
   nhc_download_url: "https://github.com/mej/nhc/releases/download/1.4.3/lbnl-nhc-1.4.3-1.el7.noarch.rpm"
   nhc_download_path: "/tmp"
   nhc_git_repo: "https://gitlab.rc.uab.edu/rc/nhc.git"
   nhc_git_repo_path: "/tmp/nhc"
+
+  root_ssh_key: ""
+
+# cheaha.node related
+  hostname_lookup_table:
+    - "10.141.255.254 master.cm.cluster master localmaster.cm.cluster localmaster ldapserver.cm.cluster ldapserver"
+  domain_search_list:
+    - openstack.internal
+    - cm.cluster
+  nameserver_list:
+    - 10.141.255.254
+
+# ldap_config related
+  ldap_cert_path: "/etc/openldap/certs"
+  ldap_uri: "ldap://ldapserver"
+
+# nfs_mounts related
+  enable_nfs_mounts: true
+  use_autofs: false
+  use_fstab: false
+  mount_points:
+    - { "src": "master:/gpfs4", "path": "/gpfs4", "opts": "ro,sync,hard", "mode": "0755" }
+    - { "src": "master:/gpfs5", "path": "/gpfs5", "opts": "ro,sync,hard", "mode": "0755" }
+  autofs_mounts:
+    - { "src": "master:/gpfs4/&", "path": "/gpfs4", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs4", "map_name": "gpfs4", key: "*" }
+    - { "src": "master:/gpfs5/&", "path": "/gpfs5", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs5", "map_name": "gpfs5", key: "*" }
+
+#SSH Host Keys
+  S3_ENDPOINT: ""
+  SSH_HOST_KEYS_S3_BUCKET: ""
+  SSH_HOST_KEYS_S3_OBJECT: ""
+
+# AWS credentials
+  LTS_ACCESS_KEY: ""
+  LTS_SECRET_KEY: ""
+
+# ssh proxy
+  enable_ssh_proxy_config: false
+  sshpiper_dest_dir: "/opt/sshpiper"
+
+# rsyslog
+  enable_rsyslog_config: true
+  rsyslog_target: "*.* @master:514"
+
+# ssl certs
+  enable_ssl_certs: false
+  ssl_cert_s3_bucket: ""
+  ssl_cert_key_location: "/etc/pki/tls/private"
+  ssl_cert_file_location: "/etc/pki/tls/certs"
+  ssl_cert_key: ""
+  ssl_cert_file: ""
+  ssl_cert_chain_file: ""
+  ssl_apache_config: ""
+  apache_service: "httpd"
+
+# rewrite map
+  enable_rewrite_map: false
+  target_groups:
+    - {"name": "gpfs4", "host": "login001", "default": True }
+    - {"name": "gpfs5", "host": "login002", "default": False }
+
+# account app
+  account_app_port: 8000
+
+# fail2ban
+  enable_fail2ban: false
+  maxretry: 1
+  findtime: 600
+  bantime: 1200
+  fail2ban_white_list: "127.0.0.1/8"
+
+# Node Exporter
+  enable_node_exporter: false
+  node_exporter_ver: "1.8.2"
+  node_exporter_filename: "node_exporter-{{ node_exporter_ver }}.linux-amd64"
+  node_exporter_user: node_exporter
+  node_exporter_group: node_exporter
+  node_exporter_port: 9100
diff --git a/ansible/group_vars/prod b/ansible/group_vars/prod
new file mode 100644
index 0000000000000000000000000000000000000000..5c694d2616f0e716ec537662469d9c7ec307d2ff
--- /dev/null
+++ b/ansible/group_vars/prod
@@ -0,0 +1,24 @@
+---
+  # cheaha.node related
+  hostname_lookup_table:
+    - "172.20.0.24 cheaha-master02.cm.cluster cheaha-master02"
+    - "172.20.0.22 cheaha-master01.cm.cluster cheaha-master01"
+    - "172.20.0.25 master.cm.cluster master localmaster.cm.cluster localmaster ldapserver.cm.cluster ldapserver"
+  domain_search_list:
+    - cm.cluster
+    - rc.uab.edu
+    - ib.cluster
+    - drac.cluster
+    - eth.cluster
+    - ib-hdr.cluster
+  nameserver_list:
+    - 172.20.0.25
+
+  bright_openldap_path: "/cm/local/apps/openldap"
+  ldap_cert_path: "{{bright_openldap_path}}/etc/certs"
+  ldap_uri: "ldaps://ldapserver"
+
+  # proxy_config
+  target_groups:
+    - {"name": "gpfs5", "host": "login002", "default": False, "authorized_keys":"/gpfs5/data/user/home/$DOWNSTREAM_USER/.ssh/authorized_keys", "private_key":"/gpfs5/data/user/home/$DOWNSTREAM_USER/.ssh/id_ecdsa"}
+    - {"name": "gpfs4", "host": "login001", "default": True, "authorized_keys":"/gpfs4/data/user/home/$DOWNSTREAM_USER/.ssh/authorized_keys", "private_key":"/gpfs4/data/user/home/$DOWNSTREAM_USER/.ssh/id_ecdsa"}
diff --git a/ansible/compute.yml b/ansible/login.yml
similarity index 63%
rename from ansible/compute.yml
rename to ansible/login.yml
index 2907d08077a8431a717209aec22fc362bf2391a7..9d7dd233bfa619713785678e6d070e8fbda5e22f 100644
--- a/ansible/compute.yml
+++ b/ansible/login.yml
@@ -5,8 +5,5 @@
   roles:
     - { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
     - { name: 'install_packages', tags: 'install_packages' }
-    - { name: 'pam_slurm_adopt', tags: 'pam_slurm_adopt' }
     - { name: 'install_nhc', tags: 'install_nhc'}
 
-- name: Setup node for use as a virtual cheaha node
-  ansible.builtin.import_playbook: cheaha.yml
diff --git a/ansible/ood.yml b/ansible/ood.yml
index 089ffd3ee0d3cece4cc03c3a2a8a047083fa2b6d..37c09aa4e34466ae9c52c20699c868a70642a346 100644
--- a/ansible/ood.yml
+++ b/ansible/ood.yml
@@ -6,6 +6,3 @@
     - { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
     - { name: 'install_packages', tags: 'install_packages' }
     - { name: 'install_zsh', tags: 'install_zsh' }
-
-- name: Setup node for use as a virtual cheaha node
-  ansible.builtin.import_playbook: cheaha.yml
diff --git a/ansible/roles/cheaha.node/tasks/main.yml b/ansible/roles/cheaha.node/tasks/main.yml
index 99ca7f3e9ff8f1185e715dfac56767846a176af9..c4c9335ddd773678b2f5245641c1ad67f1f2c31d 100644
--- a/ansible/roles/cheaha.node/tasks/main.yml
+++ b/ansible/roles/cheaha.node/tasks/main.yml
@@ -4,15 +4,24 @@
     path: /etc/hosts
     line: "{{ item }}"
   loop:
-    - "172.20.0.24 cheaha-master02.cm.cluster cheaha-master02"
-    - "172.20.0.22 cheaha-master01.cm.cluster cheaha-master01"
-    - "172.20.0.25 master.cm.cluster master localmaster.cm.cluster localmaster ldapserver.cm.cluster ldapserver"
+    "{{ hostname_lookup_table }}"
 
 - name: Add proper DNS search to lookup other nodes on the cluster
   ansible.builtin.lineinfile:
     path: /etc/dhcp/dhclient.conf
     insertbefore: BOF
     line: 'append domain-name " cm.cluster rc.uab.edu ib.cluster drac.cluster eth.cluster ib-hdr.cluster";'
+    create: true
+    state: present
+
+- name: Template resolv.conf
+  ansible.builtin.template:
+    src: resolv.conf.j2
+    dest: /etc/resolv.conf
+    owner: root
+    group: root
+    mode: 0644
+    backup: true
 
 - name: Disable SELinux
   ansible.posix.selinux:
@@ -25,6 +34,7 @@
     owner: root
     group: root
     mode: 0644
+  when: "'cm.repo' in yum_repo_files"
 
 - name: Add ssh key for root access
   ansible.posix.authorized_key:
@@ -35,3 +45,7 @@
 - name: Set timezone to America/Chicago
   community.general.timezone:
     name: America/Chicago
+  retries: 3
+  delay: 3
+  register: result
+  until: not result.failed
diff --git a/ansible/roles/cheaha.node/templates/resolv.conf.j2 b/ansible/roles/cheaha.node/templates/resolv.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..be59430ed6fe3a478587a77d74ae9653c5c16f33
--- /dev/null
+++ b/ansible/roles/cheaha.node/templates/resolv.conf.j2
@@ -0,0 +1,4 @@
+search {{ domain_search_list | join(' ') }}
+{% for name_server in nameserver_list %}
+nameserver {{ name_server }}
+{% endfor %}
diff --git a/ansible/roles/fail2ban/tasks/main.yml b/ansible/roles/fail2ban/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2f7d96e68acb65478a263291c9f3e3092612ff94
--- /dev/null
+++ b/ansible/roles/fail2ban/tasks/main.yml
@@ -0,0 +1,46 @@
+---
+
+- name: Install fail2ban
+  ansible.builtin.package:
+    name: "{{ item }}"
+    state: present
+  loop:
+    - fail2ban
+    - fail2ban-firewalld
+
+- name: Configure fail2ban
+  ansible.builtin.template:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+    backup: true
+  loop:
+    - { src: 'jail.local.j2', dest: '/etc/fail2ban/jail.local' }
+    - { src: 'sshpiperd_filter.local.j2', dest: '/etc/fail2ban/filter.d/sshpiperd.local' }
+    - { src: 'sshpiperd_jail.local.j2', dest: '/etc/fail2ban/jail.d/sshpiperd.local' }
+
+- name: Activate the firewalld support for fail2ban
+  ansible.builtin.command:
+    cmd: mv /etc/fail2ban/jail.d/00-firewalld.conf /etc/fail2ban/jail.d/00-firewalld.local
+    removes: /etc/fail2ban/jail.d/00-firewalld.conf
+
+- name: Configure firewalld to allow ssh and sshpiper traffic
+  ansible.posix.firewalld:
+    port: "{{ item }}"
+    zone: public
+    state: enabled
+    permanent: true
+  loop:
+    - 2222/tcp
+    - 22/tcp
+
+- name: Enable and start firewalld
+  ansible.builtin.service:
+    name: firewalld
+    enabled: true
+    state: restarted
+
+- name: Enable and start fail2ban
+  ansible.builtin.service:
+    name: fail2ban
+    enabled: true
+    state: restarted
+
diff --git a/ansible/roles/fail2ban/templates/jail.local.j2 b/ansible/roles/fail2ban/templates/jail.local.j2
new file mode 100644
index 0000000000000000000000000000000000000000..87f9e4fa06f0ca75ed77d0a2361262f94859d91d
--- /dev/null
+++ b/ansible/roles/fail2ban/templates/jail.local.j2
@@ -0,0 +1,7 @@
+[DEFAULT]
+banaction = firewalld
+bantime  = {{ bantime }}
+ignoreip = {{ fail2ban_white_list }}
+
+[sshd]
+enabled = true
diff --git a/ansible/roles/fail2ban/templates/sshpiperd_filter.local.j2 b/ansible/roles/fail2ban/templates/sshpiperd_filter.local.j2
new file mode 100644
index 0000000000000000000000000000000000000000..f5a6081ec28c490ab24ff3f4c96c3b08bb86b9da
--- /dev/null
+++ b/ansible/roles/fail2ban/templates/sshpiperd_filter.local.j2
@@ -0,0 +1,22 @@
+# Refer to https://github.com/fail2ban/fail2ban/wiki/Developing-Regex-in-Fail2ban for developing regex using fail2ban
+#
+[INCLUDES]
+before = common.conf
+
+[DEFAULT]
+_daemon = sshpiperd
+__iso_datetime = "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:[+-]\d{2}:\d{2}|Z)"
+__pref = time=%(__iso_datetime)s level=(?:debug|error)
+
+[Definition]
+# Define the prefix regex for the log lines
+prefregex = ^<F-MLFID>%(__prefix_line)s%(__pref)s</F-MLFID>\s+<F-CONTENT>.+</F-CONTENT>$
+
+# Failregex to match the specific failure log lines (prefregex is automatically included)
+failregex = ^msg="connection from .*failtoban: ip <HOST> too auth many failures"$
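+# the wording "too auth many failures" mirrors sshpiperd's failtoban log message verbatim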
+
+ignoreregex =
+
+mode = normal
+
+maxlines = 1
diff --git a/ansible/roles/fail2ban/templates/sshpiperd_jail.local.j2 b/ansible/roles/fail2ban/templates/sshpiperd_jail.local.j2
new file mode 100644
index 0000000000000000000000000000000000000000..681212ccaf59595c5dd6097e188286db85225dbd
--- /dev/null
+++ b/ansible/roles/fail2ban/templates/sshpiperd_jail.local.j2
@@ -0,0 +1,9 @@
+# This configuration will block the remote host after {{maxretry}} failed SSH login attempts.
+[sshpiperd]
+enabled  = true
+filter   = sshpiperd
+logpath  = /var/log/messages
+port     = 22
+maxretry = {{ maxretry }}
+backend  = auto
+findtime = {{ findtime }}
diff --git a/ansible/roles/install_node_exporter/tasks/main.yaml b/ansible/roles/install_node_exporter/tasks/main.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..205904b956f913c8f9e7dd8a11d45cde1d1037b0
--- /dev/null
+++ b/ansible/roles/install_node_exporter/tasks/main.yaml
@@ -0,0 +1,82 @@
+---
+- name: Download node_exporter binary
+  ansible.builtin.get_url:
+    url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_ver }}/{{ node_exporter_filename }}.tar.gz"
+    dest: "/tmp/{{ node_exporter_filename }}.tar.gz"
+
+- name: Extract node_exporter
+  ansible.builtin.unarchive:
+    src: "/tmp/{{ node_exporter_filename }}.tar.gz"
+    dest: "/tmp"
+    remote_src: yes
+
+- name: Create system group for user account {{ node_exporter_group }}
+  ansible.builtin.group:
+    name: "{{ node_exporter_group }}"
+    system: true
+    state: present
+
+- name: Create system user account {{ node_exporter_user }}
+  ansible.builtin.user:
+    name: "{{ node_exporter_user }}"
+    comment: Prometheus node_exporter system account
+    group: "{{ node_exporter_group }}"
+    system: true
+    home: /var/lib/node_exporter
+    create_home: false
+    shell: /sbin/nologin
+    state: present
+
+- name: Copy node_exporter binary
+  ansible.builtin.copy:
+    src: "/tmp/{{ node_exporter_filename }}/node_exporter"
+    dest: /usr/local/bin/node_exporter
+    remote_src: yes
+    owner: root
+    group: root
+    mode: 0755
+
+- name: Copy systemd unit file
+  ansible.builtin.template:
+    src: node_exporter.service.j2
+    dest: /etc/systemd/system/node_exporter.service
+    owner: root
+    group: root
+    mode: '0644'
+
+- name: Clean up /tmp
+  ansible.builtin.file:
+    path: "/tmp/{{ item }}"
+    state: absent
+  loop:
+    - "{{ node_exporter_filename }}.tar.gz"
+    - "{{ node_exporter_filename }}"
+
+- name: Restart node_exporter service
+  ansible.builtin.systemd:
+    daemon_reload: yes
+    name: node_exporter
+    state: restarted
+    enabled: true
+
+- name: Collect facts about system services
+  ansible.builtin.service_facts:
+
+- name: Configure firewalld to allow prometheus
+  ansible.posix.firewalld:
+    port: "{{ node_exporter_port }}/tcp"
+    zone: public
+    state: enabled
+    permanent: true
+  when:
+    - "'firewalld.service' in ansible_facts.services"
+    - ansible_facts.services["firewalld.service"].state == "running"
+
+- name: Enable and start firewalld
+  ansible.builtin.service:
+    name: firewalld
+    enabled: true
+    state: restarted
+  when:
+    - "'firewalld.service' in ansible_facts.services"
+    - ansible_facts.services["firewalld.service"].state == "running"
diff --git a/ansible/roles/install_node_exporter/templates/node_exporter.service.j2 b/ansible/roles/install_node_exporter/templates/node_exporter.service.j2
new file mode 100644
index 0000000000000000000000000000000000000000..fddb82d9e0f74fe4382ac62e94fbb3a25ddb9dda
--- /dev/null
+++ b/ansible/roles/install_node_exporter/templates/node_exporter.service.j2
@@ -0,0 +1,12 @@
+[Unit]
+Description=Node Exporter
+After=network.target
+
+[Service]
+User={{ node_exporter_user }}
+Group={{ node_exporter_group }}
+Type=simple
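+# exclude pseudo-filesystems and container mounts from the filesystem collector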
+ExecStart=/usr/local/bin/node_exporter --web.listen-address=:{{ node_exporter_port }} --collector.filesystem.mount-points-exclude "^/(dev|proc|run/user/.+|run/credentials/.+|sys|var/lib/docker/.+)($|/)" --collector.filesystem.fs-types-exclude "^(autofs|binfmt_misc|bpf|cgroup|tmpfs|sunrpc|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
+
+[Install]
+WantedBy=multi-user.target
diff --git a/ansible/roles/ldap_config/tasks/main.yml b/ansible/roles/ldap_config/tasks/main.yml
index 183261006200403e678d54a3e0fd84d1453f9174..0f8db2afbbdfab54b1546440e9b2ea3df2b1f4ac 100644
--- a/ansible/roles/ldap_config/tasks/main.yml
+++ b/ansible/roles/ldap_config/tasks/main.yml
@@ -25,7 +25,7 @@
 - name: Copy ldap cert(s) into place
   ansible.builtin.copy:
     src: "{{ item.src }}"
-    dest: "/cm/local/apps/openldap/etc/certs/{{ item.src }}"
+    dest: "{{ ldap_cert_path }}/{{ item.src }}"
     owner: ldap
     group: ldap
     mode: 0440
@@ -33,10 +33,11 @@
     - { src: ca.pem }
     - { src: ldap.key }
     - { src: ldap.pem }
+  when: ldap_uri | regex_search('^ldaps://')
 
 - name: Copy ldap config into place
-  ansible.builtin.copy:
-    src: nslcd.conf
+  ansible.builtin.template:
+    src: nslcd.conf.j2
     dest: /etc/nslcd.conf
     owner: root
     group: root
@@ -46,5 +47,6 @@
   ansible.builtin.service:
     name: "{{ item }}"
     enabled: yes
+    state: restarted
   loop:
     - nslcd
diff --git a/ansible/roles/ldap_config/templates/nslcd.conf.j2 b/ansible/roles/ldap_config/templates/nslcd.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..0d03cdfb7f64277046f48afbf5810916f3f5b04f
--- /dev/null
+++ b/ansible/roles/ldap_config/templates/nslcd.conf.j2
@@ -0,0 +1,148 @@
+# This is the configuration file for the LDAP nameservice
+# switch library's nslcd daemon. It configures the mapping
+# between NSS names (see /etc/nsswitch.conf) and LDAP
+# information in the directory.
+# See the manual page nslcd.conf(5) for more information.
+
+# The user and group nslcd should run as.
+uid nslcd
+gid ldap
+
+# The uri pointing to the LDAP server to use for name lookups.
+# Multiple entries may be specified. The address that is used
+# here should be resolvable without using LDAP (obviously).
+#uri ldap://127.0.0.1/
+#uri ldaps://127.0.0.1/
+#uri ldapi://%2fvar%2frun%2fldapi_sock/
+# Note: %2f encodes the '/' used as directory separator
+uri {{ ldap_uri }}
+
+# The LDAP version to use (defaults to 3
+# if supported by client library)
+#ldap_version 3
+
+# The distinguished name of the search base.
+base dc=cm,dc=cluster
+
+# The distinguished name to bind to the server with.
+# Optional: default is to bind anonymously.
+#binddn cn=proxyuser,dc=example,dc=com
+
+# The credentials to bind with.
+# Optional: default is no credentials.
+# Note that if you set a bindpw you should check the permissions of this file.
+#bindpw secret
+
+# The distinguished name to perform password modifications by root by.
+#rootpwmoddn cn=admin,dc=example,dc=com
+
+# The default search scope.
+#scope sub
+#scope one
+#scope base
+
+# Customize certain database lookups.
+#base   group  ou=Groups,dc=example,dc=com
+#base   passwd ou=People,dc=example,dc=com
+#base   shadow ou=People,dc=example,dc=com
+#scope  group  onelevel
+#scope  hosts  sub
+
+# Bind/connect timelimit.
+#bind_timelimit 30
+
+# Search timelimit.
+#timelimit 30
+
+# Idle timelimit. nslcd will close connections if the
+# server has not been contacted for the number of seconds.
+idle_timelimit 240
+
+# Use StartTLS without verifying the server certificate.
+#ssl start_tls
+#tls_reqcert never
+
+{% if ldap_uri | regex_search('^ldaps://') %}
+ssl on
+tls_reqcert demand
+
+# CA certificates for server certificate verification
+#tls_cacertdir /etc/ssl/certs
+tls_cacertfile /cm/local/apps/openldap/etc/certs/ca.pem
+tls_cert /cm/local/apps/openldap/etc/certs/ldap.pem
+tls_key /cm/local/apps/openldap/etc/certs/ldap.key
+{% endif %}
+
+# Seed the PRNG if /dev/urandom is not provided
+#tls_randfile /var/run/egd-pool
+
+# SSL cipher suite
+# See man ciphers for syntax
+#tls_ciphers TLSv1
+
+# Client certificate and key
+# Use these, if your server requires client authentication.
+
+# Mappings for Services for UNIX 3.5
+#filter passwd (objectClass=User)
+#map    passwd uid              msSFU30Name
+#map    passwd userPassword     msSFU30Password
+#map    passwd homeDirectory    msSFU30HomeDirectory
+#map    passwd homeDirectory    msSFUHomeDirectory
+#filter shadow (objectClass=User)
+#map    shadow uid              msSFU30Name
+#map    shadow userPassword     msSFU30Password
+#filter group  (objectClass=Group)
+#map    group  member           msSFU30PosixMember
+
+# Mappings for Services for UNIX 2.0
+#filter passwd (objectClass=User)
+#map    passwd uid              msSFUName
+#map    passwd userPassword     msSFUPassword
+#map    passwd homeDirectory    msSFUHomeDirectory
+#map    passwd gecos            msSFUName
+#filter shadow (objectClass=User)
+#map    shadow uid              msSFUName
+#map    shadow userPassword     msSFUPassword
+#map    shadow shadowLastChange pwdLastSet
+#filter group  (objectClass=Group)
+#map    group  member           posixMember
+
+# Mappings for Active Directory
+#pagesize 1000
+#referrals off
+#idle_timelimit 800
+#filter passwd (&(objectClass=user)(!(objectClass=computer))(uidNumber=*)(unixHomeDirectory=*))
+#map    passwd uid              sAMAccountName
+#map    passwd homeDirectory    unixHomeDirectory
+#map    passwd gecos            displayName
+#filter shadow (&(objectClass=user)(!(objectClass=computer))(uidNumber=*)(unixHomeDirectory=*))
+#map    shadow uid              sAMAccountName
+#map    shadow shadowLastChange pwdLastSet
+#filter group  (objectClass=group)
+
+# Alternative mappings for Active Directory
+# (replace the SIDs in the objectSid mappings with the value for your domain)
+#pagesize 1000
+#referrals off
+#idle_timelimit 800
+#filter passwd (&(objectClass=user)(objectClass=person)(!(objectClass=computer)))
+#map    passwd uid           cn
+#map    passwd uidNumber     objectSid:S-1-5-21-3623811015-3361044348-30300820
+#map    passwd gidNumber     objectSid:S-1-5-21-3623811015-3361044348-30300820
+#map    passwd homeDirectory "/home/$cn"
+#map    passwd gecos         displayName
+#map    passwd loginShell    "/bin/bash"
+#filter group (|(objectClass=group)(objectClass=person))
+#map    group gidNumber      objectSid:S-1-5-21-3623811015-3361044348-30300820
+
+# Mappings for AIX SecureWay
+#filter passwd (objectClass=aixAccount)
+#map    passwd uid              userName
+#map    passwd userPassword     passwordChar
+#map    passwd uidNumber        uid
+#map    passwd gidNumber        gid
+#filter group  (objectClass=aixAccessGroup)
+#map    group  cn               groupName
+#map    group  gidNumber        gid
+# This comment prevents repeated auto-migration of settings.
diff --git a/ansible/roles/nfs_mounts/tasks/autofs.yml b/ansible/roles/nfs_mounts/tasks/autofs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..642a02e997533949b5b60033287f44401c06230e
--- /dev/null
+++ b/ansible/roles/nfs_mounts/tasks/autofs.yml
@@ -0,0 +1,61 @@
+---
+- name: Create base directories
+  ansible.builtin.file:
+    path: "{{ item.path }}"
+    state: directory
+    mode: "{{ item.mode }}"
+  loop:
+    - { path: /local, mode: '0777' }
+    - { path: /share, mode: '0755' }
+
+- name: Create mountpoint dirs
+  ansible.builtin.file:
+    path: "{{ item.path }}"
+    state: directory
+    mode: "{{ item.mode }}"
+  loop:
+    "{{ autofs_mounts }}"
+
+- name: Remove unused entry in master map
+  ansible.builtin.replace:
+    dest: /etc/auto.master
+    regexp: '{{ item.regexp }}'
+    replace: '{{ item.replace }}'
+    backup: true
+  loop:
+    - { regexp: '^(/misc)', replace: '#\1' }
+    - { regexp: '^(/net)', replace: '#\1' }
+    - { regexp: '^(\+auto.master)', replace: '#\1' }
+
+- name: Add master map file
+  ansible.builtin.lineinfile:
+    path: "/etc/auto.master.d/gpfs.autofs"
+    line: "{{ item.mount_point }} /etc/auto.{{ item.map_name }}"
+    create: yes
+  loop:
+    "{{ autofs_mounts }}"
+
+- name: Set up autofs map files
+  ansible.builtin.lineinfile:
+    path: "/etc/auto.{{ item.map_name }}"
+    line: "{{ item.key }} -{{ item.opts }} {{ item.src }}"
+    create: true
+  loop:
+    "{{ autofs_mounts }}"
+
+- name: Create symbolic links
+  ansible.builtin.file:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+    owner: root
+    group: root
+    force: yes
+    state: link
+  loop:
+    - { src: /data/rc/apps, dest: /share/apps }
+
+- name: Enable and start autofs service
+  ansible.builtin.service:
+    name: autofs
+    enabled: true
+    state: restarted
diff --git a/ansible/roles/nfs_mounts/tasks/fstab.yml b/ansible/roles/nfs_mounts/tasks/fstab.yml
new file mode 100644
index 0000000000000000000000000000000000000000..abfa827406de5d07a5953f123f3b7e59a1ba7cc7
--- /dev/null
+++ b/ansible/roles/nfs_mounts/tasks/fstab.yml
@@ -0,0 +1,18 @@
+---
+- name: Create base directories
+  ansible.builtin.file:
+    path: "{{ item.path }}"
+    state: directory
+    mode: "{{ item.mode }}"
+  loop:
+    "{{ mount_points }}"
+
+- name: Mount the directories
+  ansible.posix.mount:
+    src: "{{ item.src }}"
+    path: "{{ item.path }}"
+    opts: "{{ item.opts }}"
+    state: mounted
+    fstype: nfs
+  loop:
+    "{{ mount_points }}"
diff --git a/ansible/roles/nfs_mounts/tasks/main.yml b/ansible/roles/nfs_mounts/tasks/main.yml
index 39ba56e6d4f2ab399f6c80ff157cf6a51b4e40ff..96a76ff49f8d6f3fb2cd6f15bddf9fb484934f3c 100644
--- a/ansible/roles/nfs_mounts/tasks/main.yml
+++ b/ansible/roles/nfs_mounts/tasks/main.yml
@@ -1,66 +1,8 @@
 ---
-- name: Create base directories
-  ansible.builtin.file:
-    path: "{{ item.dir }}"
-    state: directory
-    mode: "{{ item.mode }}"
-  loop:
-    - { dir: /local, mode: '0777' }
-    - { dir: /scratch, mode: '0755' }
-    - { dir: /share, mode: '0755' }
-    - { dir: /data/rc/apps, mode: '0755' } # this is only required for the symlink to be happy
-    - { dir: /data/user, mode: '0755' }
-    - { dir: /data/project, mode: '0755' }
+- name: nfs_mounts using fstab
+  ansible.builtin.include_tasks: fstab.yml
+  when: use_fstab
 
-- name: Remove unused entry in master map
-  ansible.builtin.replace:
-    dest: /etc/auto.master
-    regexp: '{{ item.regexp }}'
-    replace: '{{ item.replace }}'
-    backup: true
-  loop:
-    - { regexp: '^(/misc)', replace: '#\1' }
-    - { regexp: '^(/net)', replace: '#\1' }
-    - { regexp: '^(\+auto.master)', replace: '#\1' }
-
-- name: Add master map file
-  ansible.builtin.lineinfile:
-    path: "/etc/auto.master.d/gpfs.autofs"
-    line: "{{ item.mount_point }} /etc/auto.{{ item.map_name }}"
-    create: yes
-  loop:
-    - { mount_point: "/cm/shared", map_name: "cm-share" }
-    - { mount_point: "/data/project", map_name: "data-project" }
-    - { mount_point: "/data/user", map_name: "data-user" }
-    - { mount_point: "/data/rc/apps", map_name: "data-rc-apps" }
-    - { mount_point: "/-", map_name: "scratch" }
-    - { mount_point: "/home", map_name: "home" }
-
-- name: Set up autofs map files
-  ansible.builtin.lineinfile:
-    path: "/etc/auto.{{ item.map_name }}"
-    line: "{{ item.key }} -{{ item.opts }} {{ item.src }}"
-    create: true
-  loop:
-    - { map_name: "cm-share", key: "*", src: "gpfs.rc.uab.edu:/data/cm/shared-8.2/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
-    - { map_name: "data-project", key: "*", src: "gpfs.rc.uab.edu:/data/project/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
-    - { map_name: "data-user", key: "*", src: "gpfs.rc.uab.edu:/data/user/&", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
-    - { map_name: "data-rc-apps", key: "*", src: "gpfs.rc.uab.edu:/data/rc/apps/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
-    - { map_name: "scratch", key: "/scratch", src: "gpfs.rc.uab.edu:/scratch", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
-    - { map_name: "home", key: "*", src: ":/data/user/home/&", opts: 'fstype=bind' }
-
-- name: Create symbolic links
-  ansible.builtin.file:
-    src: "{{ item.src }}"
-    dest: "{{ item.dest }}"
-    owner: root
-    group: root
-    force: yes
-    state: link
-  loop:
-    - { src: /data/rc/apps, dest: /share/apps }
-
-- name: Enable autofs service
-  ansible.builtin.service:
-    name: autofs
-    enabled: true
+- name: nfs_mounts using autofs
+  ansible.builtin.include_tasks: autofs.yml
+  when: use_autofs
diff --git a/ansible/roles/rewrite_map/tasks/main.yaml b/ansible/roles/rewrite_map/tasks/main.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8b08eb62de1e987816f386bb50bdd2d8aec221e5
--- /dev/null
+++ b/ansible/roles/rewrite_map/tasks/main.yaml
@@ -0,0 +1,28 @@
+---
+- name: Add apache rewritemap script config
+  ansible.builtin.template:
+    src: rewrite_map_config_py.j2
+    mode: '0600'
+    owner: root
+    group: root
+    dest: /var/www/rewrite_map_config.py
+
+- name: Replace OOD rewrite condition regex in Apache configuration
+  ansible.builtin.replace:
+    path: /etc/httpd/conf.d/front-end.conf
+    regexp: "RewriteCond %{HTTP:REMOTE_USER} '\\^\\(\\.\\+\\)\\$'"
+    replace: |
+      RewriteCond %{HTTP:REMOTE_USER} '([a-zA-Z0-9_.+-]+)@uab\.edu$' [OR]
+          RewriteCond %{HTTP:REMOTE_USER} 'urn:mace:incommon:uab\.edu!https://uabgrid\.uab\.edu/shibboleth!(.+)$'
+
+- name: Replace account app port in Apache configuration
+  ansible.builtin.replace:
+    path: /etc/httpd/conf.d/front-end.conf
+    regexp: "account-app:8000"
+    replace: "account-app:{{ account_app_port }}"
+
+- name: Enable and restart httpd service
+  ansible.builtin.service:
+    name: httpd
+    enabled: true
+    state: restarted
diff --git a/ansible/roles/rewrite_map/templates/rewrite_map_config_py.j2 b/ansible/roles/rewrite_map/templates/rewrite_map_config_py.j2
new file mode 100644
index 0000000000000000000000000000000000000000..3d247e704ec6efdec3f239537514cc1362bfe0c0
--- /dev/null
+++ b/ansible/roles/rewrite_map/templates/rewrite_map_config_py.j2
@@ -0,0 +1,11 @@
+DEBUG = False
+target_groups = {
+    {% for group in target_groups %}
+    "{{ group.name }}": "{{ group.host }}",
+    {% endfor %}
+}
+{% for group in target_groups %}
+{% if group.default %}
+default_hostname = "{{ group.host }}"
+{% endif %}
+{% endfor %}
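For illustration, given a hypothetical `target_groups` with a single default entry, the template above renders /var/www/rewrite_map_config.py roughly as (host name is a placeholder):

    DEBUG = False
    target_groups = {
        "gpu": "login-gpu.example.edu",
    }
    default_hostname = "login-gpu.example.edu"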
diff --git a/ansible/roles/rsyslog_config/tasks/main.yml b/ansible/roles/rsyslog_config/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..61c5029006d6987cae2e8fb44aa13545d9f1e13d
--- /dev/null
+++ b/ansible/roles/rsyslog_config/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+- name: Add rsyslog configuration
+  ansible.builtin.template:
+    src: rsyslog.conf.j2
+    dest: /etc/rsyslog.conf
+    mode: '0644'
+    owner: root
+    group: root
+    backup: true
+
+- name: Enable and start rsyslog
+  ansible.builtin.service:
+    name: rsyslog
+    enabled: true
+    state: restarted
diff --git a/ansible/roles/rsyslog_config/templates/rsyslog.conf.j2 b/ansible/roles/rsyslog_config/templates/rsyslog.conf.j2
new file mode 100644
index 0000000000000000000000000000000000000000..41ba61b4f598d3d923e478634b39e35d772d9850
--- /dev/null
+++ b/ansible/roles/rsyslog_config/templates/rsyslog.conf.j2
@@ -0,0 +1,226 @@
+# rsyslog configuration file
+
+# For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
+# If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html
+
+# Added for distro update >= 4 (7u4)
+global (
+net.enabledns="off"
+)
+
+#### MODULES ####
+
+# The imjournal module below is now used as a message source instead of imuxsock.
+$ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
+$ModLoad imjournal # provides access to the systemd journal
+#$ModLoad imklog # reads kernel messages (the same are read from journald)
+#$ModLoad immark  # provides --MARK-- message capability
+
+# Provides UDP syslog reception
+#$ModLoad imudp
+#$UDPServerRun 514
+
+# Provides TCP syslog reception
+#$ModLoad imtcp
+#$InputTCPServerRun 514
+
+
+#### GLOBAL DIRECTIVES ####
+
+# Where to place auxiliary files
+$WorkDirectory /var/lib/rsyslog
+
+# Use default timestamp format
+$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
+
+# File syncing capability is disabled by default. This feature is usually not
+# required, rarely useful, and an extreme performance hit.
+#$ActionFileEnableSync on
+
+# Include all config files in /etc/rsyslog.d/
+$IncludeConfig /etc/rsyslog.d/*.conf
+
+# Turn off message reception via local log socket;
+# local messages are retrieved through imjournal now.
+$OmitLocalLogging on
+
+# File to store the position in the journal
+$IMJournalStateFile imjournal.state
+
+
+#### RULES ####
+
+# Log all kernel messages to the console.
+# Logging much else clutters up the screen.
+#kern.*                                                 /dev/console
+
+# Filter noisy nslcd LDAP messages (connection failures, ldap_abandon, ldap_result).
+if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains ' failed: Can\'t contact LDAP server' then stop
+if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'ldap_abandon() failed to abandon search: Other (e.g., implementation specific) error' then stop
+if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'ldap_abandon() failed to abandon search: Can\'t contact LDAP server: Transport endpoint is not connected' then stop
+if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'no available LDAP server found, sleeping ' then stop
+if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'connected to LDAP server ldap://local' then stop
+
+# Filter sntp started messages.
+if $programname == 'sntp' and $syslogseverity > '3' and $msg contains 'Started sntp' then stop
+
+# MariaDB Galera
+# disabled, as these messages are being generated every few seconds
+:msg, contains, "START: cm-check-galera-status" stop
+:msg, contains, "EXIT: cm-check-galera-status" stop
+
+# HAProxy for OpenStack
+if $syslogfacility-text == 'local4' and ($programname == 'haproxy') then {
+  local4.* /var/log/haproxy.log
+  stop
+}
+
+# OpenStack specific
+if $syslogfacility-text == 'daemon' then {
+
+  # needed for proper handling of Python stack traces
+  $EscapeControlCharactersOnReceive off
+
+  if $programname startswith 'keystone' then {
+    *.* /var/log/keystone/keystone.log
+  }
+
+  if $programname startswith 'nova' then {
+    *.* /var/log/nova/nova.log
+
+    if $programname == 'nova-api' then {
+      *.* /var/log/nova/nova-api.log
+    }
+    if $programname == 'nova-scheduler' then {
+      *.* /var/log/nova/nova-scheduler.log
+    }
+    if $programname == 'nova-conductor' then {
+      *.* /var/log/nova/nova-conductor.log
+    }
+    if $programname == 'nova-novncproxy' then {
+      *.* /var/log/nova/nova-novncproxy.log
+    }
+    if $programname == 'nova-compute' then {
+      *.* /var/log/nova/nova-compute.log
+    }
+  }
+
+  if $programname startswith 'neutron' then {
+    *.* /var/log/neutron/neutron.log
+
+    if $programname == 'neutron-server' then {
+      *.* /var/log/neutron/neutron-server.log
+    }
+    if $programname == 'neutron-metadata-agent' then {
+      *.* /var/log/neutron/neutron-metadata-agent.log
+    }
+    if $programname == 'neutron-l3-agent' then {
+      *.* /var/log/neutron/neutron-l3-agent.log
+    }
+    if $programname == 'neutron-dhcp-agent' then {
+      *.* /var/log/neutron/neutron-dhcp-agent.log
+    }
+    if $programname == 'neutron-openvswitch-agent' then {
+      *.* /var/log/neutron/neutron-openvswitch-agent.log
+    }
+
+  }
+
+  if $programname startswith 'glance' then {
+    *.* /var/log/glance/glance.log
+
+    if $programname == 'glance-api' then {
+      *.* /var/log/glance/glance-api.log
+    }
+    if $programname == 'glance-registry' then {
+      *.* /var/log/glance/glance-registry.log
+    }
+
+  }
+
+  if $programname startswith 'cinder' then {
+    *.* /var/log/cinder/cinder.log
+
+    if $programname == 'cinder-api' then {
+      *.* /var/log/cinder/cinder-api.log
+    }
+    if $programname == 'cinder-scheduler' then {
+      *.* /var/log/cinder/cinder-scheduler.log
+    }
+    if $programname == 'cinder-volume' then {
+      *.* /var/log/cinder/cinder-volume.log
+    }
+    if $programname == 'cinder-backup' then {
+      *.* /var/log/cinder/cinder-backup.log
+    }
+  }
+
+  if $programname startswith 'heat' then {
+    *.* /var/log/heat/heat.log
+
+    if $programname == 'heat-api' then {
+      *.* /var/log/heat/heat-api.log
+    }
+    if $programname == 'heat-engine' then {
+      *.* /var/log/heat/heat-engine.log
+    }
+  }
+
+  if $programname startswith 'keystone' or \
+     $programname startswith 'nova' or \
+     $programname startswith 'neutron' or \
+     $programname startswith 'glance' or \
+     $programname startswith 'cinder' or \
+     $programname startswith 'heat' then {
+
+     *.* /var/log/openstack
+     *.* @master:514
+     stop
+  }
+}
+
+# Log anything (except mail) of level info or higher.
+# Don't log private authentication messages!
+*.info;mail.none;authpriv.none;cron.none;local5.none;local6.none   /var/log/messages
+
+# The authpriv file has restricted access.
+authpriv.*                                              /var/log/secure
+
+# Log all the mail messages in one place.
+mail.*                                                  -/var/log/maillog
+
+
+# Log cron stuff
+cron.*                                                  /var/log/cron
+
+# Everybody gets emergency messages
+*.emerg                                                 :omusrmsg:*
+
+# Save news errors of level crit and higher in a special file.
+uucp,news.crit                                          /var/log/spooler
+
+# Save boot messages also to boot.log
+local7.*                                                /var/log/boot.log
+
+# cm related log files:
+local5.* -/var/log/node-installer
+local6.* -/var/log/cmdaemon
+
+# ### begin forwarding rule ###
+# The statements between the begin ... end markers define a SINGLE forwarding
+# rule. They belong together; do NOT split them. If you create multiple
+# forwarding rules, duplicate the whole block!
+# Remote Logging (we use TCP for reliable delivery)
+#
+# An on-disk queue is created for this action. If the remote host is
+# down, messages are spooled to disk and sent when it is up again.
+#$ActionQueueFileName fwdRule1 # unique name prefix for spool files
+#$ActionQueueMaxDiskSpace 1g   # 1gb space limit (use as much as possible)
+#$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
+#$ActionQueueType LinkedList   # run asynchronously
+#$ActionResumeRetryCount -1    # infinite retries if host is down
+# remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
+#*.* @@remote-host:514
+#CM
+{{ rsyslog_target }}
+#### end of the forwarding rule ###
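`rsyslog_target` is expected to expand to a complete forwarding selector on that final line; a hedged example value (host name is a placeholder):

    # @@host selects TCP delivery, a single @ selects UDP.
    rsyslog_target: "*.* @@loghost.example.edu:514"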
diff --git a/ansible/roles/slurm_client/tasks/main.yml b/ansible/roles/slurm_client/tasks/main.yml
index 9751720e6d62aa5fb8f6844e0e6bae5ec553858a..64612ed5a5b21aee18277e03a2be8d1b41429798 100644
--- a/ansible/roles/slurm_client/tasks/main.yml
+++ b/ansible/roles/slurm_client/tasks/main.yml
@@ -19,6 +19,7 @@
     state: present
     uid: 450
     group: slurm
+    create_home: false
 
 - name: Copy munge key
   ansible.builtin.copy:
@@ -28,6 +29,19 @@
     group: root
     mode: 0400
 
+- name: Create symbolic links for Slurm config files
+  ansible.builtin.file:
+    src: "{{ item.src }}"
+    dest: "{{ item.dest }}"
+    state: link
+    force: yes  # Force the creation of the symlinks even if source files do not exist yet
+  loop:
+    - { src: "/cm/shared/apps/slurm/var/etc/cgroup.conf", dest: "/etc/slurm/cgroup.conf" }
+    - { src: "/cm/shared/apps/slurm/var/etc/gres.conf", dest: "/etc/slurm/gres.conf" }
+    - { src: "/cm/shared/apps/slurm/var/etc/slurm.conf", dest: "/etc/slurm/slurm.conf" }
+    - { src: "/cm/shared/apps/slurm/var/etc/slurmdbd.conf", dest: "/etc/slurm/slurmdbd.conf" }
+    - { src: "/cm/shared/apps/slurm/var/etc/job_submit.lua", dest: "/etc/slurm/job_submit.lua" }
+
 - name: Enable services
   ansible.builtin.service:
     name: "{{ item }}"
diff --git a/ansible/roles/ssh_host_keys/tasks/main.yml b/ansible/roles/ssh_host_keys/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..59beb02b2d41a98c84683d901ce14c2155faf9f0
--- /dev/null
+++ b/ansible/roles/ssh_host_keys/tasks/main.yml
@@ -0,0 +1,46 @@
+---
+- name: Ensure temporary key directory exists
+  ansible.builtin.file:
+    path: /tmp/ssh_keys
+    state: directory
+    mode: '0755'
+
+- name: Install required boto3 package
+  ansible.builtin.pip:
+    name: boto3
+    extra_args: "--extra-index-url https://pypi.python.org/simple"
+    executable: "/usr/bin/pip3"
+
+- name: Download SSH host keys tar.gz from S3
+  amazon.aws.aws_s3:
+    mode: get
+    s3_url: "{{ S3_ENDPOINT }}"
+    bucket: "{{ SSH_HOST_KEYS_S3_BUCKET }}"
+    object: "{{ SSH_HOST_KEYS_S3_OBJECT }}"
+    dest: "/tmp/ssh_keys/{{ SSH_HOST_KEYS_S3_OBJECT }}"
+    aws_access_key: "{{ LTS_ACCESS_KEY }}"
+    aws_secret_key: "{{ LTS_SECRET_KEY }}"
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+  when: SSH_HOST_KEYS_S3_BUCKET | length > 0 and SSH_HOST_KEYS_S3_OBJECT | length > 0
+
+- name: Unpack SSH host keys to /etc/ssh
+  ansible.builtin.unarchive:
+    src: "/tmp/ssh_keys/{{ SSH_HOST_KEYS_S3_OBJECT }}"
+    dest: "/etc/ssh"
+    group: root
+    owner: root
+    remote_src: yes
+  become: true
+  when: SSH_HOST_KEYS_S3_BUCKET | length > 0 and SSH_HOST_KEYS_S3_OBJECT | length > 0
+
+- name: Remove the temporary folder once keys are in place
+  ansible.builtin.file:
+    path: /tmp/ssh_keys
+    state: absent
+
+- name: Restart SSH service
+  ansible.builtin.service:
+    name: sshd
+    state: restarted
+  become: true
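This role assumes the key archive already exists in LTS. A hedged sketch of producing and uploading one with the s3cmd version pinned elsewhere in this repo (bucket and object names are placeholders):

    # Package keys with paths relative to /etc/ssh so they unpack in place.
    tar -czf ssh-host-keys.tar.gz -C /etc/ssh \
      ssh_host_rsa_key ssh_host_rsa_key.pub ssh_host_ed25519_key ssh_host_ed25519_key.pub
    s3cmd --host="$S3_ENDPOINT" --host-bucket="$S3_ENDPOINT" \
      put ssh-host-keys.tar.gz s3://my-keys-bucket/ssh-host-keys.tar.gz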
diff --git a/ansible/roles/ssh_proxy_config/tasks/main.yml b/ansible/roles/ssh_proxy_config/tasks/main.yml
new file mode 100644
index 0000000000000000000000000000000000000000..30bac2abbe90860eabba3b051a4c212fa4f8c6b5
--- /dev/null
+++ b/ansible/roles/ssh_proxy_config/tasks/main.yml
@@ -0,0 +1,12 @@
+---
+- name: Configure sshpiper yaml plugin
+  ansible.builtin.template:
+    src: sshpiperd.yaml.j2
+    dest: "{{ sshpiper_dest_dir }}/sshpiperd.yaml"
+    backup: true
+
+- name: Enable and start sshpiper service
+  ansible.builtin.service:
+    name: sshpiperd
+    enabled: true
+    state: restarted
diff --git a/ansible/roles/ssh_proxy_config/templates/sshpiperd.yaml.j2 b/ansible/roles/ssh_proxy_config/templates/sshpiperd.yaml.j2
new file mode 100644
index 0000000000000000000000000000000000000000..4597108b3d5c3416d8edb38159dda0f37432a1b9
--- /dev/null
+++ b/ansible/roles/ssh_proxy_config/templates/sshpiperd.yaml.j2
@@ -0,0 +1,28 @@
+# yaml-language-server: $schema=https://raw.githubusercontent.com/tg123/sshpiper/master/plugin/yaml/schema.json
+version: "1.0"
+pipes:
+{% for group in target_groups %}
+{% if not group.default %}
+- from:
+    - groupname: "{{ group.name }}"
+      authorized_keys: "{{ group.authorized_keys }}"
+  to:
+    host: "{{ group.host }}"
+    ignore_hostkey: true
+    private_key: "{{ group.private_key }}"
+- from:
+    - groupname: "{{ group.name }}"
+  to:
+    host: "{{ group.host }}"
+    ignore_hostkey: true
+{% else %}
+- from:
+    - username: ".*" # catch all
+      username_regex_match: true
+      authorized_keys: "{{ group.authorized_keys }}"
+  to:
+    host: "{{ group.host }}"
+    ignore_hostkey: true
+    private_key: "{{ group.private_key }}"
+{% endif %}
+{% endfor %}
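Each non-default group therefore yields two pipes, one mapping public-key logins through the group's `authorized_keys` and upstream `private_key`, plus a passthrough for other auth methods, while the default group catches every username by regex. Rendered output for one hypothetical non-default group looks like:

    - from:
        - groupname: "gpu"
          authorized_keys: "/etc/sshpiper/gpu/authorized_keys"
      to:
        host: "login-gpu.example.edu:22"
        ignore_hostkey: true
        private_key: "/etc/sshpiper/gpu/id_ed25519"
    - from:
        - groupname: "gpu"
      to:
        host: "login-gpu.example.edu:22"
        ignore_hostkey: true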
diff --git a/ansible/roles/ssl_cert/tasks/main.yaml b/ansible/roles/ssl_cert/tasks/main.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..aa562aa448b7a62554bc8e9ae27d6eac2b916c04
--- /dev/null
+++ b/ansible/roles/ssl_cert/tasks/main.yaml
@@ -0,0 +1,65 @@
+---
+- name: Download SSL Certs from S3
+  amazon.aws.aws_s3:
+    mode: get
+    s3_url: "{{ S3_ENDPOINT }}"
+    bucket: "{{ ssl_cert_s3_bucket }}"
+    object: "{{ item }}"
+    dest: "{{ ssl_cert_file_location }}/{{ item }}"
+    aws_access_key: "{{ LTS_ACCESS_KEY }}"
+    aws_secret_key: "{{ LTS_SECRET_KEY }}"
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+  when: ssl_cert_s3_bucket | length > 0 and item | length > 0
+  loop:
+    - "{{ ssl_cert_file }}"
+    - "{{ ssl_cert_chain_file }}"
+
+- name: Change cert files permissions
+  ansible.builtin.file:
+    path: "{{ ssl_cert_file_location }}/{{ item }}"
+    owner: root
+    group: root
+    mode: '0600'
+  when: ssl_cert_s3_bucket | length > 0 and item | length > 0
+  loop:
+    - "{{ ssl_cert_file }}"
+    - "{{ ssl_cert_chain_file }}"
+
+- name: Download SSL key from S3
+  amazon.aws.aws_s3:
+    mode: get
+    s3_url: "{{ S3_ENDPOINT }}"
+    bucket: "{{ ssl_cert_s3_bucket }}"
+    object: "{{ ssl_cert_key }}"
+    dest: "{{ ssl_cert_key_location }}/{{ ssl_cert_key }}"
+    aws_access_key: "{{ LTS_ACCESS_KEY }}"
+    aws_secret_key: "{{ LTS_SECRET_KEY }}"
+  vars:
+    ansible_python_interpreter: /usr/bin/python3
+  when: ssl_cert_s3_bucket | length > 0 and ssl_cert_key | length > 0
+
+- name: Change key file permissions
+  ansible.builtin.file:
+    path: "{{ ssl_cert_key_location }}/{{ ssl_cert_key }}"
+    owner: root
+    group: root
+    mode: '0400'
+  when: ssl_cert_s3_bucket | length > 0 and ssl_cert_key | length > 0
+
+- name: Update SSL in Apache config
+  ansible.builtin.replace:
+    path: "{{ ssl_apache_config }}"
+    regexp: "{{ item.regexp }}"
+    replace: "\\1 {{ item.location }}/{{ item.value }}"
+    backup: true
+  when: ssl_apache_config | length > 0 and item.value | length > 0
+  loop:
+    - { regexp: "#?(SSLCertificateFile).*$", location: "{{ ssl_cert_file_location }}", value: "{{ ssl_cert_file }}" }
+    - { regexp: "#?(SSLCertificateChainFile).*$", location: "{{ ssl_cert_file_location }}", value: "{{ ssl_cert_chain_file }}" }
+    - { regexp: "#?(SSLCertificateKeyFile).*$", location: "{{ ssl_cert_key_location }}", value: "{{ ssl_cert_key }}" }
+
+- name: Restart apache service
+  ansible.builtin.service:
+    name: "{{ apache_service }}"
+    state: restarted
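All buckets, file names, and paths are left to the caller; a hypothetical vars file that satisfies the conditionals above (every value below is an assumption):

    ssl_cert_s3_bucket: "tls-certs"
    ssl_cert_file: "site.crt"
    ssl_cert_chain_file: "site-chain.crt"
    ssl_cert_key: "site.key"
    ssl_cert_file_location: "/etc/pki/tls/certs"
    ssl_cert_key_location: "/etc/pki/tls/private"
    ssl_apache_config: "/etc/httpd/conf.d/ssl.conf"
    apache_service: "httpd"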
diff --git a/openstack-compute/README.md b/openstack-login/README.md
similarity index 100%
rename from openstack-compute/README.md
rename to openstack-login/README.md
diff --git a/openstack-compute/nodeimage.pkr.hcl b/openstack-login/nodeimage.pkr.hcl
similarity index 79%
rename from openstack-compute/nodeimage.pkr.hcl
rename to openstack-login/nodeimage.pkr.hcl
index 15941bab1900a056d3d67bdba2db2bd5d94a31fb..e770d3e8a817ada4c17455ee218f2f432827b9ad 100644
--- a/openstack-compute/nodeimage.pkr.hcl
+++ b/openstack-login/nodeimage.pkr.hcl
@@ -36,11 +36,19 @@ source "openstack" "image" {
 build {
   sources = ["source.openstack.image"]
 
+  provisioner "shell" {
+    inline = [
+      "sudo yum install -y libselinux-python3 python3 python3-pip tmux vim git bash-completion curl wget unzip",
+      "sudo python3 -m pip install --upgrade pip",
+      "sudo pip3 install s3cmd==2.3.0 ansible==4.10.0 python-openstackclient==5.8.0"
+    ]
+  }
+
   provisioner "ansible" {
     use_proxy     = false
     user          = var.ssh_username
     groups        = ["compute"]
-    playbook_file = "./ansible/compute.yml"
+    playbook_file = "./ansible/login.yml"
     roles_path    = "./ansible/roles"
     extra_arguments = [
       "--extra-vars", "root_ssh_key='${var.root_ssh_key}'"
@@ -53,5 +61,8 @@ build {
     groups           = ["compute"]
     ansible_env_vars = ["ANSIBLE_HOST_KEY_CHECKING=False"]
     playbook_file    = "./CRI_XCBC/compute-packer.yaml"
+    extra_arguments  = [
+      "--extra-vars", "${var.extra_vars}"
+    ]
   }
 }
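With `extra_vars` plumbed through to ansible-playbook, build-time variables can be injected without editing the playbooks. A hedged example using Packer's PKR_VAR_ environment convention (the key=value pairs are illustrative):

    export PKR_VAR_extra_vars="use_autofs=true account_app_port=8080"
    packer build openstack-login/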
diff --git a/openstack-compute/variables.pkr.hcl b/openstack-login/variables.pkr.hcl
similarity index 93%
rename from openstack-compute/variables.pkr.hcl
rename to openstack-login/variables.pkr.hcl
index 20efd641406f81250b3cb1cc6514f5078ee2a503..6cef95c13157b85446affdea4deff552d6c1d71a 100644
--- a/openstack-compute/variables.pkr.hcl
+++ b/openstack-login/variables.pkr.hcl
@@ -1,6 +1,7 @@
 variable "root_ssh_key" {
   type        = string
   description = "The root key to use for ssh"
+  default     = ""
 }
 
 variable "image_name" {
@@ -87,4 +88,10 @@ variable "volume_size" {
   type        = number
   default     = 20
   description = "The default volume size for building iamge"
-}
\ No newline at end of file
+}
+
+variable "extra_vars" {
+  type        = string
+  default     = ""
+  description = "Extra vars to pass to ansible playbook command"
+}
diff --git a/openstack-ood/nodeimage.pkr.hcl b/openstack-ood/nodeimage.pkr.hcl
index b31d7a1724bada0b8dbe1002b026d11571c294de..1a1374419cd8ed985a4cde57378154a5cf07ff3f 100644
--- a/openstack-ood/nodeimage.pkr.hcl
+++ b/openstack-ood/nodeimage.pkr.hcl
@@ -53,5 +53,17 @@ build {
     groups           = ["ood", "knightly"]
     ansible_env_vars = ["ANSIBLE_HOST_KEY_CHECKING=False"]
     playbook_file    = "./CRI_XCBC/ood-packer.yaml"
+    extra_arguments  = [
+      "--extra-vars", "${var.extra_vars}"
+    ]
   }
+
+  provisioner "shell" {
+    inline = [
+      "sudo yum install -y libselinux-python3 python3 python3-pip tmux vim git bash-completion curl wget unzip",
+      "sudo python3 -m pip install --upgrade pip",
+      "sudo pip3 install s3cmd==2.3.0 ansible==4.10.0 python-openstackclient==5.8.0"
+    ]
+  }
+
 }
diff --git a/openstack-ood/variables.pkr.hcl b/openstack-ood/variables.pkr.hcl
index 5a6f608cc01a4134e3d276f4a0960e4d2c530f4c..3e20d6dc9e6b35ccac6b7bdb03f5ec28e8d24cce 100644
--- a/openstack-ood/variables.pkr.hcl
+++ b/openstack-ood/variables.pkr.hcl
@@ -1,5 +1,6 @@
 variable "root_ssh_key" {
   type        = string
+  default     = ""
   description = "The root key to use for ssh"
 }
 
@@ -87,4 +88,10 @@ variable "volume_size" {
   type        = number
   default     = 20
   description = "The default volume size for building iamge"
-}
\ No newline at end of file
+}
+
+variable "extra_vars" {
+  type        = string
+  default     = ""
+  description = "Extra vars to pass to ansible playbook command"
+}
diff --git a/openstack-proxy/README.md b/openstack-proxy/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d9287a3ae8ba8e4456edcf1ef7607a9366c5f8c7
--- /dev/null
+++ b/openstack-proxy/README.md
@@ -0,0 +1 @@
+This contains packer hcl files for creating images. For documentation on packer, see [here](https://www.packer.io/docs); for information about the openstack-specific builder, see [here](https://www.packer.io/plugins/builders/openstack)
diff --git a/openstack-proxy/nodeimage.pkr.hcl b/openstack-proxy/nodeimage.pkr.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..b9480f29cc550654ff3017773c5e2acb60b1584a
--- /dev/null
+++ b/openstack-proxy/nodeimage.pkr.hcl
@@ -0,0 +1,65 @@
+packer {
+  required_plugins {
+    openstack = {
+      version = "~> 1"
+      source  = "github.com/hashicorp/openstack"
+    }
+    ansible = {
+      version = "~> 1"
+      source  = "github.com/hashicorp/ansible"
+    }
+  }
+}
+
+locals {
+  local_image_name = "${var.image_name}${var.image_date_suffix ? formatdate("-YYYYMMDDHHmm", timestamp()) : ""}"
+}
+
+source "openstack" "image" {
+  skip_create_image         = var.skip_create_image
+  image_name                = local.local_image_name
+  source_image              = var.source_image
+  image_members             = var.image_membership
+  image_auto_accept_members = var.auto_accept_members
+  image_tags                = var.image_tags
+  image_disk_format         = var.image_format
+  volume_size               = var.volume_size
+  flavor                    = var.flavor
+  instance_name             = var.build_instance_name
+  use_blockstorage_volume   = true
+  floating_ip_network       = var.floating_ip_network
+  networks                  = var.networks
+  security_groups           = var.security_groups
+  ssh_username              = var.ssh_username
+}
+
+build {
+  sources = ["source.openstack.image"]
+
+  provisioner "shell" {
+    inline = [
+      "sudo yum install -y epel-release",
+      "sudo dnf config-manager --set-enabled crb",
+      "sudo yum install -y libselinux-python3 python3 python3-pip tmux vim git bash-completion curl wget unzip httpd",
+      "sudo python3 -m pip install --upgrade pip",
+      "sudo pip3 install s3cmd==2.3.0 ansible==4.10.0 python-openstackclient==5.8.0"
+    ]
+  }
+
+  provisioner "ansible" {
+    use_proxy        = false
+    user             = var.ssh_username
+    groups           = ["proxy"]
+    ansible_env_vars = [
+      "ANSIBLE_TIMEOUT=60",
+      "ANSIBLE_HOST_KEY_CHECKING=False",
+      "ANSIBLE_VERBOSITY=${var.ANSIBLE_VERBOSITY}",
+      "ANSIBLE_DEBUG=${var.ANSIBLE_DEBUG}",
+      "ANSIBLE_FORCE_COLOR=true"
+    ]
+    playbook_file    = "./CRI_XCBC/proxy.yaml"
+    extra_arguments  = [
+      "--extra-vars", "${var.extra_vars}"
+    ]
+  }
+}
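The ANSIBLE_VERBOSITY and ANSIBLE_DEBUG variables make provisioner debugging switchable per run without touching the template; for example (a sketch):

    PKR_VAR_ANSIBLE_VERBOSITY=3 PKR_VAR_ANSIBLE_DEBUG=true packer build openstack-proxy/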
diff --git a/openstack-proxy/variables.pkr.hcl b/openstack-proxy/variables.pkr.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..6ab03ba638197c98ba7e93bb891596a74d9938c3
--- /dev/null
+++ b/openstack-proxy/variables.pkr.hcl
@@ -0,0 +1,113 @@
+variable "root_ssh_key" {
+  type        = string
+  default     = ""
+  description = "The root key to use for ssh"
+}
+
+variable "image_name" {
+  type        = string
+  default     = "cluster-image"
+  description = "Name of the image in openstack"
+}
+
+variable "image_format" {
+  type        = string
+  default     = "qcow2"
+  description = "The format of the resulting image"
+}
+
+variable "image_date_suffix" {
+  type        = bool
+  default     = false
+  description = "Append a date to the image name (in YYYYMMDDHHMMSS format)"
+}
+
+variable "image_tags" {
+  type        = list(string)
+  default     = []
+  description = "List of tags to be associated to the resulting image"
+}
+
+variable "image_membership" {
+  type        = list(string)
+  default     = []
+  description = "Projects/tenants to share the image in openstack with"
+}
+
+variable "auto_accept_members" {
+  type        = bool
+  default     = false
+  description = "A boolean value for auto accepting image in the projects/tenants defined in image_membership."
+}
+
+variable "skip_create_image" {
+  type        = bool
+  default     = false
+  description = "A boolean value for skipping image creation at the end of the build"
+}
+
+variable "source_image" {
+  type        = string
+  default     = ""
+  description = "The name of the source image to use"
+}
+
+variable "flavor" {
+  type        = string
+  default     = ""
+  description = "The name of the flavor to use"
+}
+
+variable "floating_ip_network" {
+  type        = string
+  default     = "uab-campus"
+  description = "floating ip network to use with (temporary) ip assignmnet to a vm"
+}
+
+variable "networks" {
+  type        = list(string)
+  default     = []
+  description = "List of network UUIDs to assign to the network"
+}
+
+variable "security_groups" {
+  type        = list(string)
+  default     = []
+  description = "A list of security groups to add - you should make sure ssh access is open to the machine"
+}
+
+variable "build_instance_name" {
+  type        = string
+  default     = "ood"
+  description = "A name of build instance used for image build"
+}
+
+variable "ssh_username" {
+  type        = string
+  default     = "centos"
+  description = "The default username to use for SSH"
+}
+
+variable "volume_size" {
+  type        = number
+  default     = 20
+  description = "The default volume size for building iamge"
+}
+
+variable "ANSIBLE_DEBUG" {
+  type        = string
+  default     = "false"
+  description = "to turn on debugging"
+}
+
+variable "ANSIBLE_VERBOSITY" {
+  type        = string
+  default     = "0"
+  description = "to increase verbosity - 0|1|2|3|4"
+}
+
+variable "extra_vars" {
+  type        = string
+  default     = ""
+  description = "Extra vars to pass to ansible playbook command"
+}