
Compare revisions

Changes are shown as if the source revision was being merged into the target revision.

Commits on Source (173)
Showing 838 additions and 115 deletions
@@ -43,29 +43,31 @@ workflow:
.update_ansible_repo: &update_ansible_repo
- *get_build_date
- |
if [ ! -d $CI_PROJECT_DIR/CRI_XCBC ]; then
git clone https://github.com/uabrc/CRI_XCBC.git
cd CRI_XCBC
git remote add upstream https://github.com/jprorama/CRI_XCBC.git
export EXT_REPO_DIR=$(basename -s .git $EXT_PR_TARGET_REPO)
if [ ! -d $CI_PROJECT_DIR/$EXT_REPO_DIR ]; then
git clone ${EXT_PR_TARGET_REPO} ${EXT_REPO_DIR}
cd ${EXT_REPO_DIR}
git remote add upstream ${EXT_PR_SRC_REPO}
cd ..
fi
- cd CRI_XCBC
- cd ${EXT_REPO_DIR}
- git config user.name "${GIT_AUTHOR_NAME}"
- git config user.email "${GIT_AUTHOR_EMAIL}"
- git fetch origin uab-prod
- git fetch upstream dev
- git checkout uab-prod
- git merge origin/uab-prod
- git checkout ${EXT_PR_TARGET_BRANCH}
- git fetch origin ${EXT_PR_TARGET_BRANCH}
- git merge origin/${EXT_PR_TARGET_BRANCH}
- git checkout -b integration
- git merge upstream/dev
- export CRI_XCBC_HEAD=$(git rev-parse --short HEAD)
- export CRI_XCBC_dev=$(git rev-parse --short upstream/dev)
- export CRI_XCBC_prod=$(git rev-parse --short origin/uab-prod)
- git fetch upstream ${EXT_PR_SRC_BRANCH}
- git merge upstream/${EXT_PR_SRC_BRANCH}
# export vars into job artifacts
- export EXT_REPO_HEAD=$(git rev-parse --short HEAD)
- export EXT_PR_SRC_BRANCH_SHA=$(git rev-parse --short upstream/${EXT_PR_SRC_BRANCH})
- export EXT_PR_TARGET_BRANCH_SHA=$(git rev-parse --short origin/${EXT_PR_TARGET_BRANCH})
- cd ..
- export PACKER_IMAGE_HEAD=$(git rev-parse --short HEAD)
- echo CRI_XCBC_HEAD=${CRI_XCBC_HEAD} | tee -a $CI_PROJECT_DIR/image.env
- echo CRI_XCBC_dev=${CRI_XCBC_dev} | tee -a $CI_PROJECT_DIR/image.env
- echo CRI_XCBC_prod=${CRI_XCBC_prod} | tee -a $CI_PROJECT_DIR/image.env
- echo EXT_REPO_HEAD=${EXT_REPO_HEAD} | tee -a $CI_PROJECT_DIR/image.env
- echo EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} | tee -a $CI_PROJECT_DIR/image.env
- echo EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} | tee -a $CI_PROJECT_DIR/image.env
- echo PACKER_IMAGE_HEAD=${PACKER_IMAGE_HEAD} | tee -a $CI_PROJECT_DIR/image.env
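# For reference, a sketch of the dotenv artifact these steps produce (SHA values are illustrative):
#   EXT_REPO_HEAD=1a2b3c4
#   EXT_PR_SRC_BRANCH_SHA=5d6e7f8
#   EXT_PR_TARGET_BRANCH_SHA=9f8e7d6
#   PACKER_IMAGE_HEAD=0c1d2e3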
.get_ansible_files: &get_ansible_files
@@ -77,32 +79,33 @@ workflow:
- *get_ansible_files
# packer vars for job env
- export PKR_VAR_flavor="${PROXY_BUILD_FLAVOR:-$PKR_VAR_flavor}"
- export PKR_VAR_build_instance_name="${BUILD_TARGET}-${CRI_XCBC_HEAD}"
- export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
- export PKR_VAR_image_date_suffix=false
- |
if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then
export PKR_VAR_image_name="${BUILD_TARGET}-PR-${CI_MERGE_REQUEST_IID}"
elif [ $CI_PIPELINE_SOURCE == 'schedule' ]; then
export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_DATE}"
export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
fi
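# Illustrative naming: a merge request pipeline produces an image name like "http-proxy-PR-42",
# while a scheduled build produces "http-proxy-<BUILD_TAG>" (or "http-proxy-<BUILD_DATE>" when no tag is set).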
# Ansible var overrides
- |
if [ -n "${PROXY_ENABLE_VAR}" ]; then
sed -i -E "s/(${PROXY_ENABLE_VAR}: ).*/\1true/" CRI_XCBC/group_vars/all
sed -i -E "s/(${PROXY_ENABLE_VAR}: ).*/\1true/" $EXT_REPO_DIR/group_vars/all
fi
- 'sed -i -E "s|(s3_endpoint: ).*|\1\"${S3_ENDPOINT}\"|" CRI_XCBC/group_vars/all'
- 'sed -i -E "s/(lts_access_key: ).*/\1\"${AWS_ACCESS_KEY_ID}\"/" CRI_XCBC/group_vars/all'
- 'sed -i -E "s/(lts_secret_key: ).*/\1\"${AWS_SECRET_ACCESS_KEY}\"/" CRI_XCBC/group_vars/all'
- 'sed -i -E "s/(s3_shibboleth_bucket_name: ).*/\1\"${S3_SHIBBOLETH_BUCKET_NAME}\"/" CRI_XCBC/group_vars/all'
- 'sed -i -E "s/(s3_shibboleth_object_name: ).*/\1\"${S3_SHIBBOLETH_OBJECT_NAME}\"/" CRI_XCBC/group_vars/all'
- 'sed -i -E "s|(ssh_pub_key: ).*|\1\"{{ lookup(''file'', ''${SSH_PUB_KEY}'') }}\"|" CRI_XCBC/group_vars/all'
- 'sed -i -E "s|(s3_endpoint: ).*|\1\"${S3_ENDPOINT}\"|" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s/(lts_access_key: ).*/\1\"${AWS_ACCESS_KEY_ID}\"/" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s/(lts_secret_key: ).*/\1\"${AWS_SECRET_ACCESS_KEY}\"/" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s/(s3_shibboleth_bucket_name: ).*/\1\"${S3_SHIBBOLETH_BUCKET_NAME}\"/" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s/(s3_shibboleth_object_name: ).*/\1\"${S3_SHIBBOLETH_OBJECT_NAME}\"/" $EXT_REPO_DIR/group_vars/all'
- 'sed -i -E "s|(ssh_pub_key: ).*|\1\"{{ lookup(''file'', ''${SSH_PUB_KEY}'') }}\"|" $EXT_REPO_DIR/group_vars/all'
# packer commands
- packer init openstack-proxy
- packer validate openstack-proxy
- packer build -machine-readable openstack-proxy | tee proxy_build.log
- export BUILT_PROXY_IMAGE_ID=$(grep 'Image:' proxy_build.log | awk '{print $4}')
- echo BUILT_PROXY_IMAGE_ID=${BUILT_PROXY_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
- openstack image set --property CRI_XCBC_prod=${CRI_XCBC_prod} --property CRI_XCBC_dev=${CRI_XCBC_dev} --property PACKER_IMAGE_HEAD=${PACKER_IMAGE_HEAD} ${BUILT_PROXY_IMAGE_ID}
# set image properties with repo state
- openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${PACKER_IMAGE_HEAD} ${BUILT_PROXY_IMAGE_ID}
artifacts:
reports:
dotenv: image.env
@@ -110,11 +113,11 @@ workflow:
build_http_proxy_image:
stage: build
environment:
name: $ENV
name: build
tags:
- build
variables:
PROXY_ENABLE_VAR: "enable_ood_proxy"
PROXY_ENABLE_VAR: "enable_http_proxy"
<<: *build_proxy_image_template
rules:
- if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "http-proxy"
@@ -123,16 +126,93 @@ build_http_proxy_image:
build_ssh_proxy_image:
stage: build
environment:
name: $ENV
name: build
tags:
- build
variables:
PROXY_ENABLE_VAR: "enable_sshpiper"
PROXY_ENABLE_VAR: "enable_ssh_proxy"
<<: *build_proxy_image_template
rules:
- if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "ssh-proxy"
when: always
.build_login_image_template: &build_login_image_template
script:
- *update_ansible_repo
- *get_ansible_files
# packer vars for job env
- export PKR_VAR_flavor="${PROXY_BUILD_FLAVOR:-$PKR_VAR_flavor}"
- export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
- export PKR_VAR_image_date_suffix=false
- |
if [ $CI_PIPELINE_SOURCE == 'merge_request_event' ]; then
export PKR_VAR_image_name="${BUILD_TARGET}-PR-${CI_MERGE_REQUEST_IID}"
elif [ $CI_PIPELINE_SOURCE == 'schedule' ]; then
export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
fi
# packer commands
- packer init openstack-login
- packer validate openstack-login
- packer build -machine-readable openstack-login | tee login_build.log
- export BUILT_LOGIN_IMAGE_ID=$(grep 'Image:' login_build.log | awk '{print $4}')
- echo BUILT_LOGIN_IMAGE_ID=${BUILT_LOGIN_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
# set image properties with repo state
- openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${CI_COMMIT_SHORT_SHA} ${BUILT_LOGIN_IMAGE_ID}
artifacts:
reports:
dotenv: image.env
build_login_image:
stage: build
environment:
name: build
tags:
- build
<<: *build_login_image_template
rules:
- if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "login"
when: always
build_ood_image:
stage: build
environment:
name: build
tags:
- build
script:
- *update_ansible_repo
- *get_ansible_files
# packer vars for job env
- export PKR_VAR_flavor="${OOD_BUILD_FLAVOR:-$PKR_VAR_flavor}"
- export PKR_VAR_build_instance_name="${BUILD_TARGET}-${EXT_REPO_HEAD}"
- export PKR_VAR_image_date_suffix=false
- export PKR_VAR_image_name="${BUILD_TARGET}-${BUILD_TAG:-${BUILD_DATE}}"
- |
if [ $ENV = 'knightly' ] || [ $ENV = 'prod' ]; then
curl --header "PRIVATE-TOKEN: ${ANSIBLE_VAR_TOKEN}" \
"${CI_API_V4_URL}/projects/2836/repository/files/$ENV/raw?ref=main" \
-o CRI_XCBC/group_vars/$ENV
sed -i -E "s/(lts_access_key: ).*/\1\"${AWS_ACCESS_KEY_ID}\"/" CRI_XCBC/group_vars/$ENV
sed -i -E "s/(lts_secret_key: ).*/\1\"${AWS_SECRET_ACCESS_KEY}\"/" CRI_XCBC/group_vars/$ENV
sed -i -E "s/(user_register_app_key: ).*/\1\"${SELF_REG_APP_KEY}\"/" CRI_XCBC/group_vars/$ENV
sed -i -E "s/(celery_user_password: ).*/\1\"${CELERY_PASSWD}\"/" CRI_XCBC/group_vars/$ENV
sed -i -E "s|(ssh_pub_key: ).*|\1\"{{ lookup('file', '${SSH_PUB_KEY}') }}\"|" CRI_XCBC/group_vars/$ENV
fi
# packer commands
- packer init openstack-ood
- packer validate openstack-ood
- packer build -machine-readable openstack-ood | tee ood_build.log
- export BUILT_OOD_IMAGE_ID=$(grep 'Image:' ood_build.log | awk '{print $4}')
- echo BUILT_OOD_IMAGE_ID=${BUILT_OOD_IMAGE_ID} | tee -a $CI_PROJECT_DIR/image.env
# set image properties with repo state
- openstack image set --property EXT_PR_SRC_REPO=${EXT_PR_SRC_REPO} --property EXT_PR_SRC_BRANCH_SHA=${EXT_PR_SRC_BRANCH_SHA} --property EXT_PR_TARGET_REPO=${EXT_PR_TARGET_REPO} --property EXT_PR_TARGET_BRANCH_SHA=${EXT_PR_TARGET_BRANCH_SHA} --property PACKER_IMAGE_HEAD=${CI_COMMIT_SHORT_SHA} ${BUILT_OOD_IMAGE_ID}
artifacts:
reports:
dotenv: image.env
rules:
- if: $PIPELINE_TARGET == "build" && $BUILD_TARGET == "ood"
when: always
deploy_http_proxy_node:
stage: deploy
environment:
@@ -145,38 +225,43 @@ deploy_http_proxy_node:
- |
cat > user_data.txt <<EOF
#!/bin/bash
cat >> /etc/NetworkManager/conf.d/90-dns-none.conf<<EEOF
[main]
dns=none
EEOF
systemctl reload NetworkManager
echo "$DEV_KEY" >> /root/.ssh/authorized_keys
ip route replace default via ${DEFAULT_GATEWAY_IP} dev eth0
git clone ${CI_REPOSITORY_URL} /tmp/${CI_PROJECT_NAME}
cd /tmp/${CI_PROJECT_NAME}
git checkout ${CI_COMMIT_REF_NAME}
ansible-playbook -c local -i 127.0.0.1, --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
cat >> ansible/hosts<<EEOF
[$ENV]
127.0.0.1
EEOF
ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
rm -rf /tmp/${CI_PROJECT_NAME}
EOF
- >
export HTTP_PROXY_INSTANCE_PORT=$(openstack port create
-c id -f value --network $INTERNALNET
--disable-port-security
${HTTP_PROXY_INSTANCE_NAME}_internal_port)
- >
export HTTP_PROXY_INSTANCE_ID=$(openstack server create
-c id -f value --image $HTTP_PROXY_IMAGE_ID
--network $PROXY_NETWORK
--port $HTTP_PROXY_INSTANCE_PORT
--security-group webserver_sec_group
--security-group allow-ssh
--user-data user_data.txt
--flavor $INSTANCE_FLAVOR
--wait
$HTTP_PROXY_INSTANCE_NAME)
- |
# Create and assign a floating IP to the HTTP Proxy instance
HTTP_PROXY_FLOATING_IP=$(openstack floating ip create $PKR_VAR_floating_ip_network -f value -c floating_ip_address)
echo "Created FLOATING_IP: $HTTP_PROXY_FLOATING_IP"
- |
# Associate the floating IP with the HTTP Proxy instance
openstack server add floating ip $HTTP_PROXY_INSTANCE_ID $HTTP_PROXY_FLOATING_IP
echo "Associated FLOATING_IP $HTTP_PROXY_FLOATING_IP with HTTP_PROXY_INSTANCE_ID $HTTP_PROXY_INSTANCE_ID"
- |
export cmd="openstack server create"
cmd+=" -c id -f value --image $HTTP_PROXY_IMAGE_ID"
cmd+=" --flavor $INSTANCE_FLAVOR"
for security_group in ${SECURITY_GROUP_LIST[@]};
do
cmd+=" --security-group $security_group"
done
cmd+=" --user-data user_data.txt"
if [ -n "$PROXY_NETWORK" ];then cmd+=" --network $PROXY_NETWORK"; fi
if [ -n "$HTTP_PROXY_PORT" ];then cmd+=" --port $HTTP_PROXY_PORT"; fi
cmd+=" --wait $HTTP_PROXY_INSTANCE_NAME"
- export HTTP_PROXY_INSTANCE_ID=$(bash -c "$cmd")
- |
# Associate the floating IP(s) with the HTTP Proxy instance
for HTTP_PROXY_FLOATING_IP in ${HTTP_PROXY_FLOATING_IP_LIST[@]};
do
echo "Associating FLOATING_IP $HTTP_PROXY_FLOATING_IP with HTTP_PROXY_INSTANCE_ID $HTTP_PROXY_INSTANCE_ID"
openstack server add floating ip $HTTP_PROXY_INSTANCE_ID $HTTP_PROXY_FLOATING_IP
done
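# Assumption for illustration: both lists are space-separated CI/CD variables that word-split in the
# unquoted ${...[@]} expansions above, e.g.
#   SECURITY_GROUP_LIST="webserver_sec_group allow-ssh"
#   HTTP_PROXY_FLOATING_IP_LIST="192.0.2.10 192.0.2.11"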
rules:
- if: $PIPELINE_TARGET == "deploy" && $HTTP_PROXY_IMAGE_ID
when: always
@@ -193,37 +278,151 @@ deploy_ssh_proxy_node:
- |
cat > user_data.txt <<EOF
#!/bin/bash
cat >> /etc/NetworkManager/conf.d/90-dns-none.conf<<EEOF
[main]
dns=none
EEOF
systemctl reload NetworkManager
echo "$DEV_KEY" >> /root/.ssh/authorized_keys
ip route replace default via ${DEFAULT_GATEWAY_IP} dev eth0
git clone ${CI_REPOSITORY_URL} /tmp/${CI_PROJECT_NAME}
cd /tmp/${CI_PROJECT_NAME}
git checkout ${CI_COMMIT_REF_NAME}
ansible-playbook -c local -i 127.0.0.1, --extra-vars="$EXTRA_VARS" ansible/cheaha.yml | tee -a /tmp/ansible.log
cat >> ansible/hosts<<EEOF
[$ENV]
127.0.0.1
EEOF
ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
rm -rf /tmp/${CI_PROJECT_NAME}
EOF
- >
export SSH_PROXY_INSTANCE_PORT=$(openstack port create
-c id -f value --network $INTERNALNET
--disable-port-security
${SSH_PROXY_INSTANCE_NAME}_internal_port)
- >
export SSH_PROXY_INSTANCE_ID=$(openstack server create
-c id -f value --image $SSH_PROXY_IMAGE_ID
--network $PROXY_NETWORK
--port $SSH_PROXY_INSTANCE_PORT
--security-group allow-ssh
--user-data user_data.txt
--flavor $INSTANCE_FLAVOR
--wait
$SSH_PROXY_INSTANCE_NAME)
- |
# Create and assign a floating IP to the SSH Proxy instance
SSH_PROXY_FLOATING_IP=$(openstack floating ip create $PKR_VAR_floating_ip_network -f value -c floating_ip_address)
echo "Created SSH_PROXY_FLOATING_IP: $SSH_PROXY_FLOATING_IP"
- |
# Associate the floating IP with the SSH Proxy instance
openstack server add floating ip $SSH_PROXY_INSTANCE_ID $SSH_PROXY_FLOATING_IP
echo "Associated FLOATING_IP $SSH_PROXY_FLOATING_IP with SSH_PROXY_INSTANCE_ID $SSH_PROXY_INSTANCE_ID"
- |
export cmd="openstack server create"
cmd+=" -c id -f value --image $SSH_PROXY_IMAGE_ID"
cmd+=" --flavor $INSTANCE_FLAVOR"
for security_group in ${SECURITY_GROUP_LIST[@]};
do
cmd+=" --security-group $security_group"
done
cmd+=" --user-data user_data.txt"
if [ -n "$PROXY_NETWORK" ];then cmd+=" --network $PROXY_NETWORK"; fi
if [ -n "$SSH_PROXY_PORT" ];then cmd+=" --port $SSH_PROXY_PORT"; fi
cmd+=" --wait $SSH_PROXY_INSTANCE_NAME"
- export SSH_PROXY_INSTANCE_ID=$(bash -c "$cmd")
- |
# Associate the floating IP(s) with the SSH Proxy instance
for SSH_PROXY_FLOATING_IP in ${SSH_PROXY_FLOATING_IP_LIST[@]};
do
echo "Associating FLOATING_IP $SSH_PROXY_FLOATING_IP with SSH_PROXY_INSTANCE_ID $SSH_PROXY_INSTANCE_ID"
openstack server add floating ip $SSH_PROXY_INSTANCE_ID $SSH_PROXY_FLOATING_IP
done
rules:
- if: $PIPELINE_TARGET == "deploy" && $SSH_PROXY_IMAGE_ID
when: always
deploy_login_node:
stage: deploy
environment:
name: $ENV
tags:
- build
script:
- openstack image set --accept $LOGIN_IMAGE_ID || true
- FAILED=false
- |
cat > user_data.txt <<EOF
#!/bin/bash
cat >> /etc/NetworkManager/conf.d/90-dns-none.conf<<EEOF
[main]
dns=none
EEOF
systemctl reload NetworkManager
echo "$DEV_KEY" >> /root/.ssh/authorized_keys
ip route replace default via ${DEFAULT_GATEWAY_IP} dev eth0
git clone ${CI_REPOSITORY_URL} /tmp/${CI_PROJECT_NAME}
cd /tmp/${CI_PROJECT_NAME}
git checkout ${CI_COMMIT_REF_NAME}
cat >> ansible/hosts<<EEOF
[$ENV]
127.0.0.1
EEOF
s3cmd get --force -r --access_key=$AWS_ACCESS_KEY_ID --secret_key=$AWS_SECRET_ACCESS_KEY --host=$AWS_HOST --host-bucket=$AWS_HOST s3://cheaha-cloud-ansible-files/ /tmp/${CI_PROJECT_NAME}/ansible/files/
ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
rm -rf /tmp/${CI_PROJECT_NAME}
EOF
- |
export cmd="openstack server create"
cmd+=" -c id -f value --image $LOGIN_IMAGE_ID"
cmd+=" --flavor $INSTANCE_FLAVOR"
for security_group in ${SECURITY_GROUP_LIST[@]};
do
cmd+=" --security-group $security_group"
done
cmd+=" --user-data user_data.txt"
if [ -n "$INSTANCE_NETWORK" ];then cmd+=" --network $INSTANCE_NETWORK"; fi
if [ -n "$LOGIN_PORT" ];then cmd+=" --port $LOGIN_PORT"; fi
cmd+=" --wait $LOGIN_INSTANCE_NAME"
- export LOGIN_INSTANCE_ID=$(bash -c "$cmd")
- |
# Associate the floating IP(s) with the Login instance
for LOGIN_FLOATING_IP in ${LOGIN_FLOATING_IP_LIST[@]};
do
echo "Associating FLOATING_IP $LOGIN_FLOATING_IP with LOGIN_INSTANCE_ID $LOGIN_INSTANCE_ID"
openstack server add floating ip $LOGIN_INSTANCE_ID $LOGIN_FLOATING_IP
done
rules:
- if: $PIPELINE_TARGET == "deploy" && $LOGIN_IMAGE_ID
when: always
deploy_ood_node:
stage: deploy
environment:
name: $ENV
tags:
- build
script:
- openstack image set --accept $OOD_IMAGE_ID || true
- FAILED=false
- |
cat > user_data.txt <<EOF
#!/bin/bash
cat >> /etc/NetworkManager/conf.d/90-dns-none.conf<<EEOF
[main]
dns=none
EEOF
systemctl reload NetworkManager
echo "$DEV_KEY" >> /root/.ssh/authorized_keys
ip route replace default via ${DEFAULT_GATEWAY_IP} dev eth0
git clone ${CI_REPOSITORY_URL} /tmp/${CI_PROJECT_NAME}
cd /tmp/${CI_PROJECT_NAME}
git checkout ${CI_COMMIT_REF_NAME}
cat >> ansible/hosts<<EEOF
[$ENV]
127.0.0.1
EEOF
s3cmd get --force -r --access_key=$AWS_ACCESS_KEY_ID --secret_key=$AWS_SECRET_ACCESS_KEY --host=$AWS_HOST --host-bucket=$AWS_HOST s3://cheaha-cloud-ansible-files/ /tmp/${CI_PROJECT_NAME}/ansible/files/
ansible-playbook -c local -i ansible/hosts --extra-vars="$EXTRA_VARS" ansible/cluster.yml | tee -a /tmp/ansible.log
rm -rf /tmp/${CI_PROJECT_NAME}
EOF
- |
export cmd="openstack server create"
cmd+=" -c id -f value --image $OOD_IMAGE_ID"
cmd+=" --flavor $INSTANCE_FLAVOR"
for security_group in ${SECURITY_GROUP_LIST[@]};
do
cmd+=" --security-group $security_group"
done
cmd+=" --user-data user_data.txt"
if [ -n "$INSTANCE_NETWORK" ];then cmd+=" --network $INSTANCE_NETWORK"; fi
if [ -n "$OOD_PORT" ];then cmd+=" --port $OOD_PORT"; fi
cmd+=" --wait $OOD_INSTANCE_NAME"
- export OOD_INSTANCE_ID=$(bash -c "$cmd")
- |
# Associate the floating IP(s) with the OOD instance
for OOD_FLOATING_IP in ${OOD_FLOATING_IP_LIST[@]};
do
echo "Associating FLOATING_IP $OOD_FLOATING_IP with OOD_INSTANCE_ID $OOD_INSTANCE_ID"
openstack server add floating ip $OOD_INSTANCE_ID $OOD_FLOATING_IP
done
rules:
- if: $PIPELINE_TARGET == "deploy" && $OOD_IMAGE_ID
when: always
@@ -4,6 +4,13 @@
become: true
roles:
- { name: 'cheaha.node', tags: 'cheaha.node' }
- { name: 'nfs_mounts', tags: 'nfs_mounts' }
- { name: 'nfs_mounts', tags: 'nfs_mounts', when: enable_nfs_mounts }
- { name: 'ldap_config', tags: 'ldap_config' }
- { name: 'slurm_client', tags: 'slurm_client', when: enable_slurm_client }
- { name: 'ssh_host_keys', tags: 'ssh_host_keys' }
- { name: 'ssh_proxy_config', tags: 'ssh_proxy_config', when: enable_ssh_proxy_config }
- { name: 'ssl_cert', tags: 'ssl_cert', when: enable_ssl_certs }
- { name: 'rsyslog_config', tags: 'rsyslog_config', when: enable_rsyslog_config }
- { name: 'rewrite_map', tags: 'rewrite_map', when: enable_rewrite_map }
- { name: 'fail2ban', tags: 'fail2ban', when: enable_fail2ban }
- { name: 'install_node_exporter', tags: 'install_node_exporter', when: enable_node_exporter }
@@ -28,8 +28,64 @@
ldap_uri: "ldap://ldapserver"
# nfs_mounts related
enable_nfs_mounts: true
use_autofs: false
use_fstab: false
mount_points:
- /gpfs4
- /gpfs5
- { "src": "master:/gpfs4", "path": "/gpfs4", "opts": "ro,sync,hard", "mode": "0755" }
- { "src": "master:/gpfs5", "path": "/gpfs5", "opts": "ro,sync,hard", "mode": "0755" }
autofs_mounts:
- { "src": "master:/gpfs4/&", "path": "/gpfs4", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs4", "map_name": "gpfs4", key: "*" }
- { "src": "master:/gpfs5/&", "path": "/gpfs5", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs5", "map_name": "gpfs5", key: "*" }
#SSH Host Keys
S3_ENDPOINT: ""
SSH_HOST_KEYS_S3_BUCKET: ""
SSH_HOST_KEYS_S3_OBJECT: ""
# AWS credentials
LTS_ACCESS_KEY: ""
LTS_SECRET_KEY: ""
# ssh proxy
enable_ssh_proxy_config: false
sshpiper_dest_dir: "/opt/sshpiper"
# rsyslog
enable_rsyslog_config: true
rsyslog_target: "*.* @master:514"
# ssl certs
enable_ssl_certs: false
ssl_cert_s3_bucket: ""
ssl_cert_key_location: "/etc/pki/tls/private"
ssl_cert_file_location: "/etc/pki/tls/certs"
ssl_cert_key: ""
ssl_cert_file: ""
ssl_cert_chain_file: ""
ssl_apache_config: ""
apache_service: "httpd"
# rewrite map
enable_rewrite_map: false
target_groups:
- {"name": "gpfs4", "host": "login001", "default": True }
- {"name": "gpfs5", "host": "login002", "default": False }
# account app
account_app_port: 8000
# fail2ban
enable_fail2ban: false
maxretry: 1
findtime: 600
bantime: 1200
fail2ban_white_list: "127.0.0.1/8"
# Node Exporter
enable_node_exporter: false
node_exporter_ver: "1.8.2"
node_exporter_filename: "node_exporter-{{ node_exporter_ver }}.linux-amd64"
node_exporter_user: node_exporter
node_exporter_group: node_exporter
node_exporter_port: 9100
---
# cheaha.node related
hostname_lookup_table:
- "172.20.0.24 cheaha-master02.cm.cluster cheaha-master02"
- "172.20.0.22 cheaha-master01.cm.cluster cheaha-master01"
- "172.20.0.25 master.cm.cluster master localmaster.cm.cluster localmaster ldapserver.cm.cluster ldapserver"
domain_search_list:
- cm.cluster
- rc.uab.edu
- ib.cluster
- drac.cluster
- eth.cluster
- ib-hdr.cluster
nameserver_list:
- 172.20.0.25
bright_openldap_path: "/cm/local/apps/openldap"
ldap_cert_path: "{{bright_openldap_path}}/etc/certs"
ldap_uri: "ldaps://ldapserver"
# proxy_config
target_groups:
- {"name": "gpfs5", "host": "login002", "default": False, "authorized_keys":"/gpfs5/data/user/home/$DOWNSTREAM_USER/.ssh/authorized_keys", "private_key":"/gpfs5/data/user/home/$DOWNSTREAM_USER/.ssh/id_ecdsa"}
- {"name": "gpfs4", "host": "login001", "default": True, "authorized_keys":"/gpfs4/data/user/home/$DOWNSTREAM_USER/.ssh/authorized_keys", "private_key":"/gpfs4/data/user/home/$DOWNSTREAM_USER/.ssh/id_ecdsa"}
@@ -5,8 +5,5 @@
roles:
- { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
- { name: 'install_packages', tags: 'install_packages' }
- { name: 'pam_slurm_adopt', tags: 'pam_slurm_adopt' }
- { name: 'install_nhc', tags: 'install_nhc'}
- name: Setup node for use as a virtual cheaha node
ansible.builtin.import_playbook: cheaha.yml
@@ -6,6 +6,3 @@
- { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
- { name: 'install_packages', tags: 'install_packages' }
- { name: 'install_zsh', tags: 'install_zsh' }
- name: Setup node for use as a virtual cheaha node
ansible.builtin.import_playbook: cheaha.yml
---
- name: Install fail2ban
ansible.builtin.package:
name: "{{ item }}"
state: present
loop:
- fail2ban
- fail2ban-firewalld
- name: Configure fail2ban
ansible.builtin.template:
src: "{{ item.src }}"
dest: "{{ item.dest }}"
backup: true
loop:
- { src: 'jail.local.j2', dest: '/etc/fail2ban/jail.local' }
- { src: 'sshpiperd_filter.local.j2', dest: '/etc/fail2ban/filter.d/sshpiperd.local' }
- { src: 'sshpiperd_jail.local.j2', dest: '/etc/fail2ban/jail.d/sshpiperd.local' }
- name: Activate the firewalld support for fail2ban
ansible.builtin.command:
cmd: mv /etc/fail2ban/jail.d/00-firewalld.conf /etc/fail2ban/jail.d/00-firewalld.local
- name: Configure firewalld to allow ssh and sshpiper traffic
ansible.posix.firewalld:
port: "{{ item }}"
zone: public
state: enabled
permanent: true
loop:
- 2222/tcp
- 22/tcp
- name: Enable and start firewalld
ansible.builtin.service:
name: firewalld
enabled: true
state: restarted
- name: Enable and start fail2ban
ansible.builtin.service:
name: fail2ban
enabled: true
state: restarted
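# A quick post-run check (manual, assumes fail2ban-client is installed by the packages above):
#   fail2ban-client status
#   fail2ban-client status sshpiperd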
[DEFAULT]
banaction = firewalld
bantime = {{ bantime }}
ignoreip = {{ fail2ban_white_list }}
[sshd]
enabled = true
# Refer to https://github.com/fail2ban/fail2ban/wiki/Developing-Regex-in-Fail2ban for developing regex using fail2ban
#
[INCLUDES]
before = common.conf
[DEFAULT]
_daemon = sshpiperd
__iso_datetime = "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:[+-]\d{2}:\d{2}|Z)"
__pref = time=%(__iso_datetime)s level=(?:debug|error)
[Definition]
# Define the prefix regex for the log lines
prefregex = ^<F-MLFID>%(__prefix_line)s%(__pref)s</F-MLFID>\s+<F-CONTENT>.+</F-CONTENT>$
# Failregex to match the specific failure log lines (prefregex is automatically included)
failregex = ^msg="connection from .*failtoban: ip <HOST> too auth many failures"$
ignoreregex =
mode = normal
maxlines = 1
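# Purely illustrative example of a journal line this filter is meant to catch (hypothetical sshpiperd output):
#   Jan 31 12:00:00 proxy01 sshpiperd[1234]: time=2024-01-31T12:00:00Z level=error msg="connection from 203.0.113.5:51234 failtoban: ip 203.0.113.5 too auth many failures"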
# This configuration will block the remote host after {{maxretry}} failed SSH login attempts.
[sshpiperd]
enabled = true
filter = sshpiperd
logpath = /var/log/messages
port = 22
maxretry = {{ maxretry }}
backend = auto
findtime = {{ findtime }}
---
- name: Download node_exporter binary
ansible.builtin.get_url:
url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_ver }}/{{ node_exporter_filename }}.tar.gz"
dest: "/tmp/{{ node_exporter_filename }}.tar.gz"
- name: Extract node_exporter
ansible.builtin.unarchive:
src: "/tmp/{{ node_exporter_filename }}.tar.gz"
dest: "/tmp"
remote_src: yes
- name: Create system group for user account {{ node_exporter_group }}
ansible.builtin.group:
name: "{{ node_exporter_group }}"
system: true
state: present
- name: Create system user account {{ node_exporter_user }}
ansible.builtin.user:
name: "{{ node_exporter_user }}"
comment: Prometheus node_exporter system account
group: "{{ node_exporter_group }}"
system: true
home: /var/lib/node_exporter
create_home: false
shell: /sbin/nologin
state: present
- name: Copy node_exporter binary
ansible.builtin.copy:
src: "/tmp/{{ node_exporter_filename }}/node_exporter"
dest: /usr/local/bin/node_exporter
remote_src: yes
owner: root
group: root
mode: 0755
- name: Copy systemd unit file
ansible.builtin.template:
src: node_exporter.service.j2
dest: /etc/systemd/system/node_exporter.service
owner: root
group: root
mode: '0644'
- name: Clean up /tmp
ansible.builtin.file:
path: "/tmp/{{ item }}"
state: absent
loop:
- "{{ node_exporter_filename }}.tar.gz"
- "{{ node_exporter_filename }}"
- name: Restart node_exporter service
ansible.builtin.systemd:
daemon_reload: yes
name: node_exporter
state: restarted
enabled: true
- name: Collect facts about system services
ansible.builtin.service_facts:
- name: Configure firewalld to allow prometheus
ansible.posix.firewalld:
port: "{{ node_exporter_port }}/tcp"
zone: public
state: enabled
permanent: true
when:
- "'firewalld.service' in ansible_facts.services"
- ansible_facts.services["firewalld.service"].state == "running"
- name: Enable and start firewalld
ansible.builtin.service:
name: firewalld
enabled: true
state: restarted
when:
- "'firewalld.service' in ansible_facts.services"
- ansible_facts.services["firewalld.service"].state == "running"
[Unit]
Description=Node Exporter
After=network.target
[Service]
User={{ node_exporter_user }}
Group={{ node_exporter_group }}
Type=simple
ExecStart=/usr/local/bin/node_exporter --web.listen-address=:{{ node_exporter_port }} --collector.filesystem.mount-points-exclude "^/(dev|proc|run/user/.+|run/credentials/.+|sys|var/lib/docker/.+)($|/)" --collector.filesystem.fs-types-exclude "^(autofs|binfmt_misc|bpf|cgroup|tmpfs|sunrpc|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
[Install]
WantedBy=multi-user.target
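# Manual spot-check after deployment (9100 is the default node_exporter_port in group_vars):
#   curl -s http://localhost:9100/metrics | head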
@@ -7,7 +7,6 @@
- nss-pam-ldapd
- openldap
- openldap-clients
- openldap-servers
- sssd-ldap
- name: Update nsswitch.conf to look for ldap
......
---
- name: Create base directories
ansible.builtin.file:
path: "{{ item.dir }}"
path: "{{ item.path }}"
state: directory
mode: "{{ item.mode }}"
loop:
- { dir: /local, mode: '0777' }
- { dir: /scratch, mode: '0755' }
- { dir: /share, mode: '0755' }
- { dir: /data/rc/apps, mode: '0755' } # this is only required for the symlink to be happy
- { dir: /data/user, mode: '0755' }
- { dir: /data/project, mode: '0755' }
- { path: /local, mode: '0777' }
- { path: /share, mode: '0755' }
- name: Create mountpoint dirs
ansible.builtin.file:
path: "{{ item.path }}"
state: directory
mode: "{{ item.mode }}"
loop:
"{{ autofs_mounts }}"
- name: Remove unused entry in master map
ansible.builtin.replace:
@@ -29,12 +33,7 @@
line: "{{ item.mount_point }} /etc/auto.{{ item.map_name }}"
create: yes
loop:
- { mount_point: "/cm/shared", map_name: "cm-share" }
- { mount_point: "/data/project", map_name: "data-project" }
- { mount_point: "/data/user", map_name: "data-user" }
- { mount_point: "/data/rc/apps", map_name: "data-rc-apps" }
- { mount_point: "/-", map_name: "scratch" }
- { mount_point: "/home", map_name: "home" }
"{{ autofs_mounts }}"
- name: Set up autofs map files
ansible.builtin.lineinfile:
@@ -42,12 +41,7 @@
line: "{{ item.key }} -{{ item.opts }} {{ item.src }}"
create: true
loop:
- { map_name: "cm-share", key: "*", src: "gpfs.rc.uab.edu:/data/cm/shared-8.2/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
- { map_name: "data-project", key: "*", src: "gpfs.rc.uab.edu:/data/project/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
- { map_name: "data-user", key: "*", src: "gpfs.rc.uab.edu:/data/user/&", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
- { map_name: "data-rc-apps", key: "*", src: "gpfs.rc.uab.edu:/data/rc/apps/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
- { map_name: "scratch", key: "/scratch", src: "gpfs.rc.uab.edu:/scratch", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
- { map_name: "home", key: "*", src: ":/data/user/home/&", opts: 'fstype=bind' }
"{{ autofs_mounts }}"
- name: Create symbolic links
ansible.builtin.file:
@@ -60,7 +54,8 @@
loop:
- { src: /data/rc/apps, dest: /share/apps }
- name: Enable autofs service
- name: Enable and start autofs service
ansible.builtin.service:
name: autofs
enabled: true
state: restarted
---
- name: Create base directories
ansible.builtin.file:
path: "{{ item }}"
path: "{{ item.path }}"
state: directory
mode: '0755'
mode: "{{ item.mode }}"
loop:
"{{ mount_points }}"
- name: Mount the directories
ansible.posix.mount:
src: "master:{{ item }}"
path: "{{ item }}"
opts: rw,sync,hard
src: "{{ item.src }}"
path: "{{ item.path }}"
opts: "{{ item.opts }}"
state: mounted
fstype: nfs
loop:
......
---
- name: nfs_mounts using fstab
include_tasks: fstab.yml
when: not use_autofs
when: use_fstab
- name: nfs_mounts using autofs
include_tasks: autofs.yml
......
---
- name: Add apache rewritemap script config
ansible.builtin.template:
src: rewrite_map_config_py.j2
mode: '600'
owner: root
group: root
dest: /var/www/rewrite_map_config.py
- name: Replace OOD rewrite condition regex in Apache configuration
ansible.builtin.replace:
path: /etc/httpd/conf.d/front-end.conf
regexp: "RewriteCond %{HTTP:REMOTE_USER} '\\^\\(\\.\\+\\)\\$'"
replace: |
RewriteCond %{HTTP:REMOTE_USER} '([a-zA-Z0-9_.+-]+)@uab.edu$' [OR]
RewriteCond %{HTTP:REMOTE_USER} 'urn:mace:incommon:uab.edu!https://uabgrid.uab.edu/shibboleth!(.+)$'
- name: Replace account app port in Apache configuration
ansible.builtin.replace:
path: /etc/httpd/conf.d/front-end.conf
regexp: "account-app:8000"
replace: "account-app:{{ account_app_port }}"
- name: Restart httpd services
ansible.builtin.service:
name: httpd
enabled: true
state: restarted
DEBUG = False
target_groups = {
{% for group in target_groups %}
"{{ group.name }}": "{{ group.host }}",
{% endfor %}
}
{% for group in target_groups %}
{% if group.default %}
default_hostname = "{{ group.host }}"
{% endif %}
{% endfor %}
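# Rendered with the example target_groups from the login group_vars above, this template would
# produce roughly:
#   DEBUG = False
#   target_groups = {
#       "gpfs4": "login001",
#       "gpfs5": "login002",
#   }
#   default_hostname = "login001"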
---
- name: Add rsyslog configuration
ansible.builtin.template:
src: rsyslog.conf.j2
dest: /etc/rsyslog.conf
mode: 0644
owner: root
group: root
backup: true
- name: Enable and start rsyslog
ansible.builtin.service:
name: rsyslog
enabled: true
state: restarted
# rsyslog configuration file
# For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
# If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html
# Added for distro update >= 4 (7u4)
global (
net.enabledns="off"
)
#### MODULES ####
# The imjournal module below is now used as a message source instead of imuxsock.
$ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
$ModLoad imjournal # provides access to the systemd journal
#$ModLoad imklog # reads kernel messages (the same are read from journald)
#$ModLoad immark # provides --MARK-- message capability
# Provides UDP syslog reception
#$ModLoad imudp
#$UDPServerRun 514
# Provides TCP syslog reception
#$ModLoad imtcp
#$InputTCPServerRun 514
#### GLOBAL DIRECTIVES ####
# Where to place auxiliary files
$WorkDirectory /var/lib/rsyslog
# Use default timestamp format
$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
# File syncing capability is disabled by default. This feature is usually not required,
# not useful and an extreme performance hit
#$ActionFileEnableSync on
# Include all config files in /etc/rsyslog.d/
$IncludeConfig /etc/rsyslog.d/*.conf
# Turn off message reception via local log socket;
# local messages are retrieved through imjournal now.
$OmitLocalLogging on
# File to store the position in the journal
$IMJournalStateFile imjournal.state
#### RULES ####
# Log all kernel messages to the console.
# Logging much else clutters up the screen.
#kern.* /dev/console
# Filter nslcd ldap ldap_abandon and ldap_result messages.
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains ' failed: Can\'t contact LDAP server' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'ldap_abandon() failed to abandon search: Other (e.g., implementation specific) error' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'ldap_abandon() failed to abandon search: Can\'t contact LDAP server: Transport endpoint is not connected' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'no available LDAP server found, sleeping ' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'connected to LDAP server ldap://local' then stop
# Filter sntp started messages.
if $programname == 'sntp' and $syslogseverity > '3' and $msg contains 'Started sntp' then stop
# MariaDB Galera
# disabled, as these messages are being generated every few seconds
:msg, contains, "START: cm-check-galera-status" stop
:msg, contains, "EXIT: cm-check-galera-status" stop
# HAProxy for OpenStack
if $syslogfacility-text == 'local4' and ($programname == 'haproxy') then {
local4.* /var/log/haproxy.log
stop
}
# OpenStack specific
if $syslogfacility-text == 'daemon' then {
# needed for proper handling of Python stack traces
$EscapeControlCharactersOnReceive off
if $programname startswith 'keystone' then {
*.* /var/log/keystone/keystone.log
}
if $programname startswith 'nova' then {
*.* /var/log/nova/nova.log
if $programname == 'nova-api' then {
*.* /var/log/nova/nova-api.log
}
if $programname == 'nova-scheduler' then {
*.* /var/log/nova/nova-scheduler.log
}
if $programname == 'nova-conductor' then {
*.* /var/log/nova/nova-conductor.log
}
if $programname == 'nova-novncproxy' then {
*.* /var/log/nova/nova-novncproxy.log
}
if $programname == 'nova-compute' then {
*.* /var/log/nova/nova-compute.log
}
}
if $programname startswith 'neutron' then {
*.* /var/log/neutron/neutron.log
if $programname == 'neutron-server' then {
*.* /var/log/neutron/neutron-server.log
}
if $programname == 'neutron-metadata-agent' then {
*.* /var/log/neutron/neutron-metadata-agent.log
}
if $programname == 'neutron-l3-agent' then {
*.* /var/log/neutron/neutron-l3-agent.log
}
if $programname == 'neutron-dhcp-agent' then {
*.* /var/log/neutron/neutron-dhcp-agent.log
}
if $programname == 'neutron-openvswitch-agent' then {
*.* /var/log/neutron/neutron-openvswitch-agent.log
}
}
if $programname startswith 'glance' then {
*.* /var/log/glance/glance.log
if $programname == 'glance-api' then {
*.* /var/log/glance/glance-api.log
}
if $programname == 'glance-registry' then {
*.* /var/log/glance/glance-registry.log
}
}
if $programname startswith 'cinder' then {
*.* /var/log/cinder/cinder.log
if $programname == 'cinder-api' then {
*.* /var/log/cinder/cinder-api.log
}
if $programname == 'cinder-scheduler' then {
*.* /var/log/cinder/cinder-scheduler.log
}
if $programname == 'cinder-volume' then {
*.* /var/log/cinder/cinder-volume.log
}
if $programname == 'cinder-backup' then {
*.* /var/log/cinder/cinder-backup.log
}
}
if $programname startswith 'heat' then {
*.* /var/log/heat/heat.log
if $programname == 'heat-api' then {
*.* /var/log/heat/heat-api.log
}
if $programname == 'heat-engine' then {
*.* /var/log/heat/heat-engine.log
}
}
if $programname startswith 'keystone' or \
$programname startswith 'nova' or \
$programname startswith 'neutron' or \
$programname startswith 'glance' or \
$programname startswith 'cinder' or \
$programname startswith 'heat' then {
*.* /var/log/openstack
*.* @master:514
stop
}
}
# Log anything (except mail) of level info or higher.
# Don't log private authentication messages!
*.info;mail.none;authpriv.none;cron.none;local5.none;local6.none /var/log/messages
# The authpriv file has restricted access.
authpriv.* /var/log/secure
# Log all the mail messages in one place.
mail.* -/var/log/maillog
# Log cron stuff
cron.* /var/log/cron
# Everybody gets emergency messages
*.emerg :omusrmsg:*
# Save news errors of level crit and higher in a special file.
uucp,news.crit /var/log/spooler
# Save boot messages also to boot.log
local7.* /var/log/boot.log
# cm related log files:
local5.* -/var/log/node-installer
local6.* -/var/log/cmdaemon
# ### begin forwarding rule ###
# The statement between the begin ... end define a SINGLE forwarding
# rule. They belong together, do NOT split them. If you create multiple
# forwarding rules, duplicate the whole block!
# Remote Logging (we use TCP for reliable delivery)
#
# An on-disk queue is created for this action. If the remote host is
# down, messages are spooled to disk and sent when it is up again.
#$ActionQueueFileName fwdRule1 # unique name prefix for spool files
#$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible)
#$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
#$ActionQueueType LinkedList # run asynchronously
#$ActionResumeRetryCount -1 # infinite retries if host is down
# remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
#*.* @@remote-host:514
#CM
{{ rsyslog_target }}
#### end of the forwarding rule ###