Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • dwheel7/hpc-factory
  • rc/hpc-factory
  • louistw/hpc-factory
  • jpr/hpc-factory
  • krish94/hpc-factory
  • atlurie/hpc-factory
6 results
Show changes
Showing
with 711 additions and 6 deletions
---
# Install the rsyslog configuration from the template and (re)start the
# service so the new configuration is picked up immediately.
- name: Add rsyslog configuration
  ansible.builtin.template:
    src: rsyslog.conf.j2
    dest: /etc/rsyslog.conf
    # Quote octal modes: a bare 0644 is parsed as the integer 420 by YAML,
    # which ansible-lint flags (risky-octal).
    mode: '0644'
    owner: root
    group: root
    backup: true

- name: Enable and start rsyslog
  ansible.builtin.service:
    name: rsyslog
    enabled: true
    # restarted (not started) so a config change on an already-running
    # service takes effect.
    state: restarted
# rsyslog configuration file
# For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
# If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html
# Added for distro update >= 4 (7u4)
global (
net.enabledns="off"
)
#### MODULES ####
# The imjournal module below is now used as a message source instead of imuxsock.
$ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
$ModLoad imjournal # provides access to the systemd journal
#$ModLoad imklog # reads kernel messages (the same are read from journald)
#$ModLoad immark # provides --MARK-- message capability
# Provides UDP syslog reception
#$ModLoad imudp
#$UDPServerRun 514
# Provides TCP syslog reception
#$ModLoad imtcp
#$InputTCPServerRun 514
#### GLOBAL DIRECTIVES ####
# Where to place auxiliary files
$WorkDirectory /var/lib/rsyslog
# Use default timestamp format
$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
# File syncing capability is disabled by default. This feature is usually not required,
# not useful and an extreme performance hit
#$ActionFileEnableSync on
# Include all config files in /etc/rsyslog.d/
$IncludeConfig /etc/rsyslog.d/*.conf
# Turn off message reception via local log socket;
# local messages are retrieved through imjournal now.
$OmitLocalLogging on
# File to store the position in the journal
$IMJournalStateFile imjournal.state
#### RULES ####
# Log all kernel messages to the console.
# Logging much else clutters up the screen.
#kern.* /dev/console
# Filter nslcd ldap ldap_abandon and ldap_result messages.
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains ' failed: Can\'t contact LDAP server' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'ldap_abandon() failed to abandon search: Other (e.g., implementation specific) error' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'ldap_abandon() failed to abandon search: Can\'t contact LDAP server: Transport endpoint is not connected' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'no available LDAP server found, sleeping ' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'connected to LDAP server ldap://local' then stop
# Filter sntp started messages.
if $programname == 'sntp' and $syslogseverity > '3' and $msg contains 'Started sntp' then stop
# MariaDB Galera
# disabled, as these messages are being generated every few seconds
:msg, contains, "START: cm-check-galera-status" stop
:msg, contains, "EXIT: cm-check-galera-status" stop
# HAProxy for OpenStack
if $syslogfacility-text == 'local4' and ($programname == 'haproxy') then {
local4.* /var/log/haproxy.log
stop
}
# OpenStack specific
if $syslogfacility-text == 'daemon' then {
# needed for proper handling of Python stack traces
$EscapeControlCharactersOnReceive off
if $programname startswith 'keystone' then {
*.* /var/log/keystone/keystone.log
}
if $programname startswith 'nova' then {
*.* /var/log/nova/nova.log
if $programname == 'nova-api' then {
*.* /var/log/nova/nova-api.log
}
if $programname == 'nova-scheduler' then {
*.* /var/log/nova/nova-scheduler.log
}
if $programname == 'nova-conductor' then {
*.* /var/log/nova/nova-conductor.log
}
if $programname == 'nova-novncproxy' then {
*.* /var/log/nova/nova-novncproxy.log
}
if $programname == 'nova-compute' then {
*.* /var/log/nova/nova-compute.log
}
}
if $programname startswith 'neutron' then {
*.* /var/log/neutron/neutron.log
if $programname == 'neutron-server' then {
*.* /var/log/neutron/neutron-server.log
}
if $programname == 'neutron-metadata-agent' then {
*.* /var/log/neutron/neutron-metadata-agent.log
}
if $programname == 'neutron-l3-agent' then {
*.* /var/log/neutron/neutron-l3-agent.log
}
if $programname == 'neutron-dhcp-agent' then {
*.* /var/log/neutron/neutron-dhcp-agent.log
}
if $programname == 'neutron-openvswitch-agent' then {
*.* /var/log/neutron/neutron-openvswitch-agent.log
}
}
if $programname startswith 'glance' then {
*.* /var/log/glance/glance.log
if $programname == 'glance-api' then {
*.* /var/log/glance/glance-api.log
}
if $programname == 'glance-registry' then {
*.* /var/log/glance/glance-registry.log
}
}
if $programname startswith 'cinder' then {
*.* /var/log/cinder/cinder.log
if $programname == 'cinder-api' then {
*.* /var/log/cinder/cinder-api.log
}
if $programname == 'cinder-scheduler' then {
*.* /var/log/cinder/cinder-scheduler.log
}
if $programname == 'cinder-volume' then {
*.* /var/log/cinder/cinder-volume.log
}
if $programname == 'cinder-backup' then {
*.* /var/log/cinder/cinder-backup.log
}
}
if $programname startswith 'heat' then {
*.* /var/log/heat/heat.log
if $programname == 'heat-api' then {
*.* /var/log/heat/heat-api.log
}
if $programname == 'heat-engine' then {
*.* /var/log/heat/heat-engine.log
}
}
if $programname startswith 'keystone' or \
$programname startswith 'nova' or \
$programname startswith 'neutron' or \
$programname startswith 'glance' or \
$programname startswith 'cinder' or \
$programname startswith 'heat' then {
*.* /var/log/openstack
*.* @master:514
stop
}
}
# Log anything (except mail) of level info or higher.
# Don't log private authentication messages!
*.info;mail.none;authpriv.none;cron.none;local5.none;local6.none /var/log/messages
# The authpriv file has restricted access.
authpriv.* /var/log/secure
# Log all the mail messages in one place.
mail.* -/var/log/maillog
# Log cron stuff
cron.* /var/log/cron
# Everybody gets emergency messages
*.emerg :omusrmsg:*
# Save news errors of level crit and higher in a special file.
uucp,news.crit /var/log/spooler
# Save boot messages also to boot.log
local7.* /var/log/boot.log
# cm related log files:
local5.* -/var/log/node-installer
local6.* -/var/log/cmdaemon
# ### begin forwarding rule ###
# The statement between the begin ... end define a SINGLE forwarding
# rule. They belong together, do NOT split them. If you create multiple
# forwarding rules, duplicate the whole block!
# Remote Logging (we use TCP for reliable delivery)
#
# An on-disk queue is created for this action. If the remote host is
# down, messages are spooled to disk and sent when it is up again.
#$ActionQueueFileName fwdRule1 # unique name prefix for spool files
#$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible)
#$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
#$ActionQueueType LinkedList # run asynchronously
#$ActionResumeRetryCount -1 # infinite retries if host is down
# remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
#*.* @@remote-host:514
#CM
{{ rsyslog_target }}
#### end of the forwarding rule ###
......@@ -19,6 +19,7 @@
state: present
uid: 450
group: slurm
create_home: false
- name: Copy munge key
ansible.builtin.copy:
......@@ -28,6 +29,19 @@
group: root
mode: 0400
# Point /etc/slurm at the cluster-shared Slurm configuration.
- name: Create symbolic links for Slurm config files
  ansible.builtin.file:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    state: link
    # Force creation even if the shared source files do not exist yet
    # (e.g. during image build, before /cm/shared is mounted).
    force: true
  loop:
    - { src: "/cm/shared/apps/slurm/var/etc/cgroup.conf", dest: "/etc/slurm/cgroup.conf" }
    - { src: "/cm/shared/apps/slurm/var/etc/gres.conf", dest: "/etc/slurm/gres.conf" }
    - { src: "/cm/shared/apps/slurm/var/etc/slurm.conf", dest: "/etc/slurm/slurm.conf" }
    - { src: "/cm/shared/apps/slurm/var/etc/slurmdbd.conf", dest: "/etc/slurm/slurmdbd.conf" }
    - { src: "/cm/shared/apps/slurm/var/etc/job_submit.lua", dest: "/etc/slurm/job_submit.lua" }
- name: Enable services
ansible.builtin.service:
name: "{{ item }}"
......
---
# Fetch pre-generated SSH host keys from S3 and install them into /etc/ssh
# so rebuilt hosts keep a stable host identity, then restart sshd.
- name: Ensure destination directory exists
  ansible.builtin.file:
    path: /tmp/ssh_keys
    state: directory
    mode: '0755'

- name: Install required package
  ansible.builtin.pip:
    name: boto3
    extra_args: "--extra-index-url https://pypi.python.org/simple"
    executable: "/usr/bin/pip3"

- name: Download SSH host keys tar.gz from S3
  aws_s3:
    mode: get
    s3_url: "{{ S3_ENDPOINT }}"
    bucket: "{{ SSH_HOST_KEYS_S3_BUCKET }}"
    object: "{{ SSH_HOST_KEYS_S3_OBJECT }}"
    dest: "/tmp/ssh_keys/{{ SSH_HOST_KEYS_S3_OBJECT }}"
    aws_access_key: "{{ LTS_ACCESS_KEY }}"
    aws_secret_key: "{{ LTS_SECRET_KEY }}"
  vars:
    ansible_python_interpreter: /usr/bin/python3
  # Skip silently when the bucket/object vars are unset.
  when: SSH_HOST_KEYS_S3_BUCKET | length > 0 and SSH_HOST_KEYS_S3_OBJECT | length > 0

- name: Unpack SSH host keys to /etc/ssh
  ansible.builtin.unarchive:
    src: "/tmp/ssh_keys/{{ SSH_HOST_KEYS_S3_OBJECT }}"
    dest: "/etc/ssh"
    group: root
    owner: root
    # The archive was already downloaded to the target host above.
    remote_src: true
  become: true
  when: SSH_HOST_KEYS_S3_BUCKET | length > 0 and SSH_HOST_KEYS_S3_OBJECT | length > 0

- name: Remove the temporary folder after keys are in place
  ansible.builtin.file:
    path: /tmp/ssh_keys
    state: absent

- name: Restart SSH service
  ansible.builtin.service:
    name: sshd
    state: restarted
  become: true
---
# Render the sshpiper yaml-plugin routing table and restart the daemon so
# new pipes take effect.
- name: Configure sshpiper yaml plugin
  ansible.builtin.template:
    src: sshpiperd.yaml.j2
    dest: "{{ sshpiper_dest_dir }}/sshpiperd.yaml"
    # Explicit ownership/permissions for consistency with the other
    # template tasks in this repo (previously left to umask).
    owner: root
    group: root
    mode: '0644'
    backup: true

- name: Enable and start sshpiper service
  ansible.builtin.service:
    name: sshpiperd
    enabled: true
    # restarted so configuration changes are applied on every run.
    state: restarted
# yaml-language-server: $schema=https://raw.githubusercontent.com/tg123/sshpiper/master/plugin/yaml/schema.json
{# sshpiperd routing table: two pipes are emitted per target group.        #}
{# Non-default groups match incoming users by groupname; the default       #}
{# group catches every username via the ".*" regex.  The first pipe of     #}
{# each pair carries authorized_keys/private_key for key-based piping;     #}
{# the second omits them — presumably to allow other auth methods to       #}
{# fall through (TODO: confirm against sshpiper yaml-plugin docs).         #}
version: "1.0"
pipes:
{% for group in target_groups %}
{% if not group.default %}
- from:
- groupname: "{{ group.name }}"
authorized_keys: "{{ group.authorized_keys }}"
to:
host: "{{ group.host }}"
ignore_hostkey: true
private_key: "{{ group.private_key }}"
- from:
- groupname: "{{ group.name }}"
to:
host: "{{ group.host }}"
ignore_hostkey: true
{% else %}
- from:
- username: ".*" # catch all
username_regex_match: true
authorized_keys: "{{ group.authorized_keys }}"
to:
host: "{{ group.host }}"
ignore_hostkey: true
private_key: "{{ group.private_key }}"
- from:
- username: ".*"
username_regex_match: true
to:
host: "{{ group.host }}"
ignore_hostkey: true
{% endif %}
{% endfor %}
---
# Fetch the SSL certificate, chain and key from S3, lock down their
# permissions, point the Apache config at them and restart Apache.
- name: Download SSL Certs from S3
  aws_s3:
    mode: get
    s3_url: "{{ S3_ENDPOINT }}"
    bucket: "{{ ssl_cert_s3_bucket }}"
    object: "{{ item }}"
    dest: "{{ ssl_cert_file_location }}/{{ item }}"
    aws_access_key: "{{ LTS_ACCESS_KEY }}"
    aws_secret_key: "{{ LTS_SECRET_KEY }}"
  vars:
    ansible_python_interpreter: /usr/bin/python3
  when: ssl_cert_s3_bucket | length > 0 and item | length > 0
  loop:
    - "{{ ssl_cert_file }}"
    - "{{ ssl_cert_chain_file }}"

- name: Change cert files permissions
  ansible.builtin.file:
    path: "{{ ssl_cert_file_location }}/{{ item }}"
    owner: root
    group: root
    mode: '0600'
  when: ssl_cert_s3_bucket | length > 0 and item | length > 0
  loop:
    - "{{ ssl_cert_file }}"
    - "{{ ssl_cert_chain_file }}"

- name: Download SSL key from S3
  aws_s3:
    mode: get
    s3_url: "{{ S3_ENDPOINT }}"
    bucket: "{{ ssl_cert_s3_bucket }}"
    object: "{{ ssl_cert_key }}"
    dest: "{{ ssl_cert_key_location }}/{{ ssl_cert_key }}"
    aws_access_key: "{{ LTS_ACCESS_KEY }}"
    aws_secret_key: "{{ LTS_SECRET_KEY }}"
  vars:
    ansible_python_interpreter: /usr/bin/python3
  when: ssl_cert_s3_bucket | length > 0 and ssl_cert_key | length > 0

- name: Change key file permissions
  ansible.builtin.file:
    path: "{{ ssl_cert_key_location }}/{{ ssl_cert_key }}"
    owner: root
    group: root
    mode: '0400'
  when: ssl_cert_s3_bucket | length > 0 and ssl_cert_key | length > 0

# The regexes are anchored at line start and capture the leading
# whitespace so (a) a commented directive written as "# SSLCertificateFile"
# (space after the hash) is uncommented too, and (b) the directive keeps
# its original indentation.  The \b word boundary prevents any partial
# directive-name matches.
- name: Update SSL in Apache config
  ansible.builtin.replace:
    path: "{{ ssl_apache_config }}"
    regexp: "{{ item.regexp }}"
    replace: "\\1\\2 {{ item.location }}/{{ item.value }}"
    backup: true
  when: ssl_apache_config | length > 0 and item.value | length > 0
  loop:
    - { regexp: '^(\s*)#?\s*(SSLCertificateFile)\b.*$', location: "{{ ssl_cert_file_location }}", value: "{{ ssl_cert_file }}" }
    - { regexp: '^(\s*)#?\s*(SSLCertificateChainFile)\b.*$', location: "{{ ssl_cert_file_location }}", value: "{{ ssl_cert_chain_file }}" }
    - { regexp: '^(\s*)#?\s*(SSLCertificateKeyFile)\b.*$', location: "{{ ssl_cert_key_location }}", value: "{{ ssl_cert_key }}" }

- name: Restart apache service
  ansible.builtin.service:
    name: "{{ apache_service }}"
    state: restarted
File moved
......@@ -36,11 +36,19 @@ source "openstack" "image" {
build {
sources = ["source.openstack.image"]
provisioner "shell" {
inline = [
"sudo yum install -y libselinux-python3 python3 python3-pip tmux vim git bash-completion curl wget unzip",
"sudo python3 -m pip install --upgrade pip",
"sudo pip3 install s3cmd==2.3.0 ansible==4.10.0 python-openstackclient==5.8.0"
]
}
provisioner "ansible" {
use_proxy = false
user = var.ssh_username
groups = ["compute"]
playbook_file = "./ansible/compute.yml"
playbook_file = "./ansible/login.yml"
roles_path = "./ansible/roles"
extra_arguments = [
"--extra-vars", "root_ssh_key='${var.root_ssh_key}'"
......@@ -53,5 +61,8 @@ build {
groups = ["compute"]
ansible_env_vars = ["ANSIBLE_HOST_KEY_CHECKING=False"]
playbook_file = "./CRI_XCBC/compute-packer.yaml"
extra_arguments = [
"--extra-vars", "${var.extra_vars}"
]
}
}
# Public key installed for root on the built image; empty disables it.
variable "root_ssh_key" {
  type        = string
  default     = ""
  description = "The root key to use for ssh"
}
variable "image_name" {
......@@ -87,4 +88,10 @@ variable "volume_size" {
type = number
default = 20
description = "The default volume size for building image"
}
\ No newline at end of file
}
# Passed through verbatim to `ansible-playbook --extra-vars`.
variable "extra_vars" {
type = string
default = ""
description = "Extra vars to pass to ansible playbook command"
}
......@@ -53,5 +53,17 @@ build {
groups = ["ood", "knightly"]
ansible_env_vars = ["ANSIBLE_HOST_KEY_CHECKING=False"]
playbook_file = "./CRI_XCBC/ood-packer.yaml"
extra_arguments = [
"--extra-vars", "${var.extra_vars}"
]
}
provisioner "shell" {
inline = [
"sudo yum install -y libselinux-python3 python3 python3-pip tmux vim git bash-completion curl wget unzip",
"sudo python3 -m pip install --upgrade pip",
"sudo pip3 install s3cmd==2.3.0 ansible==4.10.0 python-openstackclient==5.8.0"
]
}
}
# Public key installed for root on the built image; empty disables it.
variable "root_ssh_key" {
type = string
default = ""
description = "The root key to use for ssh"
}
......@@ -87,4 +88,10 @@ variable "volume_size" {
type = number
default = 20
description = "The default volume size for building image"
}
\ No newline at end of file
}
# Passed through verbatim to `ansible-playbook --extra-vars`.
variable "extra_vars" {
type = string
default = ""
description = "Extra vars to pass to ansible playbook command"
}
......@@ -38,10 +38,9 @@ build {
provisioner "shell" {
inline = [
"sudo sed -i 's/^mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-*",
"sudo sed -i 's|#baseurl=http://mirror.centos.org|baseurl=http://vault.centos.org|g' /etc/yum.repos.d/CentOS-*",
"sudo yum install -y epel-release",
"sudo yum install -y libselinux-python3 python3 tmux vim git bash-completion curl wget unzip",
"sudo dnf config-manager --set-enabled crb",
"sudo yum install -y libselinux-python3 python3 python3-pip tmux vim git bash-completion curl wget unzip httpd",
"sudo python3 -m pip install --upgrade pip",
"sudo pip3 install s3cmd==2.3.0 ansible==4.10.0 python-openstackclient==5.8.0"
]
......@@ -59,5 +58,8 @@ build {
"ANSIBLE_FORCE_COLOR=true"
]
playbook_file = "./CRI_XCBC/proxy.yaml"
extra_arguments = [
"--extra-vars", "${var.extra_vars}"
]
}
}
......@@ -106,3 +106,8 @@ variable "ANSIBLE_VERBOSITY" {
description = "to increase verbosity - 0|1|2|3|4"
}
# Passed through verbatim to `ansible-playbook --extra-vars`.
variable "extra_vars" {
type = string
default = ""
description = "Extra vars to pass to ansible playbook command"
}
certifi==2025.1.31
charset-normalizer==3.4.1
idna==3.10
python-gitlab==5.6.0
requests==2.32.3
requests-toolbelt==1.0.0
urllib3==2.4.0
### Description
These utility scripts avoid manually copying each CI variable, which is tedious.
- The gitlab-ci-vars-reader.py reads variables from a specific project or a pipeline (depending on the options provided) and copies them into a yaml file
- The gitlab-ci-vars-updater.py takes a yaml file containing key value pairs in yaml format as an input. It then creates/updates project variables or pipeline variables (depending on the options provided)
### Prerequisites
```
python -m venv ~/venvs/gitlab
source ~/venvs/gitlab/bin/activate
pip install -r requirements
```
### Setup
```
cd utils
mv gitlab.ini.example gitlab.ini
```
Make changes to the gitlab.ini as you require.
[Create a personal access token](https://docs.gitlab.com/user/profile/personal_access_tokens/) via the gitlab UI and copy it to the private_token field in gitlab.ini file
### Usage
> Create an empty schedule pipeline before you try this out.
```
python3 gitlab-ci-vars-reader.py --config_file gitlab.ini --project_id <PROJECT_ID> --sched_pipeline_id <PIPELINE_ID> --var_file ci-variables.yaml
python3 gitlab-ci-vars-updater.py --config_file gitlab.ini --project_id <PROJECT_ID> --sched_pipeline_id <NEW-PIPELINE_ID> --var_file ci-variables.yaml
```
import argparse
import gitlab
import yaml
# Function to fetch all CI/CD variables from a GitLab project
def fetch_variables(project):
    """Return every CI/CD variable of *project* as a list of plain dicts."""
    return [variable.asdict() for variable in project.variables.list(iterator=True)]
def fetch_sched_variables(sched_pipeline):
    """Return the variables attached to a scheduled pipeline."""
    return sched_pipeline.attributes["variables"]
# Main function to load the config and fetch variables
def main():
    """Dump CI/CD variables to a YAML file.

    Reads the project-level variables, or — when --sched_pipeline_id is
    given — the variables of that scheduled pipeline, and writes them to
    --var_file with yaml.dump.
    """
    # Setup argument parser
    parser = argparse.ArgumentParser(description="GitLab CI/CD Variable reader")
    # NOTE: not required — argparse ignores `default` when required=True,
    # which contradicted the documented "(default: gitlab.ini)".
    parser.add_argument(
        "--config_file",
        type=str,
        default="gitlab.ini",
        help="Path to the configuration file (default: gitlab.ini)",
    )
    parser.add_argument(
        "--var_file",
        type=str,
        default="ci-variables.yaml",
        help="Path to the CI vars file (default: ci-variables.yaml)",
    )
    parser.add_argument(
        "--project_name",
        type=str,
        required=True,
        help="Gitlab project name with namespace",
    )
    parser.add_argument(
        "--sched_pipeline_id",
        type=int,
        help="Gitlab project scheduled pipeline ID",
    )
    # Parse the arguments
    args = parser.parse_args()

    # "uabrc" is the config-file section holding url/private_token.
    gl = gitlab.Gitlab.from_config("uabrc", [args.config_file])
    project = gl.projects.get(args.project_name)

    # Fetch project or scheduled-pipeline variables.
    if args.sched_pipeline_id:
        sched_pipeline = project.pipelineschedules.get(args.sched_pipeline_id)
        variables = fetch_sched_variables(sched_pipeline)
    else:
        variables = fetch_variables(project)

    try:
        with open(args.var_file, mode="wt", encoding="utf-8") as file:
            yaml.dump(variables, file, explicit_start=True)
    except OSError as err:
        # Covers FileNotFoundError (missing directory), permission errors, etc.
        print(f"Error: Writing File to '{args.var_file}': {err}")
        raise SystemExit(1)


# Run the main function
if __name__ == "__main__":
    main()
import argparse
import gitlab
import yaml
def load_file(file_path):
    """Parse *file_path* as YAML and return the result; exit(1) if missing."""
    try:
        with open(file_path, mode="rt", encoding="utf-8") as handle:
            return yaml.safe_load(handle)
    except FileNotFoundError:
        print(f"Error: Configuration file '{file_path}' not found.")
        exit(1)
# Function to create or update a GitLab CI/CD variable
def create_or_update_variable(project, var_dict):
    """Create or update the project variable described by *var_dict*.

    var_dict must contain at least "key"; missing attributes are filled in
    from DEFAULTS so the comparison against the server-side variable is
    field-for-field.  Returns the created variable object, or None when an
    identical variable already exists.
    """
    key = var_dict.get("key")
    scope = var_dict.get("environment_scope", "*")
    p_variable = None
    DEFAULTS = {
        "variable_type": "env_var",
        "hidden": False,
        "protected": False,
        "masked": False,
        "environment_scope": "*",
        "raw": False,
        "description": None,
    }
    # Merge defaults with var_dict (explicit values win).
    var_dict = {**DEFAULTS, **var_dict}
    # Fetch a variable with matching key and scope.
    try:
        all_vars = project.variables.list(get_all=True)
        for var in all_vars:
            if var.key == key and var.environment_scope == scope:
                p_variable = var
                break
    except gitlab.exceptions.GitlabGetError:
        print("Variable not found")
        exit(1)
    # Check if the variable exists and is identical to the input.
    if p_variable is not None:
        if p_variable.asdict() != var_dict:
            # Recreate rather than edit in place so every attribute is updated.
            print(f"Updating {p_variable.attributes['key']}")
            p_variable.delete()
            return project.variables.create(var_dict)
        else:
            # Single-quoted subscript: nesting the same quote character in an
            # f-string is a SyntaxError before Python 3.12.
            print(f"variable {var_dict['key']} already exists")
    # Create variable if it doesn't exist in the project
    else:
        print(f"Creating variable {var_dict['key']}")
        return project.variables.create(var_dict)
def get_pipeline_vars_by_key(sched_pipeline, key_name):
    """Return the pipeline variable dict whose key is *key_name*, else None."""
    return next(
        (
            variable
            for variable in sched_pipeline.attributes["variables"]
            if variable.get("key") == key_name
        ),
        None,
    )
# Function to create or update a schedule pipeline variable
def create_or_update_sched_vars(sched_pipeline, var_dict):
    """Create or update a scheduled-pipeline variable.

    Looks the variable up by key via get_pipeline_vars_by_key; recreates it
    when the stored dict differs from *var_dict*, creates it when absent,
    and leaves it alone when identical.
    """
    # Check if the variable exists in the sched pipeline
    p_variable = get_pipeline_vars_by_key(sched_pipeline, var_dict["key"])
    if p_variable:
        # Check if the attributes are the same
        if p_variable != var_dict:
            # Recreate rather than edit in place: delete the old value, then
            # create the new one.
            sched_pipeline.variables.delete(p_variable["key"])
            sched_pipeline.variables.create(var_dict)
        else:
            # Single-quoted subscript: nesting the same quote character in an
            # f-string is a SyntaxError before Python 3.12.
            print(f"variable {var_dict['key']} already exists")
    # Create variable if it doesn't exist in the pipeline
    else:
        print(f"Creating variable {var_dict['key']}")
        return sched_pipeline.variables.create(var_dict)
def main():
    """Create or update CI/CD variables from a YAML file.

    Applies every entry in --var_file to the project, or — when
    --sched_pipeline_id is given — to that scheduled pipeline.
    """
    # Setup argument parser
    parser = argparse.ArgumentParser(description="GitLab CI/CD Variables Updater")
    # NOTE: not required — argparse ignores `default` when required=True,
    # which contradicted the documented "(default: gitlab.ini)".
    parser.add_argument(
        "--config_file",
        type=str,
        default="gitlab.ini",
        help="Path to the configuration file (default: gitlab.ini)",
    )
    parser.add_argument(
        "--var_file",
        type=str,
        default="ci-variables.yaml",
        help="Path to the CI vars file (default: ci-variables.yaml)",
    )
    parser.add_argument(
        "--project_name",
        type=str,
        required=True,
        help="Gitlab project name with namespace",
    )
    parser.add_argument(
        "--sched_pipeline_id",
        type=int,
        help="Gitlab project scheduled pipeline ID",
    )
    # Parse the arguments
    args = parser.parse_args()

    gl = gitlab.Gitlab.from_config("uabrc", [args.config_file])
    project = gl.projects.get(args.project_name)

    # Load the CI vars file
    var_list = load_file(args.var_file)

    # Create or update all variables.
    if args.sched_pipeline_id:
        # Fetch the schedule once, not once per variable.
        sched_pipeline = project.pipelineschedules.get(args.sched_pipeline_id)
        for var_dict in var_list:
            create_or_update_sched_vars(sched_pipeline, var_dict)
    else:
        for var_dict in var_list:
            create_or_update_variable(project, var_dict)


if __name__ == "__main__":
    main()
[global]
default = uabrc
ssl_verify = true
timeout = 5
per_page = 100
[uabrc]
url = https://gitlab.rc.uab.edu
private_token =
api_version = 4