Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found

Target

Select target project
  • dwheel7/hpc-factory
  • rc/hpc-factory
  • louistw/hpc-factory
  • jpr/hpc-factory
  • krish94/hpc-factory
  • atlurie/hpc-factory
6 results
Show changes
Showing
with 776 additions and 3 deletions
---
# Install the rewrite-map helper script and point the front-end Apache
# config at the templated OOD rewrite rules and account-app port.
- name: Add apache rewritemap script config
  ansible.builtin.template:
    src: rewrite_map_config_py.j2
    dest: /var/www/rewrite_map_config.py
    owner: root
    group: root
    mode: '0600'

- name: Replace OOD rewrite condition regex in Apache configuration
  ansible.builtin.replace:
    path: /etc/httpd/conf.d/front-end.conf
    regexp: "RewriteCond %{HTTP:REMOTE_USER} '\\^\\(\\.\\+\\)\\$'"
    replace: |
      RewriteCond %{HTTP:REMOTE_USER} '([a-zA-Z0-9_.+-]+)@uab.edu$' [OR]
      RewriteCond %{HTTP:REMOTE_USER} 'urn:mace:incommon:uab.edu!https://uabgrid.uab.edu/shibboleth!(.+)$'

- name: Replace account app port in Apache configuration
  ansible.builtin.replace:
    path: /etc/httpd/conf.d/front-end.conf
    regexp: "account-app:8000"
    replace: "account-app:{{ account_app_port }}"

# Restart (not reload) so the new RewriteMap program is (re)spawned.
- name: Restart httpd services
  ansible.builtin.service:
    name: httpd
    enabled: true
    state: restarted
# Rendered by Ansible from rewrite_map_config_py.j2 — do not edit on the host.
DEBUG = False
# Map of group name -> backend hostname, one entry per configured target group.
target_groups = {
{% for group in target_groups %}
    "{{ group.name }}": "{{ group.host }}",
{% endfor %}
}
# Hostname used when no group matches; taken from the group flagged as default.
# NOTE(review): assumes exactly one group has default set — if several do,
# the last one in the list wins; confirm against the inventory vars.
{% for group in target_groups %}
{% if group.default %}
default_hostname = "{{ group.host }}"
{% endif %}
{% endfor %}
---
# Deploy the templated rsyslog configuration and restart the service so the
# new filters/forwarding rules take effect.
- name: Add rsyslog configuration
  ansible.builtin.template:
    src: rsyslog.conf.j2
    dest: /etc/rsyslog.conf
    owner: root
    group: root
    # Quoted with a leading zero: an unquoted 0644 is parsed as an octal
    # integer by YAML and can yield the wrong permissions.
    mode: '0644'
    backup: true

- name: Enable and start rsyslog
  ansible.builtin.service:
    name: rsyslog
    enabled: true
    state: restarted
# rsyslog configuration file
# For more information see /usr/share/doc/rsyslog-*/rsyslog_conf.html
# If you experience problems, see http://www.rsyslog.com/doc/troubleshoot.html
# Added for distro update >= 4 (7u4)
global (
net.enabledns="off"
)
#### MODULES ####
# The imjournal module below is now used as a message source instead of imuxsock.
$ModLoad imuxsock # provides support for local system logging (e.g. via logger command)
$ModLoad imjournal # provides access to the systemd journal
#$ModLoad imklog # reads kernel messages (the same are read from journald)
#$ModLoad immark # provides --MARK-- message capability
# Provides UDP syslog reception
#$ModLoad imudp
#$UDPServerRun 514
# Provides TCP syslog reception
#$ModLoad imtcp
#$InputTCPServerRun 514
#### GLOBAL DIRECTIVES ####
# Where to place auxiliary files
$WorkDirectory /var/lib/rsyslog
# Use default timestamp format
$ActionFileDefaultTemplate RSYSLOG_TraditionalFileFormat
# File syncing capability is disabled by default. This feature is usually not required,
# not useful and an extreme performance hit
#$ActionFileEnableSync on
# Include all config files in /etc/rsyslog.d/
$IncludeConfig /etc/rsyslog.d/*.conf
# Turn off message reception via local log socket;
# local messages are retrieved through imjournal now.
$OmitLocalLogging on
# File to store the position in the journal
$IMJournalStateFile imjournal.state
#### RULES ####
# Log all kernel messages to the console.
# Logging much else clutters up the screen.
#kern.* /dev/console
# Filter nslcd ldap ldap_abandon and ldap_result messages.
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains ' failed: Can\'t contact LDAP server' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'ldap_abandon() failed to abandon search: Other (e.g., implementation specific) error' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'ldap_abandon() failed to abandon search: Can\'t contact LDAP server: Transport endpoint is not connected' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'no available LDAP server found, sleeping ' then stop
if $programname == 'nslcd' and $syslogseverity >= '3' and $msg contains 'connected to LDAP server ldap://local' then stop
# Filter sntp started messages.
if $programname == 'sntp' and $syslogseverity > '3' and $msg contains 'Started sntp' then stop
# MariaDB Galera
# disabled, as these messages are being generated every few seconds
:msg, contains, "START: cm-check-galera-status" stop
:msg, contains, "EXIT: cm-check-galera-status" stop
# HAProxy for OpenStack
if $syslogfacility-text == 'local4' and ($programname == 'haproxy') then {
local4.* /var/log/haproxy.log
stop
}
# OpenStack specific
if $syslogfacility-text == 'daemon' then {
# needed for proper handling of Python stack traces
$EscapeControlCharactersOnReceive off
if $programname startswith 'keystone' then {
*.* /var/log/keystone/keystone.log
}
if $programname startswith 'nova' then {
*.* /var/log/nova/nova.log
if $programname == 'nova-api' then {
*.* /var/log/nova/nova-api.log
}
if $programname == 'nova-scheduler' then {
*.* /var/log/nova/nova-scheduler.log
}
if $programname == 'nova-conductor' then {
*.* /var/log/nova/nova-conductor.log
}
if $programname == 'nova-novncproxy' then {
*.* /var/log/nova/nova-novncproxy.log
}
if $programname == 'nova-compute' then {
*.* /var/log/nova/nova-compute.log
}
}
if $programname startswith 'neutron' then {
*.* /var/log/neutron/neutron.log
if $programname == 'neutron-server' then {
*.* /var/log/neutron/neutron-server.log
}
if $programname == 'neutron-metadata-agent' then {
*.* /var/log/neutron/neutron-metadata-agent.log
}
if $programname == 'neutron-l3-agent' then {
*.* /var/log/neutron/neutron-l3-agent.log
}
if $programname == 'neutron-dhcp-agent' then {
*.* /var/log/neutron/neutron-dhcp-agent.log
}
if $programname == 'neutron-openvswitch-agent' then {
*.* /var/log/neutron/neutron-openvswitch-agent.log
}
}
if $programname startswith 'glance' then {
*.* /var/log/glance/glance.log
if $programname == 'glance-api' then {
*.* /var/log/glance/glance-api.log
}
if $programname == 'glance-registry' then {
*.* /var/log/glance/glance-registry.log
}
}
if $programname startswith 'cinder' then {
*.* /var/log/cinder/cinder.log
if $programname == 'cinder-api' then {
*.* /var/log/cinder/cinder-api.log
}
if $programname == 'cinder-scheduler' then {
*.* /var/log/cinder/cinder-scheduler.log
}
if $programname == 'cinder-volume' then {
*.* /var/log/cinder/cinder-volume.log
}
if $programname == 'cinder-backup' then {
*.* /var/log/cinder/cinder-backup.log
}
}
if $programname startswith 'heat' then {
*.* /var/log/heat/heat.log
if $programname == 'heat-api' then {
*.* /var/log/heat/heat-api.log
}
if $programname == 'heat-engine' then {
*.* /var/log/heat/heat-engine.log
}
}
if $programname startswith 'keystone' or \
$programname startswith 'nova' or \
$programname startswith 'neutron' or \
$programname startswith 'glance' or \
$programname startswith 'cinder' or \
$programname startswith 'heat' then {
*.* /var/log/openstack
*.* @master:514
stop
}
}
# Log anything (except mail) of level info or higher.
# Don't log private authentication messages!
*.info;mail.none;authpriv.none;cron.none;local5.none;local6.none /var/log/messages
# The authpriv file has restricted access.
authpriv.* /var/log/secure
# Log all the mail messages in one place.
mail.* -/var/log/maillog
# Log cron stuff
cron.* /var/log/cron
# Everybody gets emergency messages
*.emerg :omusrmsg:*
# Save news errors of level crit and higher in a special file.
uucp,news.crit /var/log/spooler
# Save boot messages also to boot.log
local7.* /var/log/boot.log
# cm related log files:
local5.* -/var/log/node-installer
local6.* -/var/log/cmdaemon
# ### begin forwarding rule ###
# The statement between the begin ... end define a SINGLE forwarding
# rule. They belong together, do NOT split them. If you create multiple
# forwarding rules, duplicate the whole block!
# Remote Logging (we use TCP for reliable delivery)
#
# An on-disk queue is created for this action. If the remote host is
# down, messages are spooled to disk and sent when it is up again.
#$ActionQueueFileName fwdRule1 # unique name prefix for spool files
#$ActionQueueMaxDiskSpace 1g # 1gb space limit (use as much as possible)
#$ActionQueueSaveOnShutdown on # save messages to disk on shutdown
#$ActionQueueType LinkedList # run asynchronously
#$ActionResumeRetryCount -1 # infinite retries if host is down
# remote host is: name/ip:port, e.g. 192.168.0.1:514, port optional
#*.* @@remote-host:514
#CM
{{ rsyslog_target }}
#### end of the forwarding rule ###
......@@ -19,6 +19,7 @@
state: present
uid: 450
group: slurm
create_home: false
- name: Copy munge key
ansible.builtin.copy:
......@@ -28,6 +29,19 @@
group: root
mode: 0400
# Link the node-local Slurm config paths to the cluster-shared copies so all
# nodes read one authoritative configuration.
- name: Create symbolic links for Slurm config files
  ansible.builtin.file:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    state: link
    # Create the symlinks even if the shared source files do not exist yet
    # (they appear once the shared filesystem is mounted).
    force: true
  loop:
    - { src: "/cm/shared/apps/slurm/var/etc/cgroup.conf", dest: "/etc/slurm/cgroup.conf" }
    - { src: "/cm/shared/apps/slurm/var/etc/gres.conf", dest: "/etc/slurm/gres.conf" }
    - { src: "/cm/shared/apps/slurm/var/etc/slurm.conf", dest: "/etc/slurm/slurm.conf" }
    - { src: "/cm/shared/apps/slurm/var/etc/slurmdbd.conf", dest: "/etc/slurm/slurmdbd.conf" }
    - { src: "/cm/shared/apps/slurm/var/etc/job_submit.lua", dest: "/etc/slurm/job_submit.lua" }
- name: Enable services
ansible.builtin.service:
name: "{{ item }}"
......
---
# Fetch pre-generated SSH host keys from S3 and install them into /etc/ssh so
# rebuilt nodes keep a stable host identity. All S3 steps are skipped when the
# bucket/object variables are empty.
- name: Ensure destination directory exists
  ansible.builtin.file:
    path: /tmp/ssh_keys
    state: directory
    mode: '0755'

- name: Install required package
  ansible.builtin.pip:
    name: boto3
    extra_args: "--extra-index-url https://pypi.python.org/simple"
    executable: "/usr/bin/pip3"

- name: Download SSH host keys tar.gz from S3
  # NOTE(review): aws_s3 is the legacy short module name; recent amazon.aws
  # collections call this amazon.aws.s3_object — confirm against the
  # installed collection before renaming.
  aws_s3:
    mode: get
    s3_url: "{{ S3_ENDPOINT }}"
    bucket: "{{ SSH_HOST_KEYS_S3_BUCKET }}"
    object: "{{ SSH_HOST_KEYS_S3_OBJECT }}"
    dest: "/tmp/ssh_keys/{{ SSH_HOST_KEYS_S3_OBJECT }}"
    aws_access_key: "{{ LTS_ACCESS_KEY }}"
    aws_secret_key: "{{ LTS_SECRET_KEY }}"
  vars:
    ansible_python_interpreter: /usr/bin/python3
  when: SSH_HOST_KEYS_S3_BUCKET | length > 0 and SSH_HOST_KEYS_S3_OBJECT | length > 0

- name: Unpack SSH host keys to /etc/ssh
  ansible.builtin.unarchive:
    src: "/tmp/ssh_keys/{{ SSH_HOST_KEYS_S3_OBJECT }}"
    dest: "/etc/ssh"
    owner: root
    group: root
    remote_src: true
  become: true
  when: SSH_HOST_KEYS_S3_BUCKET | length > 0 and SSH_HOST_KEYS_S3_OBJECT | length > 0

- name: Remove the temporary folder after keys are in place
  ansible.builtin.file:
    path: /tmp/ssh_keys
    state: absent

# Restart sshd so it picks up the replaced host keys.
- name: Restart SSH service
  ansible.builtin.service:
    name: sshd
    state: restarted
  become: true
---
# Render the sshpiper yaml-plugin routing config and restart the daemon so
# new pipe definitions are loaded.
- name: Configure sshpiper yaml plugin
  ansible.builtin.template:
    src: sshpiperd.yaml.j2
    dest: "{{ sshpiper_dest_dir }}/sshpiperd.yaml"
    backup: true

- name: Enable and start sshpiper service
  ansible.builtin.service:
    name: sshpiperd
    enabled: true
    state: restarted
# yaml-language-server: $schema=https://raw.githubusercontent.com/tg123/sshpiper/master/plugin/yaml/schema.json
# Routing config for the sshpiper yaml plugin. Non-default groups are matched
# by groupname; the default group gets a regex catch-all on username.
# For each group two pipes are emitted: one with an upstream private key (for
# connections authenticated via authorized_keys) and one without.
version: "1.0"
pipes:
{% for group in target_groups %}
{% if not group.default %}
  - from:
      - groupname: "{{ group.name }}"
        authorized_keys: "{{ group.authorized_keys }}"
    to:
      host: "{{ group.host }}"
      ignore_hostkey: true
      private_key: "{{ group.private_key }}"
  - from:
      - groupname: "{{ group.name }}"
    to:
      host: "{{ group.host }}"
      ignore_hostkey: true
{% else %}
  - from:
      - username: ".*"  # catch all
        username_regex_match: true
        authorized_keys: "{{ group.authorized_keys }}"
    to:
      host: "{{ group.host }}"
      ignore_hostkey: true
      private_key: "{{ group.private_key }}"
  - from:
      - username: ".*"
        username_regex_match: true
    to:
      host: "{{ group.host }}"
      ignore_hostkey: true
{% endif %}
{% endfor %}
---
# Pull SSL certificate, chain, and key from S3, lock down permissions, then
# rewrite the Apache SSL directives to point at the installed files.
# All steps are conditional on the relevant variables being non-empty.
- name: Download SSL Certs from S3
  aws_s3:
    mode: get
    s3_url: "{{ S3_ENDPOINT }}"
    bucket: "{{ ssl_cert_s3_bucket }}"
    object: "{{ item }}"
    dest: "{{ ssl_cert_file_location }}/{{ item }}"
    aws_access_key: "{{ LTS_ACCESS_KEY }}"
    aws_secret_key: "{{ LTS_SECRET_KEY }}"
  vars:
    ansible_python_interpreter: /usr/bin/python3
  when: ssl_cert_s3_bucket | length > 0 and item | length > 0
  loop:
    - "{{ ssl_cert_file }}"
    - "{{ ssl_cert_chain_file }}"

- name: Change cert files permissions
  ansible.builtin.file:
    path: "{{ ssl_cert_file_location }}/{{ item }}"
    owner: root
    group: root
    mode: '0600'
  when: ssl_cert_s3_bucket | length > 0 and item | length > 0
  loop:
    - "{{ ssl_cert_file }}"
    - "{{ ssl_cert_chain_file }}"

- name: Download SSL key from S3
  aws_s3:
    mode: get
    s3_url: "{{ S3_ENDPOINT }}"
    bucket: "{{ ssl_cert_s3_bucket }}"
    object: "{{ ssl_cert_key }}"
    dest: "{{ ssl_cert_key_location }}/{{ ssl_cert_key }}"
    aws_access_key: "{{ LTS_ACCESS_KEY }}"
    aws_secret_key: "{{ LTS_SECRET_KEY }}"
  vars:
    ansible_python_interpreter: /usr/bin/python3
  when: ssl_cert_s3_bucket | length > 0 and ssl_cert_key | length > 0

- name: Change key file permissions
  ansible.builtin.file:
    path: "{{ ssl_cert_key_location }}/{{ ssl_cert_key }}"
    owner: root
    group: root
    mode: '0400'
  when: ssl_cert_s3_bucket | length > 0 and ssl_cert_key | length > 0

# Uncomments (if needed) and repoints each SSLCertificate* directive; \1 is
# the captured directive name.
- name: Update SSL in Apache config
  ansible.builtin.replace:
    path: "{{ ssl_apache_config }}"
    regexp: "{{ item.regexp }}"
    replace: "\\1 {{ item.location }}/{{ item.value }}"
    backup: true
  when: ssl_apache_config | length > 0 and item.value | length > 0
  loop:
    - { regexp: "#?(SSLCertificateFile).*$", location: "{{ ssl_cert_file_location }}", value: "{{ ssl_cert_file }}" }
    - { regexp: "#?(SSLCertificateChainFile).*$", location: "{{ ssl_cert_file_location }}", value: "{{ ssl_cert_chain_file }}" }
    - { regexp: "#?(SSLCertificateKeyFile).*$", location: "{{ ssl_cert_key_location }}", value: "{{ ssl_cert_key }}" }

- name: Restart apache service
  ansible.builtin.service:
    name: "{{ apache_service }}"
    state: restarted
File moved
......@@ -36,11 +36,19 @@ source "openstack" "image" {
build {
sources = ["source.openstack.image"]
provisioner "shell" {
inline = [
"sudo yum install -y libselinux-python3 python3 python3-pip tmux vim git bash-completion curl wget unzip",
"sudo python3 -m pip install --upgrade pip",
"sudo pip3 install s3cmd==2.3.0 ansible==4.10.0 python-openstackclient==5.8.0"
]
}
provisioner "ansible" {
use_proxy = false
user = var.ssh_username
groups = ["compute"]
playbook_file = "./ansible/compute.yml"
playbook_file = "./ansible/login.yml"
roles_path = "./ansible/roles"
extra_arguments = [
"--extra-vars", "root_ssh_key='${var.root_ssh_key}'"
......@@ -53,5 +61,8 @@ build {
groups = ["compute"]
ansible_env_vars = ["ANSIBLE_HOST_KEY_CHECKING=False"]
playbook_file = "./CRI_XCBC/compute-packer.yaml"
extra_arguments = [
"--extra-vars", "${var.extra_vars}"
]
}
}
variable "root_ssh_key" {
type = string
description = "The root key to use for ssh"
default = ""
}
variable "image_name" {
......@@ -87,4 +88,10 @@ variable "volume_size" {
type = number
default = 20
description = "The default volume size for building image"
}
\ No newline at end of file
}
variable "extra_vars" {
type = string
default = ""
description = "Extra vars to pass to ansible playbook command"
}
......@@ -53,5 +53,17 @@ build {
groups = ["ood", "knightly"]
ansible_env_vars = ["ANSIBLE_HOST_KEY_CHECKING=False"]
playbook_file = "./CRI_XCBC/ood-packer.yaml"
extra_arguments = [
"--extra-vars", "${var.extra_vars}"
]
}
provisioner "shell" {
inline = [
"sudo yum install -y libselinux-python3 python3 python3-pip tmux vim git bash-completion curl wget unzip",
"sudo python3 -m pip install --upgrade pip",
"sudo pip3 install s3cmd==2.3.0 ansible==4.10.0 python-openstackclient==5.8.0"
]
}
}
variable "root_ssh_key" {
type = string
default = ""
description = "The root key to use for ssh"
}
......@@ -87,4 +88,10 @@ variable "volume_size" {
type = number
default = 20
description = "The default volume size for building image"
}
\ No newline at end of file
}
variable "extra_vars" {
type = string
default = ""
description = "Extra vars to pass to ansible playbook command"
}
This contains packer hcl files for creating images. For documentation on packer, see [here](https://www.packer.io/docs); for information about the openstack-specific builder, see [here](https://www.packer.io/plugins/builders/openstack)
# Packer definition for the proxy image: OpenStack builder plus a shell
# bootstrap and an Ansible provisioner running the CRI_XCBC proxy playbook.
packer {
  required_plugins {
    openstack = {
      version = "~> 1"
      source  = "github.com/hashicorp/openstack"
    }
    ansible = {
      version = "~> 1"
      source  = "github.com/hashicorp/ansible"
    }
  }
}

locals {
  # Optionally append a -YYYYMMDDHHmm timestamp to the image name.
  local_image_name = "${var.image_name}${var.image_date_suffix ? formatdate("-YYYYMMDDHHmm", timestamp()) : ""}"
}

source "openstack" "image" {
  skip_create_image         = var.skip_create_image
  image_name                = local.local_image_name
  source_image              = var.source_image
  image_members             = var.image_membership
  image_auto_accept_members = var.auto_accept_members
  image_tags                = var.image_tags
  image_disk_format         = var.image_format
  volume_size               = var.volume_size
  flavor                    = var.flavor
  instance_name             = var.build_instance_name
  # Build on a blockstorage volume so volume_size controls the image size.
  use_blockstorage_volume   = true
  floating_ip_network       = var.floating_ip_network
  networks                  = var.networks
  security_groups           = var.security_groups
  ssh_username              = var.ssh_username
}

build {
  sources = ["source.openstack.image"]

  # Bootstrap: install Python, pip, and a pinned Ansible so the ansible
  # provisioner below can run on the instance.
  provisioner "shell" {
    inline = [
      "sudo yum install -y epel-release",
      "sudo dnf config-manager --set-enabled crb",
      "sudo yum install -y libselinux-python3 python3 python3-pip tmux vim git bash-completion curl wget unzip httpd",
      "sudo python3 -m pip install --upgrade pip",
      "sudo pip3 install s3cmd==2.3.0 ansible==4.10.0 python-openstackclient==5.8.0"
    ]
  }

  provisioner "ansible" {
    use_proxy = false
    user      = var.ssh_username
    groups    = ["proxy"]
    ansible_env_vars = [
      "ANSIBLE_TIMEOUT=60",
      "ANSIBLE_HOST_KEY_CHECKING=False",
      "ANSIBLE_VERBOSITY=${var.ANSIBLE_VERBOSITY}",
      "ANSIBLE_DEBUG=${var.ANSIBLE_DEBUG}",
      "ANSIBLE_FORCE_COLOR=true"
    ]
    playbook_file = "./CRI_XCBC/proxy.yaml"
    extra_arguments = [
      "--extra-vars", "${var.extra_vars}"
    ]
  }
}
variable "root_ssh_key" {
type = string
default = ""
description = "The root key to use for ssh"
}
variable "image_name" {
type = string
default = "cluster-image"
description = "Name of the image in openstack"
}
variable "image_format" {
type = string
default = "qcow2"
description = "The format of the resulting image"
}
variable "image_date_suffix" {
type = bool
default = false
description = "Append a date to the image name (in YYYYMMDDHHMMSS format)"
}
variable "image_tags" {
type = list(string)
default = []
description = "List of tags to be associated to the resulting image"
}
variable "image_membership" {
type = list(string)
default = []
description = "Projects/tenants to share the image in openstack with"
}
variable "auto_accept_members" {
type = bool
default = false
description = "A boolean value for auto accepting image in the projects/tenants defined in image_membership."
}
variable "skip_create_image" {
type = bool
default = false
description = "A boolean value for skipping image creation at the end of the build"
}
variable "source_image" {
type = string
default = ""
description = "The name of the source image to use"
}
variable "flavor" {
type = string
default = ""
description = "The name of the flavor to use"
}
variable "floating_ip_network" {
type = string
default = "uab-campus"
description = "Floating IP network to use for (temporary) IP assignment to a VM"
}
variable "networks" {
type = list(string)
default = []
description = "List of network UUIDs to assign to the network"
}
variable "security_groups" {
type = list(string)
default = []
description = "A list of security groups to add - you should make sure ssh access is open to the machine"
}
variable "build_instance_name" {
type = string
default = "ood"
description = "A name of build instance used for image build"
}
variable "ssh_username" {
type = string
default = "centos"
description = "The default username to use for SSH"
}
variable "volume_size" {
type = number
default = 20
description = "The default volume size for building image"
}
variable "ANSIBLE_DEBUG" {
type = string
default = "false"
description = "to turn on debugging"
}
variable "ANSIBLE_VERBOSITY" {
type = string
default = "0"
description = "to increase verbosity - 0|1|2|3|4"
}
variable "extra_vars" {
type = string
default = ""
description = "Extra vars to pass to ansible playbook command"
}
certifi==2025.1.31
charset-normalizer==3.4.1
idna==3.10
python-gitlab==5.6.0
requests==2.32.3
requests-toolbelt==1.0.0
urllib3==2.4.0
### Description
These utility scripts avoid the tedium of copying each CI variable manually.
- The gitlab-ci-vars-reader.py reads variables from a specific project or a pipeline (depending on the options provided) and copies them into a yaml file
- The gitlab-ci-vars-updater.py takes a yaml file containing key value pairs in yaml format as an input. It then creates/updates project variables or pipeline variables (depending on the options provided)
### Prerequisites
```
python -m venv ~/venvs/gitlab
source ~/venvs/gitlab/bin/activate
pip install -r requirements
```
### Setup
```
cd utils
mv gitlab.ini.example gitlab.ini
```
Make changes to the gitlab.ini as you require.
[Create a personal access token](https://docs.gitlab.com/user/profile/personal_access_tokens/) via the gitlab UI and copy it to the private_token field in gitlab.ini file
### Usage
> Create an empty schedule pipeline before you try this out.
```
python3 gitlab-ci-vars-reader.py --config_file gitlab.ini --project_id <PROJECT_ID> --sched_pipeline_id <PIPELINE_ID> --var_file ci-variables.yaml
python3 gitlab-ci-vars-updater.py --config_file gitlab.ini --project_id <PROJECT_ID> --sched_pipeline_id <NEW-PIPELINE_ID> --var_file ci-variables.yaml
```
import argparse
import sys

import gitlab
import yaml
# Function to fetch all CI/CD variables from a GitLab project
def fetch_variables(project):
    """Return every CI/CD variable of a GitLab project as a plain dict."""
    return [variable.asdict() for variable in project.variables.list(iterator=True)]
def fetch_sched_variables(sched_pipeline):
    """Return the variables attached to a scheduled pipeline object."""
    return sched_pipeline.attributes["variables"]
# Main function to load the config and fetch variables
def main():
    """Read project or scheduled-pipeline CI variables and dump them to YAML.

    Connects to GitLab using the "uabrc" section of the given config file.
    When --sched_pipeline_id is provided, fetches that schedule's variables;
    otherwise fetches the project-level variables. Exits with status 1 when
    the output file cannot be written.
    """
    parser = argparse.ArgumentParser(description="GitLab CI/CD Variable reader")
    # NOTE: default + required=True is contradictory (the default was dead
    # code); the flag is now optional so the documented default applies.
    parser.add_argument(
        "--config_file",
        type=str,
        default="gitlab.ini",
        help="Path to the configuration file (default: gitlab.ini)",
    )
    parser.add_argument(
        "--var_file",
        type=str,
        default="ci-variables.yaml",
        help="Path to the CI vars file (default: ci-variables.yaml)",
    )
    parser.add_argument(
        "--project_name",
        type=str,
        required=True,
        help="Gitlab project name with namespace",
    )
    parser.add_argument(
        "--sched_pipeline_id",
        type=int,
        help="Gitlab project scheduled pipeline ID",
    )
    args = parser.parse_args()

    gl = gitlab.Gitlab.from_config("uabrc", [args.config_file])
    project = gl.projects.get(args.project_name)

    # Fetch project or scheduled-pipeline variables.
    if not args.sched_pipeline_id:
        variables = fetch_variables(project)
    else:
        sched_pipeline = project.pipelineschedules.get(args.sched_pipeline_id)
        variables = fetch_sched_variables(sched_pipeline)

    try:
        with open(args.var_file, mode="wt", encoding="utf-8") as file:
            yaml.dump(variables, file, explicit_start=True)
    except OSError as err:
        # OSError covers missing directories, permissions, full disk, etc.;
        # the previous FileNotFoundError missed most write failures.
        print(f"Error: cannot write variables to '{args.var_file}': {err}")
        sys.exit(1)
# Run the main function only when executed as a script (not on import).
if __name__ == "__main__":
    main()