Skip to content
Snippets Groups Projects
Commit 7bf68c38 authored by Ravi Tripathi's avatar Ravi Tripathi
Browse files

Merge branch 'cicd' into 'master'

Add Packer build with variables from vars.json

See merge request rc/ohpc-packer!3
parents 1f352387 3ccc6979
No related branches found
No related tags found
1 merge request!3Add Packer build with variables from vars.json
# CI runner image with Packer and the OpenStack CLI preinstalled.
image: gitlab.rc.uab.edu:4567/rc/packer-openstack-hpc-image:latest

variables:
  # Ansible needs a writable remote tmp dir inside the build instance.
  ANSIBLE_REMOTE_TMP: "/tmp"
  # OpenStack authentication (application-credential flow); the credential
  # ID/secret themselves come from protected CI/CD variables, not this file.
  OS_REGION_NAME: "bhm1"
  OS_INTERFACE: "public"
  OS_IDENTITY_API_VERSION: "3"
  OS_AUTH_TYPE: "v3applicationcredential"
  OS_AUTH_URL: "https://keystone.cloud.rc.uab.edu:5000/v3"
  # PKR_VAR_* values are read by Packer as HCL input variables.
  PKR_VAR_external_net: "b186e28a-505a-4bcd-bac2-ec01b3916a42"
  PKR_VAR_internal_net: "311c0dfc-2dd8-47e6-9ab7-224fad5feb10"
  PKR_VAR_floating_ip_network: "uab-campus"
  PKR_VAR_instance_floating_ip_net: "b186e28a-505a-4bcd-bac2-ec01b3916a42"
  PKR_VAR_build_instance_name: "ohpc"
  # Overridden at build time with a timestamped name (see build_packer job).
  PKR_VAR_build_image_name: "xdmod"
  PKR_VAR_ssh_username: "centos"
  PKR_VAR_ssh_keypair_name: "dev-keys"
  PKR_VAR_flavor: "m1.medium"
  PKR_VAR_source_image_name: "CentOS-7-x86_64-GenericCloud-1905"
  # Check out nested submodules (CRI_XCBC, CRI_Cluster_Monitor) automatically.
  GIT_SUBMODULE_STRATEGY: recursive
  # Quoted: CI variables are exported as strings; avoid implicit-int typing.
  NUM_IMAGE_TO_KEEP: "5"
  # LTS/S3 settings used by the jobs' storage tooling.
  AWS_DEFAULT_REGION: "bhm"
  AWS_HOST: "s3.lts.rc.uab.edu"
# Reconstructed merged (new-side) pipeline from the fused diff view:
# the template test/deploy jobs were removed; build + clean remain.
stages:  # List of stages for jobs, and their order of execution
  - build
  - clean

build_packer:
  stage: build
  tags: [build]
  script:
    # Pin the playbook repos to their tracked branches before baking.
    - cd CRI_XCBC && git checkout dev && git pull && cd ..
    - cd CRI_Cluster_Monitor && git checkout master && git pull && cd ..
    # Timestamped image name so each build is unique and sortable.
    - export PKR_VAR_build_image_name="xdmod-$(date +%Y%m%d%H%M%S)"
    # Flip feature flags in the playbooks' group_vars for an XDMoD build.
    # NOTE(review): "selinix" looks like a typo, but it must match the key
    # actually present in CRI_Cluster_Monitor/group_vars/all — confirm there.
    - 'sed -i -E "s/(enable_selinix_permissive: ).*/\1true/" CRI_Cluster_Monitor/group_vars/all'
    - 'sed -i -E "s/(enable_simplesaml: ).*/\1true/" CRI_Cluster_Monitor/group_vars/all'
    - 'sed -i -E "s/(openssl_cert_gen: ).*/\1true/" CRI_Cluster_Monitor/group_vars/all'
    - 'sed -i -E "s/(ServerName: ).*/\1xdmod.rc.uab.edu/" CRI_Cluster_Monitor/group_vars/all'
    - 'sed -i -E "s/(trusted_url_domains: ).*/\1xdmod.rc.uab.edu/" CRI_Cluster_Monitor/group_vars/all'
    - 'sed -i -E "s/(sacct_log_file_path: ).*/\1\/CRI_Cluster_Monitor\/slurm-cluster.log/" CRI_Cluster_Monitor/group_vars/all'
    - 'sed -i -E "s/(enable_user_reg: ).*/\1false/" CRI_XCBC/group_vars/all'
    - echo $PKR_VAR_s3_endpoint
    - packer validate xdmod
    - packer build -machine-readable xdmod

clean_up:  # This job removes older built images
  stage: clean  # It only starts when the job in the build stage completes successfully.
  script:
    - |
      IMAGE_TO_BE_DELETE=($(openstack image list --sort-column Name --sort-descending -f value -c Name -c ID | grep -P ' xdmod-\d{14}$' | sed -n $(($NUM_IMAGE_TO_KEEP+1))',$p' | awk '{print $1}'))
      echo "Images deleted:\n"
      for img in ${IMAGE_TO_BE_DELETE[@]}; do
        echo ${img}
        openstack image delete ${img}
      done
Subproject commit 15ab55adb45b4fe65dc3bab1b83e51778469903c Subproject commit 95c9fc88850e399ade15c6c3a7becae8f46928af
Subproject commit 87badfd6dd25dbaca51cac6367d135b215b25c76 Subproject commit 10cea5b86f178dcb3bbbe3be144127f2d1731d9a
...@@ -7,7 +7,6 @@ ...@@ -7,7 +7,6 @@
"img_build_version":"", "img_build_version":"",
"ssh_username": "centos", "ssh_username": "centos",
"ssh_keypair_name": "", "ssh_keypair_name": "",
"private_key_file": "~/.ssh/id_rsa",
"ssh_host": "", "ssh_host": "",
"flavor": "m1.medium" "flavor": "m1.medium"
} }
source "openstack" "image" {
flavor = "${var.flavor}"
image_name = "${var.build_image_name}"
floating_ip_network = "${var.floating_ip_network}"
instance_floating_ip_net = "${var.instance_floating_ip_net}"
instance_name = "${var.build_instance_name}"
networks = ["${var.external_net}", "${var.internal_net}"]
reuse_ips = "false"
source_image_name = "${var.source_image_name}"
ssh_keypair_name = "${var.ssh_keypair_name}"
ssh_private_key_file = "${var.private_key_file}"
ssh_username = "${var.ssh_username}"
#pause_before_connecting = "5m"
#ssh_timeout = "10m"
}
# Provisioning pipeline: stage the two playbook repos on the instance,
# install baseline packages, then run both Ansible playbooks locally.
build {
  sources = ["source.openstack.image"]

  # Create staging dirs owned by the SSH user, install base tooling.
  provisioner "shell" {
    inline = [
      "sudo mkdir -p /CRI_XCBC && sudo chown ${var.ssh_username}: /CRI_XCBC",
      "sudo mkdir -p /CRI_Cluster_Monitor && sudo chown ${var.ssh_username}: /CRI_Cluster_Monitor",
      # EPEL first so the ansible package is resolvable on CentOS 7.
      "sudo yum install -y epel-release",
      "sudo yum install -y ansible git vim bash-completion",
      "sudo yum install -y NetworkManager",
      "sudo systemctl restart NetworkManager",
      "sudo nmcli con mod 'Wired connection 1' connection.id 'eth1'",
    ]
  }

  # Copy the repos checked out by CI into the staging dirs.
  provisioner "file" {
    destination = "/CRI_XCBC/"
    source      = "CRI_XCBC/"
  }
  provisioner "file" {
    destination = "/CRI_Cluster_Monitor/"
    source      = "CRI_Cluster_Monitor/"
  }

  # Run both playbooks against localhost; secrets are passed through the
  # environment and preserved across sudo with -E.
  provisioner "shell" {
    environment_vars = [
      "lts_access_key=${var.lts_access_key}",
      "lts_secret_key=${var.lts_secret_key}",
      "s3_endpoint=${var.s3_endpoint}",
      "s3_shibboleth_bucket_name=${var.s3_shibboleth_bucket_name}",
      "s3_shibboleth_object_name=${var.s3_shibboleth_object_name}"
    ]
    inline = [
      # Double quotes so the shell expands $s3_endpoint; the previous
      # single-quoted form printed the literal "$s3_endpoint".
      "sudo -E echo \"s3_endpoint: $s3_endpoint\"",
      "sudo -E ansible-playbook -c local -i /CRI_XCBC/hosts -l `hostname -s` /CRI_XCBC/site-build.yaml -b",
      "sudo -E ansible-playbook -c local -i /CRI_XCBC/hosts -l `hostname -s` /CRI_Cluster_Monitor/clustermon.yml -b",
    ]
  }

  # Retained for reference: cleanup and ansible-provisioner alternatives.
  #provisioner "shell" {
  # inline = [
  # "sudo rm -rf /CRI_XCBC",
  # "sudo rm -rf /CRI_Cluster_Monitor",
  # ]
  # }
  # provisioner "ansible" {
  # user = var.ssh_username
  # groups = ["headnode"]
  # playbook_file = "./CRI_XCBC/site-build.yaml"
  # extra_arguments = [
  # "-b"
  # ]
  # }
  # provisioner "ansible" {
  # user = var.ssh_username
  # groups = ["headnode"]
  # playbook_file = "./CRI_Cluster_Monitor/clustermon.yml"
  # extra_arguments = [
  # "-b",
  # "-v"
  # ]
  # }
}
variable "source_image_name" {
type = string
default = "CentOS-7-x86_64-GenericCloud-1905"
description = "Name of the image in openstack"
}
variable "external_net" {
type = string
description = "Name of the external network in openstack"
}
variable "internal_net" {
type = string
description = "Name of the internal network in openstack"
}
variable "floating_ip_network" {
type = string
description = "Floating IP network to be use in openstack build instance"
}
variable "instance_floating_ip_net" {
type = string
description = "Network to associate the floating IP with openstack build instance"
}
variable "build_instance_name" {
type = string
description = "Name of the openstack build instance"
}
variable "build_image_name" {
type = string
description = "Name of the image built on openstack"
}
variable "ssh_username" {
type = string
description = "Username in openstack"
}
variable "ssh_keypair_name" {
type = string
description = "Keypair to be used in openstack build instance"
}
variable "flavor" {
type = string
default = "m1.medium"
description = "Flavor of openstack instance"
}
variable "private_key_file" {
type = string
default = ""
description = "Private key file used in the openstack build instance"
}
variable "lts_access_key" {
type = string
default = ""
description = "lts access key"
}
variable "lts_secret_key" {
type = string
default = ""
description = "lts secret key"
}
variable "s3_endpoint" {
type = string
default = ""
description = "s3 endpoint"
}
variable "s3_shibboleth_bucket_name" {
type = string
default = ""
description = "s3 shibboleth bucket name"
}
variable "s3_shibboleth_object_name" {
type = string
default = ""
description = "s3 shibboleth object name"
}
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment