Commits on Source (219)
Showing 914 additions and 118 deletions
[defaults]
# change the default callback, you can only have one 'stdout' type enabled at a time.
#stdout_callback = skippy
stdout_callback = yaml
## Ansible ships with some plugins that require whitelisting,
## this is done to avoid running all of a type by default.
## This setting lists those that you want enabled for your system.
## Custom plugins should not need this unless plugin author specifies it.
# enable callback plugins, they can output to stdout but cannot be 'stdout' type.
callbacks_enabled = timer, debug, profile_roles, profile_tasks, minimal
# Force color
force_color = true
---
- name: Setup node for use as a virtual cheaha node
  hosts: all
  become: true
  roles:
    - { name: 'cheaha.node', tags: 'cheaha.node' }
    - { name: 'nfs_mounts', tags: 'nfs_mounts', when: enable_nfs_mounts }
    - { name: 'ldap_config', tags: 'ldap_config' }
    - { name: 'slurm_client', tags: 'slurm_client', when: enable_slurm_client }
    - { name: 'ssh_host_keys', tags: 'ssh_host_keys' }
    - { name: 'ssh_proxy_config', tags: 'ssh_proxy_config', when: enable_ssh_proxy_config }
    - { name: 'ssl_cert', tags: 'ssl_cert', when: enable_ssl_certs }
    - { name: 'rsyslog_config', tags: 'rsyslog_config', when: enable_rsyslog_config }
    - { name: 'rewrite_map', tags: 'rewrite_map', when: enable_rewrite_map }
    - { name: 'fail2ban', tags: 'fail2ban', when: enable_fail2ban }
    - { name: 'install_node_exporter', tags: 'install_node_exporter', when: enable_node_exporter }
    - { name: 'ood_config', tags: 'ood_config', when: enable_ood_config }
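For reference, a minimal invocation sketch for the playbook above, assuming it is saved as cheaha.yml and that an inventory file named hosts.ini exists (both names are illustrative; the enable_* switches come from the group variables shown further down):

# hypothetical run: apply only selected roles and toggle features via extra-vars
ansible-playbook -i hosts.ini cheaha.yml --tags nfs_mounts,fail2ban -e enable_nfs_mounts=true -e enable_fail2ban=true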
@@ -28,8 +28,79 @@
 ldap_uri: "ldap://ldapserver"
 # nfs_mounts related
+enable_nfs_mounts: true
 use_autofs: false
+use_fstab: false
 mount_points:
-  - /gpfs4
-  - /gpfs5
+  - { "src": "master:/gpfs4", "path": "/gpfs4", "opts": "ro,sync,hard", "mode": "0755" }
+  - { "src": "master:/gpfs5", "path": "/gpfs5", "opts": "ro,sync,hard", "mode": "0755" }
+autofs_mounts:
+  - { "src": "master:/gpfs4/&", "path": "/gpfs4", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs4", "map_name": "gpfs4", key: "*" }
+  - { "src": "master:/gpfs5/&", "path": "/gpfs5", "opts": "fstype=nfs,vers=3,_netdev,default", "mode": '0755', "mount_point": "/gpfs5", "map_name": "gpfs5", key: "*" }
+# SSH host keys
+S3_ENDPOINT: ""
+SSH_HOST_KEYS_S3_BUCKET: ""
+SSH_HOST_KEYS_S3_OBJECT: ""
+# AWS credentials
+LTS_ACCESS_KEY: ""
+LTS_SECRET_KEY: ""
+# ssh proxy
+enable_ssh_proxy_config: false
+sshpiper_dest_dir: "/opt/sshpiper"
+# rsyslog
+enable_rsyslog_config: true
+rsyslog_target: "*.* @master:514"
+# ssl certs
+enable_ssl_certs: false
+ssl_cert_s3_bucket: ""
+ssl_cert_key_location: "/etc/pki/tls/private"
+ssl_cert_file_location: "/etc/pki/tls/certs"
+ssl_cert_key: ""
+ssl_cert_file: ""
+ssl_cert_chain_file: ""
+ssl_apache_config: ""
+apache_service: "httpd"
+# rewrite map
+enable_rewrite_map: false
+target_groups:
+  - {"name": "gpfs4", "host": "login001", "default": True }
+  - {"name": "gpfs5", "host": "login002", "default": False }
+# account app
+account_app_port: 8000
+# fail2ban
+enable_fail2ban: false
+maxretry: 1
+findtime: 600
+bantime: 1200
+fail2ban_white_list: "127.0.0.1/8"
+# Node Exporter
+enable_node_exporter: false
+node_exporter_ver: "1.8.2"
+node_exporter_filename: "node_exporter-{{ node_exporter_ver }}.linux-amd64"
+node_exporter_user: node_exporter
+node_exporter_group: node_exporter
+node_exporter_port: 9100
+# CentOS Repo
+centos_base_url: "http://vault.centos.org"
+# ood_config
+enable_ood_config: false
+ood_internal_ip: OOD_INTERNAL_IP
+ood_hostname: ood-gpfs5
+login_hostname: login001
+ood_domain: https://rc.uab.edu
+account_app: account
+account_app_port: 8000
+account_app_bind_address: ["0.0.0.0:{{account_app_port}}"]
+ood_user_regex: "([^!]+?)(@uab.edu)?$"
+cluster_name: CoD
 ---
+# cheaha.node related
 hostname_lookup_table:
   - "172.20.0.24 cheaha-master02.cm.cluster cheaha-master02"
   - "172.20.0.22 cheaha-master01.cm.cluster cheaha-master01"
   - "172.20.0.25 master.cm.cluster master localmaster.cm.cluster localmaster ldapserver.cm.cluster ldapserver"
+domain_search_list:
+  - cm.cluster
+  - rc.uab.edu
+  - ib.cluster
+  - drac.cluster
+  - eth.cluster
+  - ib-hdr.cluster
+nameserver_list:
+  - 172.20.0.25
 bright_openldap_path: "/cm/local/apps/openldap"
 ldap_cert_path: "{{bright_openldap_path}}/etc/certs"
 ldap_uri: "ldaps://ldapserver"
+# proxy_config
+target_groups:
+  - {"name": "gpfs5", "host": "login002", "default": False, "authorized_keys":"/gpfs5/data/user/home/$DOWNSTREAM_USER/.ssh/authorized_keys", "private_key":"/gpfs5/data/user/home/$DOWNSTREAM_USER/.ssh/id_ecdsa"}
+  - {"name": "gpfs4", "host": "login001", "default": True, "authorized_keys":"/gpfs4/data/user/home/$DOWNSTREAM_USER/.ssh/authorized_keys", "private_key":"/gpfs4/data/user/home/$DOWNSTREAM_USER/.ssh/id_ecdsa"}
@@ -5,8 +5,5 @@
   roles:
     - { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
     - { name: 'install_packages', tags: 'install_packages' }
-    - { name: 'pam_slurm_adopt', tags: 'pam_slurm_adopt' }
     - { name: 'install_nhc', tags: 'install_nhc'}
-- name: Setup node for use as a virtual cheaha node
-  ansible.builtin.import_playbook: cheaha.yml
@@ -6,6 +6,3 @@
     - { name: 'fix_centos_repo', tags: 'fix_centos_repo' }
     - { name: 'install_packages', tags: 'install_packages' }
     - { name: 'install_zsh', tags: 'install_zsh' }
-- name: Setup node for use as a virtual cheaha node
-  ansible.builtin.import_playbook: cheaha.yml
@@ -11,6 +11,8 @@
     path: /etc/dhcp/dhclient.conf
     insertbefore: BOF
     line: 'append domain-name " cm.cluster rc.uab.edu ib.cluster drac.cluster eth.cluster ib-hdr.cluster";'
+    create: true
+    state: present
 - name: Template resolv.conf
   ansible.builtin.template:
......
---
- name: Install fail2ban
  ansible.builtin.package:
    name: "{{ item }}"
    state: present
  loop:
    - fail2ban
    - fail2ban-firewalld

- name: Configure fail2ban
  ansible.builtin.template:
    src: "{{ item.src }}"
    dest: "{{ item.dest }}"
    backup: true
  loop:
    - { src: 'jail.local.j2', dest: '/etc/fail2ban/jail.local' }
    - { src: 'sshpiperd_filter.local.j2', dest: '/etc/fail2ban/filter.d/sshpiperd.local' }
    - { src: 'sshpiperd_jail.local.j2', dest: '/etc/fail2ban/jail.d/sshpiperd.local' }

- name: Activate the firewalld support for fail2ban
  ansible.builtin.command:
    cmd: mv /etc/fail2ban/jail.d/00-firewalld.conf /etc/fail2ban/jail.d/00-firewalld.local

- name: Configure firewalld to allow ssh and sshpiper traffic
  ansible.posix.firewalld:
    port: "{{ item }}"
    zone: public
    state: enabled
    permanent: true
  loop:
    - 2222/tcp
    - 22/tcp

- name: Enable and start firewalld
  ansible.builtin.service:
    name: firewalld
    enabled: true
    state: restarted

- name: Enable and start fail2ban
  ansible.builtin.service:
    name: fail2ban
    enabled: true
    state: restarted
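After these tasks run, the jail state can be checked on the node; a verification sketch, assuming the sshpiperd jail and filter templates below have been installed and both services came up:

# list active jails, then show counters for the sshpiperd jail
fail2ban-client status
fail2ban-client status sshpiperd
# confirm the 22/tcp and 2222/tcp openings made above
firewall-cmd --zone=public --list-ports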
[DEFAULT]
banaction = firewalld
bantime = {{ bantime }}
ignoreip = {{ fail2ban_white_list }}
[sshd]
enabled = true
# Refer to https://github.com/fail2ban/fail2ban/wiki/Developing-Regex-in-Fail2ban for developing regex using fail2ban
#
[INCLUDES]
before = common.conf
[DEFAULT]
_daemon = sshpiperd
__iso_datetime = "\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:[+-]\d{2}:\d{2}|Z)"
__pref = time=%(__iso_datetime)s level=(?:debug|error)
[Definition]
# Define the prefix regex for the log lines
prefregex = ^<F-MLFID>%(__prefix_line)s%(__pref)s</F-MLFID>\s+<F-CONTENT>.+</F-CONTENT>$
# Failregex to match the specific failure log lines (prefregex is automatically included)
failregex = ^msg="connection from .*failtoban: ip <HOST> too auth many failures"$
ignoreregex =
mode = normal
maxlines = 1
# This configuration will block the remote host after {{maxretry}} failed SSH login attempts.
[sshpiperd]
enabled = true
filter = sshpiperd
logpath = /var/log/messages
port = 22
maxretry = {{ maxretry }}
backend = auto
findtime = {{ findtime }}
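The filter can be exercised against real log data before relying on the jail; a sketch, assuming the filter landed at /etc/fail2ban/filter.d/sshpiperd.local and sshpiperd logs to /var/log/messages as configured above:

# reports which lines match the failregex and which host IPs were extracted
fail2ban-regex /var/log/messages /etc/fail2ban/filter.d/sshpiperd.local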
@@ -15,6 +15,6 @@
   ansible.builtin.replace:
     path: "{{ item }}"
     regexp: '^#baseurl=http://mirror.centos.org'
-    replace: 'baseurl=http://vault.centos.org'
+    replace: 'baseurl={{ centos_base_url }}'
     backup: yes
   with_items: "{{ repo_files.stdout_lines }}"
---
- name: Download node_exporter binary
  ansible.builtin.get_url:
    url: "https://github.com/prometheus/node_exporter/releases/download/v{{ node_exporter_ver }}/{{ node_exporter_filename }}.tar.gz"
    dest: "/tmp/{{ node_exporter_filename }}.tar.gz"

- name: Extract node_exporter
  ansible.builtin.unarchive:
    src: "/tmp/{{ node_exporter_filename }}.tar.gz"
    dest: "/tmp"
    remote_src: yes

- name: Create system group for user account {{ node_exporter_group }}
  ansible.builtin.group:
    name: "{{ node_exporter_group }}"
    system: true
    state: present

- name: Create system user account {{ node_exporter_user }}
  ansible.builtin.user:
    name: "{{ node_exporter_user }}"
    comment: Prometheus node_exporter system account
    group: "{{ node_exporter_group }}"
    system: true
    home: /var/lib/node_exporter
    create_home: false
    shell: /sbin/nologin
    state: present

- name: Copy node_exporter binary
  ansible.builtin.copy:
    src: "/tmp/{{ node_exporter_filename }}/node_exporter"
    dest: /usr/local/bin/node_exporter
    remote_src: yes
    owner: root
    group: root
    mode: 0755

- name: Copy systemd unit file
  ansible.builtin.template:
    src: node_exporter.service.j2
    dest: /etc/systemd/system/node_exporter.service
    owner: root
    group: root
    mode: '0644'

- name: Clean up /tmp
  ansible.builtin.file:
    path: "/tmp/{{ item }}"
    state: absent
  loop:
    - "{{ node_exporter_filename }}.tar.gz"
    - "{{ node_exporter_filename }}"

- name: Restart node_exporter service
  ansible.builtin.systemd:
    daemon_reload: yes
    name: node_exporter
    state: restarted
    enabled: true

- name: Collect facts about system services
  ansible.builtin.service_facts:

- name: Configure firewalld to allow prometheus
  ansible.posix.firewalld:
    port: "{{ node_exporter_port }}/tcp"
    zone: public
    state: enabled
    permanent: true
  when:
    - "'firewalld.service' in ansible_facts.services"
    - ansible_facts.services["firewalld.service"].state == "running"

- name: Enable and start firewalld
  ansible.builtin.service:
    name: firewalld
    enabled: true
    state: restarted
  when:
    - "'firewalld.service' in ansible_facts.services"
    - ansible_facts.services["firewalld.service"].state == "running"
[Unit]
Description=Node Exporter
After=network.target
[Service]
User={{ node_exporter_user }}
Group={{ node_exporter_group }}
Type=simple
ExecStart=/usr/local/bin/node_exporter --web.listen-address=:{{ node_exporter_port }} --collector.filesystem.mount-points-exclude "^/(dev|proc|run/user/.+|run/credentials/.+|sys|var/lib/docker/.+)($|/)" --collector.filesystem.fs-types-exclude "^(autofs|binfmt_misc|bpf|cgroup|tmpfs|sunrpc|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$"
[Install]
WantedBy=multi-user.target
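Once the unit is installed and started, the exporter can be checked locally; a minimal sketch, assuming the default node_exporter_port of 9100 from the group variables:

# confirm the service is active and metrics are being served
systemctl status node_exporter
curl -s http://localhost:9100/metrics | head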
@@ -7,7 +7,6 @@
     - nss-pam-ldapd
     - openldap
     - openldap-clients
-    - openldap-servers
     - sssd-ldap
 - name: Update nsswitch.conf to look for ldap
......
 ---
 - name: Create base directories
   ansible.builtin.file:
-    path: "{{ item.dir }}"
+    path: "{{ item.path }}"
     state: directory
     mode: "{{ item.mode }}"
   loop:
-    - { dir: /local, mode: '0777' }
-    - { dir: /scratch, mode: '0755' }
-    - { dir: /share, mode: '0755' }
-    - { dir: /data/rc/apps, mode: '0755' } # this is only required for the symlink to be happy
-    - { dir: /data/user, mode: '0755' }
-    - { dir: /data/project, mode: '0755' }
+    - { path: /local, mode: '0777' }
+    - { path: /share, mode: '0755' }
+- name: Create mountpoint dirs
+  ansible.builtin.file:
+    path: "{{ item.path }}"
+    state: directory
+    mode: "{{ item.mode }}"
+  loop:
+    "{{ autofs_mounts }}"
 - name: Remove unused entry in master map
   ansible.builtin.replace:
@@ -29,12 +33,7 @@
     line: "{{ item.mount_point }} /etc/auto.{{ item.map_name }}"
     create: yes
   loop:
-    - { mount_point: "/cm/shared", map_name: "cm-share" }
-    - { mount_point: "/data/project", map_name: "data-project" }
-    - { mount_point: "/data/user", map_name: "data-user" }
-    - { mount_point: "/data/rc/apps", map_name: "data-rc-apps" }
-    - { mount_point: "/-", map_name: "scratch" }
-    - { mount_point: "/home", map_name: "home" }
+    "{{ autofs_mounts }}"
 - name: Set up autofs map files
   ansible.builtin.lineinfile:
@@ -42,12 +41,7 @@
     line: "{{ item.key }} -{{ item.opts }} {{ item.src }}"
     create: true
   loop:
-    - { map_name: "cm-share", key: "*", src: "gpfs.rc.uab.edu:/data/cm/shared-8.2/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
-    - { map_name: "data-project", key: "*", src: "gpfs.rc.uab.edu:/data/project/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
-    - { map_name: "data-user", key: "*", src: "gpfs.rc.uab.edu:/data/user/&", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
-    - { map_name: "data-rc-apps", key: "*", src: "gpfs.rc.uab.edu:/data/rc/apps/&", opts: "fstype=nfs,vers=3,_netdev,defaults" }
-    - { map_name: "scratch", key: "/scratch", src: "gpfs.rc.uab.edu:/scratch", opts: "fstype=nfs,vers=3,_netdev,local_lock=posix,defaults" }
-    - { map_name: "home", key: "*", src: ":/data/user/home/&", opts: 'fstype=bind' }
+    "{{ autofs_mounts }}"
 - name: Create symbolic links
   ansible.builtin.file:
@@ -60,7 +54,8 @@
   loop:
     - { src: /data/rc/apps, dest: /share/apps }
-- name: Enable autofs service
+- name: Enable and start autofs service
   ansible.builtin.service:
     name: autofs
     enabled: true
+    state: restarted
 ---
 - name: Create base directories
   ansible.builtin.file:
-    path: "{{ item }}"
+    path: "{{ item.path }}"
     state: directory
-    mode: '0755'
+    mode: "{{ item.mode }}"
   loop:
     "{{ mount_points }}"
 - name: Mount the directories
   ansible.posix.mount:
-    src: "master:{{ item }}"
-    path: "{{ item }}"
-    opts: rw,sync,hard
+    src: "{{ item.src }}"
+    path: "{{ item.path }}"
+    opts: "{{ item.opts }}"
     state: mounted
-    fstype: nfs
+    fstype: "{{ item.fstype | default('nfs') }}"
   loop:
     "{{ mount_points }}"
 ---
 - name: nfs_mounts using fstab
   include_tasks: fstab.yml
-  when: not use_autofs
+  when: use_fstab
 - name: nfs_mounts using autofs
   include_tasks: autofs.yml
......
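For illustration, the gpfs5 entry from the autofs_mounts variable defined earlier would cause the master-map and map-file tasks above to write roughly the following lines (rendered from the line templates shown; exact destination paths depend on the truncated task parameters):

# auto.master entry: {{ item.mount_point }} /etc/auto.{{ item.map_name }}
/gpfs5 /etc/auto.gpfs5
# map file entry: {{ item.key }} -{{ item.opts }} {{ item.src }}
* -fstype=nfs,vers=3,_netdev,default master:/gpfs5/&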
const fs = require('fs');
const http = require('http');
const path = require('path');
const WebSocket = require('ws');
const express = require('express');
const pty = require('node-pty');
const hbs = require('hbs');
const dotenv = require('dotenv');
const Tokens = require('csrf');
const url = require('url');
const yaml = require('js-yaml');
const glob = require('glob');
const port = 3000;
const host_path_rx = '/ssh/([^\\/\\?]+)([^\\?]+)?(\\?.*)?$';
const helpers = require('./utils/helpers');
const pingInterval = 30000;
// Read in environment variables
dotenv.config({path: '.env.local'});
if (process.env.NODE_ENV === 'production') {
dotenv.config({path: '/etc/ood/config/apps/shell/env'});
}
// Keep app backwards compatible
if (fs.existsSync('.env')) {
console.warn('[DEPRECATION] The file \'.env\' is being deprecated. Please move this file to \'/etc/ood/config/apps/shell/env\'.');
dotenv.config({path: '.env'});
}
// Load color themes
var color_themes = {dark: [], light: []};
glob.sync('./color_themes/light/*').forEach(f => color_themes.light.push(require(path.resolve(f))));
glob.sync('./color_themes/dark/*').forEach(f => color_themes.dark.push(require(path.resolve(f))));
color_themes.json_array = JSON.stringify([...color_themes.light, ...color_themes.dark]);
const tokens = new Tokens({});
const secret = tokens.secretSync();
// Create all your routes
var router = express.Router();
router.get(['/', '/ssh'], function (req, res) {
res.redirect(req.baseUrl + '/ssh/default');
});
router.get('/ssh*', function (req, res) {
var theHost, theDir;
[theHost, theDir] = host_and_dir_from_url(req.url);
res.render('index',
{
baseURI: req.baseUrl,
csrfToken: tokens.create(secret),
host: theHost,
dir: theDir,
colorThemes: color_themes,
siteTitle: (process.env.OOD_DASHBOARD_TITLE || "Open OnDemand"),
});
});
router.use(express.static(path.join(__dirname, 'public')));
// Setup app
var app = express();
// Setup template engine
app.set('view engine', 'hbs');
app.set('views', path.join(__dirname, 'views'));
// Mount the routes at the base URI
app.use(process.env.PASSENGER_BASE_URI || '/', router);
// Setup websocket server
const server = new http.createServer(app);
const wss = new WebSocket.Server({ noServer: true });
let host_allowlist = new Set;
if (process.env.OOD_SSHHOST_ALLOWLIST){
host_allowlist = new Set(process.env.OOD_SSHHOST_ALLOWLIST.split(':'));
}
let default_sshhost, first_available_host;
glob.sync(path.join((process.env.OOD_CLUSTERS || '/etc/ood/config/clusters.d'), '*.y*ml'))
.map(yml => {
try {
return yaml.safeLoad(fs.readFileSync(yml));
} catch(err) { /** just keep going. dashboard should have an alert about it */}
})
.filter(config => (config && config.v2 && config.v2.login && config.v2.login.host) && ! (config.v2 && config.v2.metadata && config.v2.metadata.hidden))
.forEach((config) => {
let host = config.v2.login.host; //Already did checking above
let isDefault = config.v2.login.default;
host_allowlist.add(host);
if (isDefault) default_sshhost = host;
if (!first_available_host) first_available_host = host;
});
default_sshhost = process.env.OOD_DEFAULT_SSHHOST || process.env.DEFAULT_SSHHOST || default_sshhost || first_available_host;
if (default_sshhost) host_allowlist.add(default_sshhost);
function host_and_dir_from_url(url){
let match = url.match(host_path_rx),
hostname = null,
directory = null;
if (match) {
hostname = match[1] === "default" ? default_sshhost : match[1];
directory = match[2] ? decodeURIComponent(match[2]) : null;
}
return [hostname, directory];
}
function heartbeat() {
this.isAlive = true;
}
wss.on('connection', function connection (ws, req) {
var dir,
term,
args,
host,
cmd = process.env.OOD_SSH_WRAPPER || 'ssh';
ws.isAlive = true;
ws.on('pong', heartbeat);
console.log('Connection established');
[host, dir] = host_and_dir_from_url(req.url);
args = dir ? [host, '-t', 'cd \'' + dir.replace(/\'/g, "'\\''") + '\' ; exec ${SHELL} -l'] : [host];
process.env.LANG = 'en_US.UTF-8'; // this patch (from b996d36) lost when removing wetty (2c8a022)
term = pty.spawn(cmd, args, {
name: 'xterm-16color',
cols: 80,
rows: 30
});
console.log('Opened terminal: ' + term.pid);
term.on('data', function (data) {
ws.send(data, function (error) {
if (error) console.log('Send error: ' + error.message);
});
});
term.on('error', function (error) {
ws.close();
});
term.on('close', function () {
ws.close();
});
ws.on('message', function (msg) {
msg = JSON.parse(msg);
if (msg.input) term.write(msg.input);
if (msg.resize) term.resize(parseInt(msg.resize.cols), parseInt(msg.resize.rows));
});
ws.on('close', function () {
term.end();
console.log('Closed terminal: ' + term.pid);
});
});
const interval = setInterval(function ping() {
wss.clients.forEach(function each(ws) {
if (ws.isAlive === false) return ws.terminate();
ws.isAlive = false;
ws.ping();
});
}, pingInterval);
function custom_server_origin(default_value = null){
var custom_origin = null;
if(process.env.OOD_SHELL_ORIGIN_CHECK) {
// if ENV is set, do not use default!
if(process.env.OOD_SHELL_ORIGIN_CHECK.startsWith('http')){
custom_origin = process.env.OOD_SHELL_ORIGIN_CHECK;
}
}
else {
custom_origin = default_value;
}
return custom_origin;
}
function default_server_origin(headers){
var origin = null;
if (headers['x-forwarded-proto'] && headers['x-forwarded-host']){
origin = headers['x-forwarded-proto'] + "://" + headers['x-forwarded-host']
}
return origin;
}
server.on('upgrade', function upgrade(request, socket, head) {
const requestToken = new URLSearchParams(url.parse(request.url).search).get('csrf'),
client_origin = request.headers['origin'],
server_origin = custom_server_origin(default_server_origin(request.headers));
var host, dir;
[host, dir] = host_and_dir_from_url(request.url);
if (client_origin &&
client_origin.startsWith('http') &&
server_origin && client_origin !== server_origin) {
socket.write([
'HTTP/1.1 401 Unauthorized',
'Content-Type: text/html; charset=UTF-8',
'Content-Encoding: UTF-8',
'Connection: close',
'X-OOD-Failure-Reason: invalid origin',
].join('\r\n') + '\r\n\r\n');
socket.destroy();
} else if (!tokens.verify(secret, requestToken)) {
socket.write([
'HTTP/1.1 401 Unauthorized',
'Content-Type: text/html; charset=UTF-8',
'Content-Encoding: UTF-8',
'Connection: close',
'X-OOD-Failure-Reason: bad csrf token',
].join('\r\n') + '\r\n\r\n');
socket.destroy();
} else if (!helpers.hostInAllowList(host_allowlist, host)) { // host not in allowlist
socket.write([
'HTTP/1.1 401 Unauthorized',
'Content-Type: text/html; charset=UTF-8',
'Content-Encoding: UTF-8',
'Connection: close',
'X-OOD-Failure-Reason: host not specified in allowlist or cluster configs',
].join('\r\n') + '\r\n\r\n');
socket.destroy();
} else {
wss.handleUpgrade(request, socket, head, function done(ws) {
wss.emit('connection', ws, request);
});
}
});
server.listen(port, function () {
console.log('Listening on ' + port);
});
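The allowlist above is the union of OOD_SSHHOST_ALLOWLIST and the login hosts discovered from the cluster configs; a usage sketch for the environment file, assuming it lives at /etc/ood/config/apps/shell/env as read in the production branch near the top (host names are illustrative):

# colon-separated, matching the split(':') above
OOD_SSHHOST_ALLOWLIST=login001:login002
OOD_DEFAULT_SSHHOST=login001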