require('require-yaml');
const Docker = require('dockerode');
const handlebars = require('handlebars');
const {NodeSSH} = require('node-ssh');
const fs = require('fs-extra');
const shajs = require('sha.js');

const conf = require('./config.yml');
const package = require('./package.json');

let docker;       // dockerode client, created at startup
let template;     // compiled Handlebars template for haproxy.cfg
let lastInfoHash; // hash of the last applied configuration

const appName = package.name;
const isProduction = process.env.NODE_ENV === 'production';
const tmpDir = isProduction ? `/tmp/${appName}` : `${__dirname}/tmp`;
const hashFile = `${tmpDir}/config.hash`;
const confDir = conf.rproxy.confDir;

// service name -> configured HTTP target port (filled from config.yml)
const httpServices = new Map();
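
/**
 * Rebuilds the HAProxy configuration from the current Swarm state and pushes
 * it to every host in conf.rproxy.hosts over SSH. On the first run the map
 * files are uploaded too and the configuration is applied even if its hash
 * matches the one saved from a previous run.
 */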
async function updateProxy(firstRun) {
  console.log('Updating reverse proxy configuration.');

  // Obtaining Docker settings

  let info;

  if (!isProduction) {
    // In development, load a local fixture with the same {services, nodes} shape.
    info = require('./test.json');
  } else {
    info = {
      services: await docker.listServices(),
      nodes: await docker.listNodes()
    };
  }

  const workers = [];
  const managers = [];

  for (const node of info.nodes) {
    // Managers advertise `address:port`; keep only the address part.
    const address = node.ManagerStatus
      ? node.ManagerStatus.Addr.split(':')[0]
      : node.Status.Addr;
    const role = node.Spec && node.Spec.Role;

    const nodes = role == 'worker' ? workers : managers;
    nodes.push({
      name: node.Description.Hostname,
      address
    });
  }

  const http = [];
  const tcp = [];
  const udp = [];
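
  // Classify every published port of every Swarm service: the port a service
  // registered in config.yml listens on becomes an HTTP backend, any other
  // published port is forwarded as plain TCP or UDP.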
  for (const service of info.services) {
    const ports = service.Endpoint.Ports;
    if (!Array.isArray(ports) || !ports.length) continue;

    // A service deployed as `<name>_main` is exposed under the bare `<name>`.
    let name = service.Spec.Name;
    const match = name.match(/^(.+)_main$/);
    if (match) name = match[1];

    let httpPortIndex = null;

    if (httpServices.has(name)) {
      if (ports.length > 1) {
        // Prefer the TCP port whose target matches the configured HTTP port.
        const httpPort = httpServices.get(name) || 80;
        const index = ports.findIndex(
          p => p.TargetPort == httpPort && p.Protocol === 'tcp');
        if (index !== -1) httpPortIndex = index;
      }
      if (httpPortIndex === null) {
        // Otherwise fall back to the first published TCP port.
        const index = ports.findIndex(p => p.Protocol === 'tcp');
        if (index !== -1) httpPortIndex = index;
      }
    }

    for (let i = 0; i < ports.length; i++) {
      const port = ports[i];
      let backends;

      if (i === httpPortIndex)
        backends = http;
      else if (port.Protocol === 'tcp')
        backends = tcp;
      else
        backends = udp;

      backends.push({
        name,
        port: port.PublishedPort,
        targetPort: port.TargetPort
      });
    }
  }

  function sortFn(a, b) {
    return a.name > b.name ? 1 : a.name < b.name ? -1 : 0;
  }

  for (const list of [workers, managers, http, tcp, udp])
    list.sort(sortFn);

  const configString = template({
    http,
    tcp,
    udp,
    workers,
    managers,
    info
  });

  // Checking settings hash

  const infoHash = shajs('sha256')
    .update(configString)
    .digest('hex');
  console.log('Settings hash:', infoHash);

  if (lastInfoHash == infoHash && !firstRun) {
    console.log(`Settings haven't changed, aborting.`);
    return;
  }

  // Creating configuration file

  const tmpConf = `${tmpDir}/config.cfg`;
  fs.writeFileSync(tmpConf, configString);

  if (conf.debug) {
    const delimiter = '#' + '='.repeat(79);
    console.log(delimiter);
    console.log(`# ${confDir}`);
    console.log(delimiter);
    console.log(configString);
    console.log(delimiter);
  }

  // Updating reverse proxies

  const files = {
    local: tmpConf,
    remote: `${confDir}/haproxy.cfg`
  };

  for (const host of conf.rproxy.hosts) {
    console.log(`Updating host: ${host}`);
    // Outside production this is a dry run: log the host and skip the upload.
    if (!isProduction) continue;

    const sshClient = new NodeSSH();
    await sshClient.connect(Object.assign({host}, conf.rproxy.auth));
    await sshClient.putFiles([files]);
    if (firstRun)
      await sshClient.putDirectory(
        `${tmpDir}/maps`,
        `${confDir}/maps`,
        {recursive: true}
      );
    if (conf.rproxy.reloadCmd)
      await sshClient.execCommand(conf.rproxy.reloadCmd);
    await sshClient.dispose();
  }

  // Saving applied config hash

  lastInfoHash = infoHash;
  fs.writeFileSync(hashFile, infoHash);

  console.log('Configuration updated.');
}
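
// Startup: restore the saved configuration hash, build the HAProxy map files
// from config.yml, apply the configuration once, then reapply it (debounced)
// whenever a matching Docker event arrives.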
(async () => {
  console.log('Initializing.');
  let timeoutId;
  docker = new Docker(conf.docker);
  template = handlebars.compile(fs.readFileSync('rproxy.handlebars', 'utf8'));

  try {
    fs.mkdirSync(tmpDir);
  } catch (err) {
    if (err.code != 'EEXIST') throw err; // the tmp dir may already exist
  }

  if (fs.existsSync(hashFile)) {
    lastInfoHash = fs.readFileSync(hashFile, 'utf8');
    console.log('Saved settings hash:', lastInfoHash);
  }

  // Fetch backends

  const hostMap = []; // host        -> service
  const baseMap = []; // host + path -> service
  const https = [];   // services served over HTTPS
  const zoneMap = []; // service     -> access zone
  const zones = new Set();
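
  /*
   * Illustrative shape of `conf.domains` once require-yaml has loaded
   * config.yml. This is an assumed example matching the parsing in
   * addService() below, not taken from a real config:
   *
   *   {
   *     'example.com': {
   *       website: '$', // string rule: subdomain; '$' means the bare domain
   *       api: {
   *         port: 3000,
   *         https: true,
   *         zone: 'internal',
   *         rules: [{domain: 'api', path: '/v1'}]
   *       }
   *     }
   *   }
   */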
  for (const domain in conf.domains) {
    const domainConf = conf.domains[domain];
    for (const service in domainConf)
      addService(service, domainConf[service], domain);
  }

  function addService(service, serviceConf, mainDomain) {
    // A plain string is shorthand for a single routing rule.
    let rules;
    if (typeof serviceConf == 'string') {
      rules = serviceConf;
      serviceConf = undefined;
    }

    serviceConf = Object.assign({},
      conf.defaults,
      serviceConf
    );

    httpServices.set(service, serviceConf.port);

    if (serviceConf.https)
      https.push(service);
    if (serviceConf.zone) {
      zoneMap.push([service, serviceConf.zone]);
      zones.add(serviceConf.zone);
    }

    rules = rules || serviceConf.rules;

    // Default rule: route `<service>.<mainDomain>` to the service.
    if (!rules)
      rules = service;
    if (!Array.isArray(rules))
      rules = [rules];

    for (let rule of rules) {
      if (typeof rule == 'string')
        rule = {domain: rule};

      let domains = rule.domain;
      let paths = rule.path;
      if (!Array.isArray(domains))
        domains = [domains];
      if (!Array.isArray(paths))
        paths = [paths];

      for (const domain of domains) {
        for (const path of paths) {
          const fullDomain = domain && domain !== '$'
            ? `${domain}.${mainDomain}`
            : mainDomain;
          if (!path)
            hostMap.push([fullDomain, service]);
          else
            baseMap.push([fullDomain + path, service]);
        }
      }
    }
  }

  // Fetch ACLs

  const aclMap = []; // network -> ACL name
  const acls = [];
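
  /*
   * Illustrative shape of `conf.acls`. An assumed example matching the loop
   * below, not taken from a real config:
   *
   *   {
   *     office: {ips: ['10.0.0.0/8'], zones: ['internal']},
   *     admins: {ips: ['10.1.2.0/24'], zones: 'all'}
   *   }
   */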
  for (const acl in conf.acls) {
    const aclConf = conf.acls[acl];

    const ips = [];
    for (const ip of aclConf.ips) {
      aclMap.push([ip, acl]);
      ips.push(parseNet(ip));
    }

    acls.push({
      name: acl,
      ips,
      zones: aclConf.zones === 'all'
        ? new Set(zones)
        : new Set(aclConf.zones)
    });
  }

  // Parse 'a.b.c.d/prefix' into a 32-bit unsigned address and prefix length.
  function parseNet(net) {
    const netSplit = net.split('/');
    const mask = parseInt(netSplit[1], 10);
    const ip = netSplit[0].split('.')
      .reduce((ipInt, octet) => (ipInt << 8) + parseInt(octet, 10), 0) >>> 0;
    return {ip, mask};
  }
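
  // Zone inheritance: if one of ACL A's networks is contained in one of
  // ACL B's networks, A also gains access to all of B's zones.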
  for (const aAcl of acls) {
    for (const aNet of aAcl.ips) {
      for (const bAcl of acls) {
        if (aAcl === bAcl) continue;
        let match = false;
        for (const bNet of bAcl.ips) {
          match = bNet.mask === 0; // a /0 network contains everything
          if (bNet.mask > 0 && bNet.mask <= aNet.mask) {
            const netMask = (~0) << (32 - bNet.mask);
            const aSubnet = aNet.ip & netMask;
            const bSubnet = bNet.ip & netMask;
            match = aSubnet === bSubnet;
          }
          if (match) break;
        }
        if (match) {
          for (const zone of bAcl.zones)
            aAcl.zones.add(zone);
        }
      }
    }
  }

  const accessMap = [];
  for (const acl of acls)
    for (const zone of acl.zones)
      accessMap.push(`${acl.name}/${zone}`);

  // Generate maps
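
  // One HAProxy map file per lookup table; `acl.map` is written with the most
  // specific networks first, all other maps in lexicographic order.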
  const files = {
    host: hostMap,
    base: baseMap,
    zone: zoneMap,
    acl: aclMap,
    access: accessMap,
    https: https
  };

  function strSortFn(a, b) {
    return a[0] < b[0] ? -1 : a[0] > b[0] ? 1 : 0;
  }
  // Sort networks by descending prefix length (most specific first).
  function netSortFn(a, b) {
    const aMask = parseInt(a[0].split('/')[1], 10);
    const bMask = parseInt(b[0].split('/')[1], 10);
    return bMask - aMask;
  }

  const mapDir = `${tmpDir}/maps`;
  if (await fs.pathExists(mapDir))
    await fs.remove(mapDir); // fs-extra's remove is always recursive
  await fs.mkdir(mapDir);

  for (const file in files) {
    files[file].sort(file == 'acl'
      ? netSortFn
      : strSortFn
    );

    const fd = await fs.open(`${mapDir}/${file}.map`, 'w+');
    for (const map of files[file]) {
      if (Array.isArray(map))
        await fs.write(fd, `${map[0]} ${map[1]}\n`);
      else
        await fs.write(fd, `${map}\n`);
    }
    await fs.close(fd);
  }

  // Initialize

  await updateProxy(true);

  console.log('Listening for events.');
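
  // Debounce bursts of Docker events: schedule a single rebuild conf.delay
  // seconds after the first matching event and ignore the rest until it runs.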
  docker.getEvents({}, (err, stream) => {
    if (err || !stream) {
      console.error('Failed to monitor docker host', err);
      return;
    }

    stream.on('data', event => {
      event = JSON.parse(event);
      if (conf.events && conf.events.indexOf(event.Type) == -1) return;
      console.log(`Event: ${event.Type}: ${event.Action}`);

      if (timeoutId) return;
      timeoutId = setTimeout(async () => {
        timeoutId = null;
        await updateProxy();
      }, conf.delay * 1000);
    });
  });
})();