aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorKristian Lyngstol <kly@kly.no>2019-01-29 21:48:54 +0100
committerKristian Lyngstol <kly@kly.no>2019-01-29 21:48:54 +0100
commit68d31e02b28487cb5dd552c68efd10b4973f4169 (patch)
tree24eb002a4966b9ce47aef6c8bcc4c891cb1bb97d
parent3b1ff674784205218c215212fd19d9cffd2ac708 (diff)
parent4306bc4f9c5ff40a5d56f700a2d753345188605f (diff)
Merge branch 'master' of github.com:tech-server/gondul
-rw-r--r--INSTALLING.rst8
-rw-r--r--ansible/roles/common/tasks/main.yml9
-rw-r--r--ansible/roles/influx/tasks/main.yml4
-rw-r--r--ansible/roles/postgres/files/postgresql.conf10
-rw-r--r--ansible/roles/postgres/tasks/main.yml26
-rw-r--r--ansible/roles/snmp/tasks/main.yml6
-rw-r--r--ansible/roles/web/files/varnish.vcl4
-rw-r--r--ansible/roles/web/handlers/main.yml15
-rw-r--r--ansible/roles/web/tasks/main.yml68
-rw-r--r--build/test/gondul-templating-test.Dockerfile6
-rw-r--r--doc/gondul-git-split.rst62
-rw-r--r--doc/gondul-receiver.rst185
-rw-r--r--templating/requirements.txt4
-rwxr-xr-xtemplating/templating.py180
14 files changed, 420 insertions, 167 deletions
diff --git a/INSTALLING.rst b/INSTALLING.rst
index 2a4bcb8..b04e226 100644
--- a/INSTALLING.rst
+++ b/INSTALLING.rst
@@ -4,8 +4,8 @@ Installing Gondul
Requirements
------------
-- Debian Stable (jessie) with backports (possibly newer)
-- Ansible v2.1 or newer (recommended: from backports)
+- Debian Stable (stretch) with backports (possibly newer)
+- Ansible v2.7 or newer (recommended: from backports)
- A harddrive of some size. Recommended: SSD. 200GB should be sufficient
for almost any party.
- CPU: Depends on client-load. Most semi-modern cpu's will be more than
@@ -27,9 +27,9 @@ As root:
# YOURUSER=kly
# apt-get install sudo git
# echo ${YOURUSER} ALL=NOPASSWD: ALL >> /etc/sudoers
- # echo deb http://http.debian.net/debian jessie-backports main non-free contrib > /etc/apt/sources.list.d/bp.list
+ # echo deb http://http.debian.net/debian stretch-backports main non-free contrib > /etc/apt/sources.list.d/bp.list
# apt-get update
- # apt-get install ansible/jessie-backports
+ # apt-get install ansible/stretch-backports
As ``$YOURUSER``::
diff --git a/ansible/roles/common/tasks/main.yml b/ansible/roles/common/tasks/main.yml
index 562af85..4c27c32 100644
--- a/ansible/roles/common/tasks/main.yml
+++ b/ansible/roles/common/tasks/main.yml
@@ -1,3 +1,12 @@
+- name: Install basic packages
+ apt:
+ name: [
+ 'curl',
+ 'vim',
+ 'git',
+ 'iptables-persistent']
+ state: present
+
- name: Gondul-repo
become: true
tags:
diff --git a/ansible/roles/influx/tasks/main.yml b/ansible/roles/influx/tasks/main.yml
index 236c2a9..cb3a784 100644
--- a/ansible/roles/influx/tasks/main.yml
+++ b/ansible/roles/influx/tasks/main.yml
@@ -1,9 +1,7 @@
- name: Install apt-packages
apt:
- name: "{{ item }}"
+ name: 'apt-transport-https'
state: present
- with_items:
- apt-transport-https
- name: Import InfluxDB GPG signing key
apt_key: url=https://repos.influxdata.com/influxdb.key state=present
diff --git a/ansible/roles/postgres/files/postgresql.conf b/ansible/roles/postgres/files/postgresql.conf
index cf6e67e..7c45ea4 100644
--- a/ansible/roles/postgres/files/postgresql.conf
+++ b/ansible/roles/postgres/files/postgresql.conf
@@ -38,15 +38,15 @@
# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.
-data_directory = '/var/lib/postgresql/9.4/main' # use data in another directory
+data_directory = '/var/lib/postgresql/9.6/main' # use data in another directory
# (change requires restart)
-hba_file = '/etc/postgresql/9.4/main/pg_hba.conf' # host-based authentication file
+hba_file = '/etc/postgresql/9.6/main/pg_hba.conf' # host-based authentication file
# (change requires restart)
-ident_file = '/etc/postgresql/9.4/main/pg_ident.conf' # ident configuration file
+ident_file = '/etc/postgresql/9.6/main/pg_ident.conf' # ident configuration file
# (change requires restart)
# If external_pid_file is not explicitly set, no extra PID file is written.
-external_pid_file = '/var/run/postgresql/9.4-main.pid' # write an extra PID file
+external_pid_file = '/var/run/postgresql/9.6-main.pid' # write an extra PID file
# (change requires restart)
@@ -450,7 +450,7 @@ log_timezone = 'UTC'
#track_functions = none # none, pl, all
#track_activity_query_size = 1024 # (change requires restart)
#update_process_title = on
-stats_temp_directory = '/var/run/postgresql/9.4-main.pg_stat_tmp'
+stats_temp_directory = '/var/run/postgresql/9.6-main.pg_stat_tmp'
# - Statistics Monitoring -
diff --git a/ansible/roles/postgres/tasks/main.yml b/ansible/roles/postgres/tasks/main.yml
index 9adeafe..902eeb9 100644
--- a/ansible/roles/postgres/tasks/main.yml
+++ b/ansible/roles/postgres/tasks/main.yml
@@ -1,14 +1,13 @@
+# Postgres
+# https://github.com/ansible/ansible/issues/16048#issuecomment-229012509
+#
- name: Install db-packages
apt:
- name: "{{ item }}"
+ name: ['postgresql', 'python-psycopg2', 'sudo']
state: present
- with_items:
- - postgresql-9.4
- - python-psycopg2
- - sudo
- name: Drop postgresql-config
copy:
- dest: /etc/postgresql/9.4/main/postgresql.conf
+ dest: /etc/postgresql/9.6/main/postgresql.conf
src: postgresql.conf
- name: Add db to hosts
lineinfile:
@@ -25,15 +24,24 @@
state: present
line: "{{ whoami.stdout }} ALL=(postgres) NOPASSWD: ALL"
- name: Make postgres-db
- become_user: postgres
postgresql_db:
name: nms
-- name: Ensure a valid postgres-user
+ become: true
become_user: postgres
+ vars:
+ ansible_ssh_pipelining: true
+- name: Ensure a valid postgres-user
postgresql_user:
db: nms
name: nms
password: risbrod
-- name: Import SQL
+ become: true
become_user: postgres
+ vars:
+ ansible_ssh_pipelining: true
+- name: Import SQL
shell: psql nms < /opt/gondul/ansible/roles/postgres/files/schema.sql
+ become: true
+ become_user: postgres
+ vars:
+ ansible_ssh_pipelining: true
diff --git a/ansible/roles/snmp/tasks/main.yml b/ansible/roles/snmp/tasks/main.yml
index 2cb7165..6056a25 100644
--- a/ansible/roles/snmp/tasks/main.yml
+++ b/ansible/roles/snmp/tasks/main.yml
@@ -1,8 +1,10 @@
-- file:
+- name: Create data directory
+ file:
path: /opt/gondul/data
state: directory
mode: 0755
-- stat:
+- name: register mibdir
+ stat:
path: /opt/gondul/data/mibs
register: mibdir
- name: Get mibs
diff --git a/ansible/roles/web/files/varnish.vcl b/ansible/roles/web/files/varnish.vcl
index b082971..2cdbbfe 100644
--- a/ansible/roles/web/files/varnish.vcl
+++ b/ansible/roles/web/files/varnish.vcl
@@ -3,12 +3,12 @@
vcl 4.0;
backend default {
- .host = "localhost";
+ .host = "::1";
.port = "8080";
}
backend influx {
- .host = "localhost";
+ .host = "::1";
.port = "8086";
}
diff --git a/ansible/roles/web/handlers/main.yml b/ansible/roles/web/handlers/main.yml
index 3f71f4c..5d36f51 100644
--- a/ansible/roles/web/handlers/main.yml
+++ b/ansible/roles/web/handlers/main.yml
@@ -1,7 +1,16 @@
---
- name: restart apache
- service: name=apache2 state=restarted
+ systemd:
+ state: restarted
+ daemon_reload: yes
+ name: apache2
+
- name: restart varnish
- service: name=varnish state=restarted
+ systemd:
+ state: restarted
+ daemon_reload: yes
+ name: varnish
+
- name: reload systemd
- command: systemctl daemon-reload
+ systemd:
+ daemon_reload: yes
diff --git a/ansible/roles/web/tasks/main.yml b/ansible/roles/web/tasks/main.yml
index dce3f4c..1c89947 100644
--- a/ansible/roles/web/tasks/main.yml
+++ b/ansible/roles/web/tasks/main.yml
@@ -1,39 +1,38 @@
- name: Install front-packages
apt:
- name: "{{ item }}"
+ name: [
+ 'libcapture-tiny-perl',
+ 'libcommon-sense-perl',
+ 'libdata-dumper-simple-perl',
+ 'libdbd-pg-perl',
+ 'libdbi-perl',
+ 'libdigest-perl',
+ 'libgd-perl',
+ 'libgeo-ip-perl',
+ 'libhtml-parser-perl',
+ 'libhtml-template-perl',
+ 'libjson-perl',
+ 'libjson-xs-perl',
+ 'libnetaddr-ip-perl',
+ 'libnet-cidr-perl',
+ 'libnet-ip-perl',
+ 'libnet-oping-perl',
+ 'libnet-rawip-perl',
+ 'libsnmp-perl',
+ 'libsocket6-perl',
+ 'libsocket-perl',
+ 'libswitch-perl',
+ 'libtimedate-perl',
+ 'perl',
+ 'perl-base',
+ 'perl-modules',
+ 'libfreezethaw-perl',
+ 'apache2',
+ 'libxml2-dev',
+ 'build-essential',
+ 'cpanminus',
+ 'apt-transport-https']
state: present
- with_items:
- - libcapture-tiny-perl
- - libcommon-sense-perl
- - libdata-dumper-simple-perl
- - libdbd-pg-perl
- - libdbi-perl
- - libdigest-perl
- - libgd-perl
- - libgeo-ip-perl
- - libhtml-parser-perl
- - libhtml-template-perl
- - libjson-perl
- - libjson-xs-perl
- - libnetaddr-ip-perl
- - libnet-cidr-perl
- - libnet-ip-perl
- - libnet-oping-perl
- - libnet-rawip-perl
- - libsnmp-perl
- - libsocket6-perl
- - libsocket-perl
- - libswitch-perl
- - libtimedate-perl
- - perl
- - perl-base
- - perl-modules
- - libfreezethaw-perl
- - apache2
- - libxml2-dev
- - build-essential
- - cpanminus
- - apt-transport-https
- name: Add packagecloud.io Varnish apt key.
apt_key:
@@ -85,5 +84,6 @@
ignore_errors: true
notify: restart apache
-- cpanm:
+- name: Install InfluxDB module
+ cpanm:
name: AnyEvent::InfluxDB
diff --git a/build/test/gondul-templating-test.Dockerfile b/build/test/gondul-templating-test.Dockerfile
index 80c09c0..6dabc9c 100644
--- a/build/test/gondul-templating-test.Dockerfile
+++ b/build/test/gondul-templating-test.Dockerfile
@@ -1,7 +1,9 @@
FROM debian:jessie
RUN apt-get update
-RUN apt-get -y install \
- python3-jinja2 \
+RUN apt-get -y install \
+ python3-jinja2 \
+ python3-netaddr \
+ python3-flask \
python3-requests
RUN mkdir -p /opt/gondul
diff --git a/doc/gondul-git-split.rst b/doc/gondul-git-split.rst
new file mode 100644
index 0000000..e4fd155
--- /dev/null
+++ b/doc/gondul-git-split.rst
@@ -0,0 +1,62 @@
+================
+Ny repo-struktur
+================
+
+Motivasjon
+==========
+
+Vi ønsker å dele opp gondul-repoet i mindre blokker, hensikten bak splitten
+er delt:
+
+- Klarere skille mellom ellers uavhengige komponenter
+- Enklere utvikling for alle
+- Deployment forenkles ved å ha det i et eget repo, og at hvert repo kan
+ levere en faktisk pakke som installeres om ønskelig.
+- Lettere å fryse enkelt-komponenter i forkant av arrangement.
+
+
+
+Nye repo
+========
+
+- Templating
+- Front - inkluderer web/{js,img,fonts,css} og web/index.html
+- lib - inkluderer include/ - Målet er nok å endre denne, da det ikke
+  egentlig er voldsomt overlapp mellom API og collectors, men inntil
+  videre er det eget repo.
+- api - inkluderer web/api
+- collectors - inkluderer collectors/
+- gondul/ - inkluderer ansible, dokumentasjon, default config.
+
+På sikt er målet at API er det eneste som snakker med postgres, men inntil
+videre vil fortsatt collectors snakke direkte. Collectors kan i prinsippet
+deles ytterligere opp om det ønskes, men det blir mye små-repoer.
+
+Navn:
+
+- gondul-templating
+- gondul-frontend
+- gondul-api
+- gondul-collectors
+- gondul
+
+Repoet som da heter "gondul" blir "master-repo" og et slags
+integrasjonsrepo. Det kan potensielt bli delt mer på sikt for å skille
+ansible-saker fra dokumentasjon mm. Dette venter vi med for å unngå
+usedvanlig mye fragmentering.
+
+Installasjon
+============
+
+Alt installeres default i /opt/$gondul-repo - Det vil være opp til
+master-repoet å binde ting sammen. Det vil typisk bety at apache/nginx
+settes opp for å levere statisk innhold for front og i dag levere CGI for
+API, templating settes opp på egen port - Varnish vil da sørge for faktisk
+ruting.
+
+Alt av "deployment" legges i "gondul"-repoet, men hver enkelt repo kan også
+ønske å levere rutiner for isolert installasjon av typen som hører hjemme i
+for eksempel en python-pakke eller debian-pakke.
+
+
+
diff --git a/doc/gondul-receiver.rst b/doc/gondul-receiver.rst
new file mode 100644
index 0000000..e663e19
--- /dev/null
+++ b/doc/gondul-receiver.rst
@@ -0,0 +1,185 @@
+
+=================================
+API for receiving time-based data
+=================================
+
+Background
+==========
+
+Today, Gondul has three different "collectors". The ping-collector, the
+snmp-collector and the dhcp log-tailer.
+
+They all write data directly to the postgres backend.
+
+Over the years we've tried different methods of storing time series data for
+actual graphs. To support this, we've stored some data in two sources. Most
+recently, we've stored stuff in postgres and influxdb.
+
+In addition to actually storing this data in different locations, we
+sometimes need to "massage" data, or change the database schema. A prime example
+for SNMP is to actually establish a tree-structure for port-data by picking
+up ifTable and ifXTable and building a "ports"-tree using ifIndex. Another
+example is the normalization of MAC addresses.
+
+We also need to do something with Virtual Chassis MIBs.
+
+While we've been able to do all this, the fact that these collectors all
+write directly to the postgres database creates a strong cross-dependency
+between the collectors, the database schema and the API. It has also created
+a strong dependency on time series database tools.
+
+This has made it difficult to safely experiment with enriching input-data
+without introducing critical bugs in collectors or breaking the north-bound
+API.
+
+This document outlines a way to reduce this problem.
+
+Concept
+=======
+
+The concept is to create a generic "time-based" API for all time-oriented
+data. The API will cover high-frequency producers like the ping collector,
+but also low-frequency producers like the operations log, or the DHCP
+tailer.
+
+While the API will be generic, it will provide just enough data to allow the
+receiver to identify the type of data and apply enrichments to it and thus
+treat it diffrently. By default, the data posted will just be written to
+postgres, but through enrichment add-ons, we can also chose to split a
+single SNMP poll into multiple entries (e.g.: individual entries for virtual
+chassis nodes), or re-arrange the data to produce interface-mapping.
+
+The enrichment will also be able to do basically anything with the data,
+including sending it to multiple other APIs - e.g. influx.
+
+While the first version does not deal with authentication, future versions
+should.
+
+Core API
+========
+
+The core API accepts N or more metrics in a single post.
+
+The core of the API will accept 3 fields:
+
+- `source` - a text-string identifying the source of the data, e.g. "dhcp",
+ "ping", "snmp". This should be sent either as a json text field, or as
+ part of the url. E.g., allow posting to
+ ``https://user:pass@gondul/api/write/gtimes/dhcp`` . The benefit of
+ linking this with the URL is that it will simplify authentication in the
+ future, allowing "write-only" accounts.
+- `metadata` - this is a generic JSON object that contains a number of fields
+ that will be indexed upon or used by enrichment. Example: ``{ "server":
+ "dhcpserver1", "time": "2019-01-05T15:00:10Z" }``.
+- `data` - an array of json-objects. Each object in the array must either
+ have a "time" field or the "metadata"-field must have a time field.
+
+
+Examples
+========
+
+Example 1, dhcp::
+
+ {
+ src: "dhcp",
+ metadata: {
+ server: "dhcpserver1"
+ },
+ data: [
+ {
+ type: "assignment",
+ time: "2001-01-01T15:12:01Z",
+ ip: "2001:db8::1",
+ circuit: "vlan123:e3-1:ge-0/0/1",
+ msg: "blatti foo"
+ },
+ {
+ type: "renew",
+ time: "2001-01-01T15:32:01Z",
+ ip: "2001:db8::1",
+ circuit: "vlan123:e3-1:ge-0/0/1",
+ msg: "blatti foo something"
+ }
+ ]
+ }
+
+Example 2, ping::
+
+ {
+ "src": "ping",
+ "metadata": {
+ "time": "2019-05-01T15:01:12Z"
+ },
+ "data": [
+ { "s": "e1-3", "l": 0.91211 },
+ { "s": "e1-2", "l": 0.12211 },
+ { "s": "e1-1", "l": 0.12311 },
+ { "s": "e3-1", "l": 1.12111 },
+ { "s": "e3-2", "l": null },
+ { "s": "e3-3", "l": 0.91211 },
+ { "s": "e3-4", "l": 0.91211 }
+ ]
+ }
+
+Example 3, oplog::
+
+ {
+ "src": "oplog",
+ "data": [
+ {
+ "system": "floor",
+ "user": "kristian",
+ "message": "lol",
+ "time": "2019-04-19T15:00:10Z"
+ }
+ ]
+ }
+
+Note that "metadata" is optional.
+
+Implementation plan
+===================
+
+The plan would be to start small. The first candidate is the dhcp log
+tailer, which needs to support IPv6 and thus needs a change.
+
+The first implementation would be a "hard-coded" perl API since that is what
+we already have. There is no current plan to migrate other producers to the
+new API at this time.
+
+The first implementation would not offer much in the way of generic storage
+for other users than the dhcp collector.
+
+Since particularly the ping collector can produce quite a lot of data, some
+care might be needed to support it. This will most likely require a
+different approach than the old CGI-based perl way of doing things.
+
+To allow a flexible enrichment-scheme, it might be necessary to implement a
+separate service in a more modern language. There are currently three worthy
+alternatives:
+
+Node.js has the benefit of using JavaScript which is already heavily used in
+Gondul, and is fairly fault-tolerant. There are also already plans to
+utilize node.js to do server-side parsing of health data. However, I'm
+unsure if it offers the speed or integration we need.
+
+Python is another alternative, which is also already used. It is slightly
+more mature than Node.js, but also doesn't really offer much else.
+
+The third alternative is Go, which will certainly provide us with the speed
+we need, but might not allow the development pace we require during an
+event.
+
+No conclusion is offered and at any rate, no plans to actually implement
+such a service exist nor should one be planned until we have more experience
+from the DHCP-collector implementation.
+
+Storage
+=======
+
+Storage is deliberately left OUT of the API definition, but for
+implementation-purposes we should assume postgres as the primary target with
+influx as a secondary target. Details of how this is done are intentionally
+left out of this document as this should not be relevant to any user of the
+API.
+
diff --git a/templating/requirements.txt b/templating/requirements.txt
new file mode 100644
index 0000000..20e96a0
--- /dev/null
+++ b/templating/requirements.txt
@@ -0,0 +1,4 @@
+jinja2
+requests
+flask
+netaddr
diff --git a/templating/templating.py b/templating/templating.py
index fff5d42..03b7dc9 100755
--- a/templating/templating.py
+++ b/templating/templating.py
@@ -1,115 +1,89 @@
-#!/usr/bin/env python3
+#!/usr/bin/python3
+
+import argparse
+import traceback
+import sys
-import requests,traceback
-from jinja2 import Template,Environment,FileSystemLoader,TemplateNotFound
-import json
import netaddr
-import http.server
-from enum import Enum
+import requests
+
+from flask import Flask, request
+from jinja2 import Environment, FileSystemLoader, TemplateNotFound
endpoints = "read/networks read/oplog read/snmp read/switches-management public/distro-tree public/config public/dhcp public/dhcp-summary public/ping public/switches public/switch-state".split()
-objects = dict()
+
+objects = {}
+
def getEndpoint(endpoint):
- r = requests.get("http://localhost:80/api/%s" % endpoint)
- if (r.status_code != 200):
- raise Exception("Bad status code for endpoint %s: %s" % (endpoint, r.status_code))
+ r = requests.get("http://localhost:80/api/{}".format(endpoint))
+ if r.status_code != 200:
+ raise Exception("Bad status code for endpoint {}: {}".format(endpoint, r.status_code))
return r.json()
+
def updateData():
for a in endpoints:
objects[a] = getEndpoint(a)
-def netmask(ip):
- return netaddr.IPNetwork(ip).netmask
-def cidr(ip):
- return netaddr.IPNetwork(ip).prefixlen
-def networkId(ip):
- return netaddr.IPNetwork(ip).ip
-def getFirstDhcpIp(ip):
- return netaddr.IPNetwork(ip)[3]
-def getLastDhcpIp(ip):
- return netaddr.IPNetwork(ip)[-1]
-def getDistro(src):
- return src.split(":")[0]
-def getPort(src):
- return src.split(":")[1]
-def getFirstFapIp(ip):
- return netaddr.IPNetwork(ip)[netaddr.IPNetwork(ip).size/2]
-
-env = Environment(loader=FileSystemLoader(['templates/','/opt/gondul/data/templates', '/opt/gondul/web/templates']), trim_blocks=True)
-
-env.filters["netmask"] = netmask
-env.filters["cidr"] = cidr
-env.filters["networkId"] = networkId
-env.filters["getFirstDhcpIp"] = getFirstDhcpIp
-env.filters["getLastDhcpIp"] = getLastDhcpIp
-env.filters["agentDistro"] = getDistro
-env.filters["agentPort"] = getPort
-env.filters["getFirstFapIP"] = getFirstFapIp
-
-class Mode(Enum):
- Get = 1
- Post = 2
-
-class MyHandler(http.server.BaseHTTPRequestHandler):
-
- options = dict()
-
- def parse_options(self):
- self.url = self.path[1:]
- self.options = dict()
- if self.url.find("?") != -1:
- (self.url, tmpoptions) = self.url.split("?")
- tmptuples = tmpoptions.split("&")
- for a in tmptuples:
- (x,y) = a.split("=")
- self.options[x] = y
-
- def generic(self, mode):
- updateData()
- self.parse_options()
- body = ""
- try:
- if mode == Mode.Get:
- template = env.get_template(self.url)
- elif mode == Mode.Post:
- length = self.headers.get('content-length')
- if not length:
- length = 0
- content = self.rfile.read(int(length)).decode('UTF-8')
- template = env.from_string(content)
- else:
- raise Exception("Invalid mode")
-
- body = template.render(objects=objects, options=self.options)
- self.send_response(200)
- except TemplateNotFound as err:
- body = "Template \"%s\" not found\n" % self.url
- self.send_response(404)
- except Exception as err:
- body = ("Templating of \"%s\" failed to render. Most likely due to an error in the template. Error transcript:\n\n%s\n----\n\n%s\n" % (self.url, err, traceback.format_exc()))
- if mode == Mode.Get:
- self.send_response(400)
- else:
- self.send_response(500)
- finally:
- self.send_header('Cache-Control','max-age=5, s-maxage=1')
- body = body.encode('UTF-8')
- self.send_header('Content-Length', int(len(body)))
- self.end_headers()
- self.wfile.write(body)
- self.wfile.flush()
-
- def do_GET(self):
- self.generic(Mode.Get)
-
- def do_POST(self):
- self.generic(Mode.Post)
-
-def run(server_class=http.server.HTTPServer, handler_class=http.server.BaseHTTPRequestHandler):
- server_address = ('localhost', 8081)
- httpd = server_class(server_address, handler_class)
- httpd.serve_forever()
-
-run(handler_class=MyHandler)
+
+env = Environment(loader=FileSystemLoader([]), trim_blocks=True)
+
+env.filters["netmask"] = lambda ip: netaddr.IPNetwork(ip).netmask
+env.filters["cidr"] = lambda ip: netaddr.IPNetwork(ip).prefixlen
+env.filters["networkId"] = lambda ip: netaddr.IPNetwork(ip).ip
+env.filters["getFirstDhcpIp"] = lambda ip: netaddr.IPNetwork(ip)[3]
+env.filters["getLastDhcpIp"] = lambda ip: netaddr.IPNetwork(ip)[-1]
+env.filters["agentDistro"] = lambda src: src.split(":")[0]
+env.filters["agentPort"] = lambda src: src.split(":")[1]
+env.filters["getFirstFapIP"] = lambda ip: netaddr.IPNetwork(ip)[netaddr.IPNetwork(ip).size / 2]
+
+app = Flask(__name__)
+
+
+@app.after_request
+def add_header(response):
+ if response.status_code == 200:
+ response.cache_control.max_age = 5
+ response.cache_control.s_maxage = 1
+ return response
+
+
+@app.route("/<path>", methods=["GET"])
+def root_get(path):
+ updateData()
+ try:
+ template = env.get_template(path)
+ body = template.render(objects=objects, options=request.args)
+ except TemplateNotFound:
+ return 'Template "{}" not found\n'.format(path), 404
+ except Exception as err:
+ return 'Templating of "{}" failed to render. Most likely due to an error in the template. Error transcript:\n\n{}\n----\n\n{}\n'.format(path, err, traceback.format_exc()), 400
+ return body, 200
+
+
+@app.route("/<path>", methods=["POST"])
+def root_post(path):
+ updateData()
+ try:
+ content = request.stream.read(int(request.headers["Content-Length"]))
+ template = env.from_string(content.decode("utf-8"))
+ body = template.render(objects=objects, options=request.args)
+ except Exception as err:
+ return 'Templating of "{}" failed to render. Most likely due to an error in the template. Error transcript:\n\n{}\n----\n\n{}\n'.format(path, err, traceback.format_exc()), 400
+ return body, 200
+
+
+parser = argparse.ArgumentParser(description="Process templates for gondul.", add_help=False)
+parser.add_argument("-t", "--templates", type=str, nargs="+", help="location of templates")
+parser.add_argument("-h", "--host", type=str, default="127.0.0.1", help="host address")
+parser.add_argument("-p", "--port", type=int, default=8080, help="host port")
+parser.add_argument("-d", "--debug", action="store_true", help="enable debug mode")
+
+args = parser.parse_args()
+env.loader.searchpath = args.templates
+
+if not sys.argv[1:]:
+ parser.print_help()
+
+app.run(host=args.host, port=args.port, debug=args.debug)