master #2
60 changed files with 803 additions and 246 deletions
5
LICENSE
Normal file
@ -0,0 +1,5 @@
© University of Ljubljana, Faculty of Computer and Information Science

Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14
README.md
@ -1,6 +1,10 @@
# FRI servers

These Ansible roles set up servers running various Linux distributions to participate in BGP routing. Device and IP address data are pulled from NetBox. A separate VRF `mgmt` is configured for an L2 management interface.
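One common way to feed this data to Ansible is a NetBox dynamic inventory; the following is only an illustrative sketch of such an inventory source (the endpoint and grouping are placeholders, not necessarily what this repository uses):

    # e.g. netbox.yml, read by the netbox.netbox.nb_inventory plugin
    plugin: netbox.netbox.nb_inventory
    api_endpoint: https://netbox.example.org
    # the API token can be supplied via the NETBOX_TOKEN environment variable
    validate_certs: true
    config_context: true
    interfaces: true
    group_by:
      - device_roles
      - platforms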
# Setup

This project is licensed under [0BSD](https://spdx.org/licenses/0BSD.html).

## Setup

Each physical server should have the following information recorded in NetBox:

@ -14,7 +18,7 @@ MAC addresses are used to rename interfaces in the host OS. Prefix for the manag

For Windows hosts the platform must be defined in NetBox to configure connection parameters.
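As an illustration only (these values are not taken from this repository), the connection parameters attached to such a platform typically boil down to something like:

    # hypothetical platform/group variables for managing Windows over OpenSSH
    ansible_connection: ssh
    ansible_shell_type: powershell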
# Run

## Run

Create a read-only token in NetBox. Define required variables:

@ -25,6 +29,10 @@ Run one-off tasks with (add `--key-file` or other options as necessary):

    ansible -m ping 'server-*'

Run a playbook with:
Run a playbook in check mode with:

    ansible-playbook setup.yml -l 'server-*' -CD

If everything seems OK, run:

    ansible-playbook setup.yml -l 'server-*'
|
|
|
|||
|
|
@ -65,7 +65,6 @@
|
|||
- acl
|
||||
- git
|
||||
- iproute2
|
||||
- logrotate
|
||||
- nftables
|
||||
- procps
|
||||
- rsync
|
||||
|
|
@ -98,33 +97,11 @@
|
|||
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: Enable QEMU guest agent
|
||||
when: is_virtual
|
||||
block:
|
||||
- name: Install QEMU guest agent package
|
||||
package:
|
||||
name: qemu-guest-agent
|
||||
|
||||
- name: Enable QEMU guest agent service
|
||||
service:
|
||||
name: qemu-guest-agent
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: Install automatic upgrade script
|
||||
copy:
|
||||
dest: /etc/periodic/weekly/
|
||||
src: unattended-upgrade
|
||||
mode: 0755
|
||||
|
||||
- name: Configure log rotation for automatic upgrades
|
||||
copy:
|
||||
dest: /etc/logrotate.d/unattended-upgrade
|
||||
src: unattended-upgrade.logrotate
|
||||
mode: 0644
|
||||
|
||||
- name: Set authorized SSH keys
|
||||
authorized_key:
|
||||
user: root
|
||||
exclusive: true
|
||||
key: "{{ ssh_keys | join('\n') }}"
|
||||
|
||||
- when: is_virtual
|
||||
include_tasks: vm.yml
|
||||
|
|
|
|||
25
roles/alpine/tasks/vm.yml
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
- name: Install QEMU guest agent package
|
||||
package:
|
||||
name: qemu-guest-agent
|
||||
|
||||
- name: Enable QEMU guest agent service
|
||||
service:
|
||||
name: qemu-guest-agent
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: Install logrotate
|
||||
package:
|
||||
name: logrotate
|
||||
|
||||
- name: Install automatic upgrade script
|
||||
copy:
|
||||
dest: /etc/periodic/weekly/
|
||||
src: unattended-upgrade
|
||||
mode: "0755"
|
||||
|
||||
- name: Configure log rotation for automatic upgrades
|
||||
copy:
|
||||
dest: /etc/logrotate.d/unattended-upgrade
|
||||
src: unattended-upgrade.logrotate
|
||||
mode: "0644"
|
||||
|
|
@ -1,10 +1,20 @@
|
|||
{# Loopback interface must be present so define it here if none exists. #}
|
||||
{% if interfaces | rejectattr("name", "==", "lo") %}
|
||||
{# Loopback interface must be present so create it here if none is defined in inventory. #}
|
||||
{% if not interfaces | selectattr("name", "==", "lo") %}
|
||||
auto lo
|
||||
iface lo
|
||||
|
||||
{% endif -%}
|
||||
|
||||
{# Define VRFs. #}
|
||||
{% for vrf in interfaces | selectattr("vrf") | map(attribute="vrf.name") %}
|
||||
auto {{ vrf }}
|
||||
iface {{ vrf }}
|
||||
pre-up ip link add $IFACE type vrf table {{ 100 + loop.index }}
|
||||
up ip link set dev $IFACE up
|
||||
post-down ip link del $IFACE
|
||||
|
||||
{% endfor -%}
|
||||
|
||||
{# Skip disabled and OOB management interfaces. #}
|
||||
{# For VMs we have to set the attribute manually (to false) so rejectattr works. #}
|
||||
{% for iface in interfaces
|
||||
|
|
@ -13,6 +23,10 @@ iface lo
|
|||
| selectattr('enabled') %}
|
||||
auto {{ iface.name }}
|
||||
iface {{ iface.name }}
|
||||
{% if iface.vrf %}
|
||||
requires {{ iface.vrf.name }}
|
||||
pre-up ip link set $IFACE master {{ iface.vrf.name }}
|
||||
{% endif %}
|
||||
{% if iface.mtu %}
|
||||
mtu {{ iface.mtu }}
|
||||
{% endif %}
|
||||
|
|
@ -23,13 +37,17 @@ iface {{ iface.name }}
|
|||
{% set prefix = prefixes | selectattr('prefix', '==', subnet) | first %}
|
||||
{% set gateway = prefix.custom_fields.gateway.address %}
|
||||
{% if gateway is defined and gateway != address.address %}
|
||||
{% if iface.vrf %}
|
||||
up ip route add default via {{ gateway | ipaddr('address') }} {% if iface.vrf.name %}vrf {{ iface.vrf.name }}{% endif +%}
|
||||
{% else %}
|
||||
gateway {{ gateway | ipaddr('address') }}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endif %}
|
||||
{% endfor -%}
|
||||
|
||||
{# disable SLAAC if we have a manually set IPv6 address #}
|
||||
{% if iface.ip_addresses | selectattr("family.value", "==", 6) %}
|
||||
{% if iface.ip_addresses | selectattr("family.value", "==", 6) and iface.name != "lo" %}
|
||||
pre-up echo 0 > /proc/sys/net/ipv6/conf/$IFACE/autoconf
|
||||
{% endif %}
|
||||
|
||||
|
|
|
|||
19
roles/collector/README.md
Normal file
@ -0,0 +1,19 @@
Set up metric collection with prometheus and telegraf as the SNMP proxy.

Each entry in `prometheus_config` should define `name`, `hosts` and optionally `interval`, where `hosts` is used as a NetBox query filter.

For SNMP, the properties `snmp_hosts` and the optional `snmp_interval` define the NetBox query filter and the poll interval, respectively.

For example:

    {
        "prometheus_config": [
            {
                "name": "classroom",
                "hosts": "role=desktop-computer status=active location=classroom",
                "interval": 300
            }
        ],
        "snmp_hosts": "role=switch name__isw=sw- status=active status=staged status=planned",
        "snmp_interval": 300
    }
4
roles/collector/files/prometheus-snmp.yml
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
scrape_configs:
|
||||
- job_name: "snmp"
|
||||
static_configs:
|
||||
- targets: ["localhost:9273"]
|
||||
12
roles/collector/files/prometheus.nft
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
table inet filter {
|
||||
chain output {
|
||||
type filter hook output priority 0; policy accept;
|
||||
|
||||
skuid prometheus ct state { established, related } accept
|
||||
skuid prometheus th dport domain accept
|
||||
skuid prometheus tcp dport { 443, 9100 } accept comment "prometheus"
|
||||
skuid prometheus ip daddr 127.0.0.1 tcp dport 9090 accept comment "prometheus self"
|
||||
skuid prometheus ip daddr 127.0.0.1 tcp dport 9273 accept comment "telegraf snmp exporter"
|
||||
skuid prometheus drop
|
||||
}
|
||||
}
|
||||
9
roles/collector/files/telegraf.nft
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
table inet filter {
|
||||
chain output {
|
||||
type filter hook output priority 0; policy accept;
|
||||
|
||||
skuid telegraf ct state { established, related } accept
|
||||
skuid telegraf th dport snmp accept
|
||||
skuid telegraf drop
|
||||
}
|
||||
}
|
||||
17
roles/collector/handlers/main.yml
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
- name: reload nftables
|
||||
service:
|
||||
name: nftables
|
||||
state: reloaded
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
|
||||
- name: reload prometheus
|
||||
service:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
|
||||
- name: restart telegraf
|
||||
service:
|
||||
name: telegraf
|
||||
state: restarted # seems to crash on reloads
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
3
roles/collector/meta/main.yml
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
dependencies:
|
||||
- role: prometheus
|
||||
- role: telegraf
|
||||
34
roles/collector/tasks/main.yml
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
# since this host likely has access to sensitive networks,
|
||||
# restrict the destinations where monitoring daemons can connect
|
||||
- name: Set up outbound firewall rules
|
||||
copy:
|
||||
dest: "/etc/nftables.d/{{ item }}.nft"
|
||||
src: "{{ item }}.nft"
|
||||
loop:
|
||||
- prometheus
|
||||
- telegraf
|
||||
notify: reload nftables
|
||||
|
||||
- name: Configure telegraf to expose SNMP data as prometheus metrics
|
||||
template:
|
||||
dest: "/etc/telegraf.conf.d/{{ item }}.conf"
|
||||
src: "{{ item }}.conf.j2"
|
||||
loop:
|
||||
- output
|
||||
- snmp
|
||||
notify: restart telegraf
|
||||
|
||||
- name: Configure prometheus to pull SNMP data
|
||||
copy:
|
||||
dest: "/etc/prometheus/conf.d/snmp.yml"
|
||||
src: "prometheus-snmp.yml"
|
||||
notify: reload prometheus
|
||||
|
||||
- name: Configure prometheus to pull custom data
|
||||
template:
|
||||
dest: "/etc/prometheus/conf.d/{{ item.name }}.yml"
|
||||
src: "prometheus-job.yml.j2"
|
||||
loop: "{{ prometheus_config }}"
|
||||
loop_control:
|
||||
label: "{{ item.name }}"
|
||||
notify: reload prometheus
|
||||
4
roles/collector/templates/output.conf.j2
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
[[outputs.prometheus_client]]
|
||||
listen = "127.0.0.1:9273"
|
||||
expiration_interval = "300s"
|
||||
tagexclude = ["mac?"] # temporary tags we don’t need to export
|
||||
24
roles/collector/templates/prometheus-job.yml.j2
Normal file
|
|
@ -0,0 +1,24 @@
|
|||
{% set devices = query("netbox.netbox.nb_lookup", "devices", api_filter=item.hosts, raw_data=true)
|
||||
| selectattr("primary_ip")
|
||||
| map(attribute="name")
|
||||
| map("extract", hostvars) -%}
|
||||
|
||||
scrape_configs:
|
||||
- job_name: "{{ item.name }}"
|
||||
{% if item.interval is defined %}
|
||||
scrape_interval: {{ item.interval }}s
|
||||
scrape_timeout: {{ item.interval // 5 }}s
|
||||
{% endif %}
|
||||
relabel_configs:
|
||||
- source_labels: [__address__]
|
||||
regex: '([^.]+).*'
|
||||
target_label: name
|
||||
replacement: ${1}
|
||||
static_configs:
|
||||
- targets:
|
||||
{% for address in devices
|
||||
| selectattr("dns_name", "defined")
|
||||
| map(attribute="dns_name")
|
||||
| reject("none") | sort | unique %}
|
||||
- "{{ address }}:9100"
|
||||
{% endfor %}
|
||||
115
roles/collector/templates/snmp.conf.j2
Normal file
|
|
@ -0,0 +1,115 @@
|
|||
[[inputs.snmp]]
|
||||
{% if snmp_interval is defined %}
|
||||
interval = "{{ snmp_interval }}s"
|
||||
{% endif %}
|
||||
agent_host_tag = "source"
|
||||
agents = [
|
||||
{% for address in query("netbox.netbox.nb_lookup", "devices", api_filter=snmp_hosts, raw_data=true)
|
||||
| selectattr("primary_ip4") | map(attribute="primary_ip4.address")
|
||||
| ipaddr("int") | sort | ipaddr("address") %}
|
||||
"{{ address }}",
|
||||
{% endfor %}
|
||||
]
|
||||
version = 3
|
||||
sec_level = "authPriv"
|
||||
auth_protocol = "SHA"
|
||||
priv_protocol = "DES"
|
||||
sec_name = "{{ password.snmp_user }}"
|
||||
auth_password = "{{ password.snmp_pass }}"
|
||||
priv_password = "{{ password.snmp_pass }}"
|
||||
|
||||
fieldexclude = ["ifDescr", "ifSpecific"]
|
||||
|
||||
[[inputs.snmp.field]]
|
||||
name = "hostname"
|
||||
oid = "RFC1213-MIB::sysName.0"
|
||||
is_tag = true
|
||||
|
||||
[[inputs.snmp.field]]
|
||||
name = "location"
|
||||
oid = "RFC1213-MIB::sysLocation.0"
|
||||
|
||||
[[inputs.snmp.field]]
|
||||
name = "uptime"
|
||||
oid = "RFC1213-MIB::sysUpTime.0"
|
||||
conversion = "float(2)"
|
||||
|
||||
# interface table
|
||||
[[inputs.snmp.table]]
|
||||
name = "iface"
|
||||
oid = "IF-MIB::ifTable"
|
||||
inherit_tags = ["hostname"]
|
||||
|
||||
[[inputs.snmp.table.field]]
|
||||
oid = "IF-MIB::ifName"
|
||||
|
||||
# rename counters to make prometheus happy
|
||||
[[inputs.snmp.table.field]]
|
||||
name = "in_total"
|
||||
oid = "IF-MIB::ifInOctets"
|
||||
|
||||
[[inputs.snmp.table.field]]
|
||||
name = "in_err_total"
|
||||
oid = "IF-MIB::ifInErrors"
|
||||
|
||||
[[inputs.snmp.table.field]]
|
||||
name = "out_total"
|
||||
oid = "IF-MIB::ifOutOctets"
|
||||
|
||||
[[inputs.snmp.table.field]]
|
||||
name = "out_err_total"
|
||||
oid = "IF-MIB::ifOutErrors"
|
||||
|
||||
# MAC address table per VLAN
|
||||
[[inputs.snmp.table]]
|
||||
name = "fdb"
|
||||
index_as_tag = true
|
||||
inherit_tags = ["hostname"]
|
||||
|
||||
[[inputs.snmp.table.field]]
|
||||
name = "ifIndex"
|
||||
oid = "Q-BRIDGE-MIB::dot1qTpFdbPort"
|
||||
is_tag = true
|
||||
|
||||
[[inputs.snmp.table.field]]
|
||||
name = "entry"
|
||||
oid = "Q-BRIDGE-MIB::dot1qTpFdbStatus"
|
||||
|
||||
# look up interface name from its index
|
||||
# seems we need another SNMP connection for that
|
||||
[[processors.snmp_lookup]]
|
||||
namepass = ["fdb", "iface"]
|
||||
agent_tag = "source"
|
||||
index_tag = "ifIndex"
|
||||
|
||||
version = 3
|
||||
sec_level = "authPriv"
|
||||
auth_protocol = "SHA"
|
||||
priv_protocol = "DES"
|
||||
sec_name = "{{ password.snmp_user }}"
|
||||
auth_password = "{{ password.snmp_pass }}"
|
||||
priv_password = "{{ password.snmp_pass }}"
|
||||
|
||||
[[processors.snmp_lookup.tag]]
|
||||
oid = "IF-MIB::ifName"
|
||||
name = "iface"
|
||||
|
||||
# split index 42.1.2.3.10.11.12 into tags "vlan" and "mac1" to "mac6"
|
||||
[[processors.regex]]
|
||||
namepass = ["fdb"]
|
||||
|
||||
[[processors.regex.tags]]
|
||||
key = "index"
|
||||
pattern = '^(?P<vlan>\d+)\.(?P<mac1>\d+)\.(?P<mac2>\d+)\.(?P<mac3>\d+)\.(?P<mac4>\d+)\.(?P<mac5>\d+)\.(?P<mac6>\d+)'
|
||||
|
||||
# combine "mac*" tags into a single tag "mac" with value 01:02:03:0a:0b:0c
|
||||
[[processors.template]]
|
||||
namepass = ["fdb"]
|
||||
tagexclude = ["ifIndex", "index"]
|
||||
tag = "mac"
|
||||
{% raw %}
|
||||
template = '''{{
|
||||
printf "%02x:%02x:%02x:%02x:%02x:%02x"
|
||||
(.Tag "mac1"|int) (.Tag "mac2"|int) (.Tag "mac3"|int) (.Tag "mac4"|int) (.Tag "mac5"|int) (.Tag "mac6"|int)
|
||||
}}'''
|
||||
{% endraw %}
|
||||
|
|
@ -39,15 +39,17 @@
|
|||
- git
|
||||
- ifupdown2
|
||||
- rsync
|
||||
- vim
|
||||
- tmux
|
||||
- vim
|
||||
- wget
|
||||
|
||||
# for base Debian the main interfaces file is just an include
|
||||
- name: Remove interface definitions added by installer
|
||||
when: not is_proxmox
|
||||
copy:
|
||||
dest: /etc/network/interfaces
|
||||
content: 'source /etc/network/interfaces.d/*'
|
||||
content: |
|
||||
source /etc/network/interfaces.d/*
|
||||
notify: reload interfaces
|
||||
|
||||
# for Proxmox the main interfaces file will define bridges
|
||||
|
|
@ -89,19 +91,6 @@
|
|||
include_tasks: firewall.yml
|
||||
when: not is_proxmox # proxmox has its own firewall configuration
|
||||
|
||||
- name: Install automatic upgrade package
|
||||
package:
|
||||
name: unattended-upgrades
|
||||
|
||||
- name: Configure automatic upgrades
|
||||
lineinfile:
|
||||
path: /etc/apt/apt.conf.d/20auto-upgrades
|
||||
create: yes
|
||||
line: '{{ item }}'
|
||||
loop:
|
||||
- 'APT::Periodic::Update-Package-Lists "1";'
|
||||
- 'APT::Periodic::Unattended-Upgrade "1";'
|
||||
|
||||
- name: Run SSH instance in management VRF
|
||||
when: interfaces | selectattr('vrf') | selectattr('vrf.name', '==', 'mgmt')
|
||||
block:
|
||||
|
|
@ -124,3 +113,6 @@
|
|||
name: sshd@mgmt
|
||||
enabled: yes
|
||||
notify: reboot
|
||||
|
||||
- when: is_virtual
|
||||
include_tasks: vm.yml
|
||||
|
|
|
|||
12
roles/debian/tasks/vm.yml
Normal file
|
|
@ -0,0 +1,12 @@
|
|||
- name: Install automatic upgrade package
|
||||
package:
|
||||
name: unattended-upgrades
|
||||
|
||||
- name: Configure automatic upgrades
|
||||
lineinfile:
|
||||
path: /etc/apt/apt.conf.d/20auto-upgrades
|
||||
create: yes
|
||||
line: '{{ item }}'
|
||||
loop:
|
||||
- 'APT::Periodic::Update-Package-Lists "1";'
|
||||
- 'APT::Periodic::Unattended-Upgrade "1";'
|
||||
25
roles/dnsmasq/README.md
Normal file
@ -0,0 +1,25 @@
Install and configure dnsmasq with support for external DNS updates.

Leases are offered from IP ranges defined in NetBox with the role `DHCP pool`. For each range a prefix should be defined with the custom properties:

    {
        "gateway": <gateway IP address>,
        "dhcp_ranges": <list of IP ranges>,
        "dhcp_server": <server IP address>
    }

To create a reservation for a device, ensure it has the primary IP address with the status `DHCP` inside some DHCP range. The primary MAC address of the corresponding interface should also be defined.

Custom reservations can also be specified in the local context of the DHCP server, for example:

    {
        "reservations": [
            { "host": "foo", "ip": "10.0.1.101", "mac": "12:23:34:45:56:67" },
            { "host": "bar", "ip": "10.0.2.102", "mac": "ab:bc:cd:de:ef:f0" },
            …
        ]
    }

For updating DNS records, a Kerberos ticket is created at first setup and then renewed on updates. The password file should contain the keys `ldap_user` and `ldap_pass` to create the initial ticket.
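Other roles in this repository read such secrets as YAML from `pass`, so a matching entry might look like this (entry name and values are purely illustrative):

    # e.g. `pass vm/dhcp-server`
    ldap_user: dns-updater
    ldap_pass: "…"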

The default dnsmasq leasefile implementation performs poorly with many entries on slow storage, so we replace it with an sqlite database.
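One way such a replacement can be wired up — shown only as a sketch, the helper script path is hypothetical — is to suppress the built-in lease file and let a lease-change script maintain the database; both directives below are standard dnsmasq options:

    leasefile-ro
    dhcp-script=/usr/local/bin/dhcp-lease-to-sqlite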
|
|
@ -18,3 +18,8 @@ dhcp-host = {{ interface.mac_address | lower }},{{ address.address | ipaddr('add
|
|||
|
||||
{% endif %}
|
||||
{% endfor %}
|
||||
|
||||
# custom reservations
|
||||
{% for reservation in reservations | default([]) %}
|
||||
dhcp-host = {{ reservation.mac | lower }},{{ reservation.ip | ipaddr('address') }},{{ reservation.host | lower }}
|
||||
{% endfor %}
|
||||
|
|
|
|||
1
roles/friwall/README.md
Normal file
@ -0,0 +1 @@
Install and configure the [FRIwall](https://git.fri.uni-lj.si/rc/friwall) web application for managing firewall nodes. For settings and operation refer to that project.
|
|
@ -38,17 +38,18 @@
|
|||
extra_args: --user --break-system-packages --no-warn-script-location
|
||||
notify: restart uwsgi
|
||||
|
||||
- name: Configure base settings
|
||||
template:
|
||||
dest: "/srv/friwall/{{ item }}"
|
||||
src: "{{ item }}.j2"
|
||||
- name: Ensure setting files exist
|
||||
copy:
|
||||
dest: "/srv/friwall/{{ item }}.json"
|
||||
content: |
|
||||
{}
|
||||
owner: friwall
|
||||
group: friwall
|
||||
mode: 0600
|
||||
force: no
|
||||
loop:
|
||||
- nodes.json
|
||||
- settings.json
|
||||
- nodes
|
||||
- settings
|
||||
notify: restart uwsgi
|
||||
|
||||
- name: Configure list of networks
|
||||
|
|
|
|||
|
|
@ -1,14 +0,0 @@
|
|||
auto lo
|
||||
iface lo inet loopback
|
||||
|
||||
{% for iface in interfaces %}
|
||||
auto {{ iface.name }}
|
||||
iface {{ iface.name }} inet static
|
||||
{% for address in iface.ip_addresses %}
|
||||
address {{ address.address }}
|
||||
{% endfor %}
|
||||
{% if iface.custom_fields.gateway %}
|
||||
gateway {{ iface.custom_fields.gateway.address | ipaddr('address') }}
|
||||
{% endif %}
|
||||
|
||||
{% endfor %}
|
||||
|
|
@ -1,11 +0,0 @@
|
|||
{% set nodes = query('netbox.netbox.nb_lookup', 'devices', api_filter='role=firewall', raw_data=true)
|
||||
| selectattr('config_context') | selectattr('config_context', 'contains', 'master')
|
||||
| selectattr('config_context.master', '==', inventory_hostname)
|
||||
| map(attribute='name') -%}
|
||||
|
||||
{
|
||||
{% for node in nodes %}
|
||||
"{{ hostvars[node] | device_address | selectattr('family.value', '==', 4)
|
||||
| map(attribute='address') | ipaddr('address') | first }}": -1{{ '' if loop.last else ',' }}
|
||||
{% endfor %}
|
||||
}
|
||||
|
|
@ -1,10 +0,0 @@
|
|||
{
|
||||
"ldap_host": "{{ domain }}",
|
||||
"ldap_user": "{{ password.ldap_user }}",
|
||||
"ldap_pass": "{{ password.ldap_pass }}",
|
||||
"ldap_base_dn": "{{ ldap_base_dn }}",
|
||||
"oidc_server": "{{ password.oidc_server }}",
|
||||
"oidc_client_id": "{{ password.oidc_client_id }}",
|
||||
"oidc_client_secret": "{{ password.oidc_client_secret }}",
|
||||
"wg_net": "{{ wg_net }}"
|
||||
}
|
||||
|
|
@ -9,11 +9,15 @@
|
|||
option: '{{ item.option }}'
|
||||
value: '{{ item.value }}'
|
||||
loop:
|
||||
- { section: analytics, option: reporting_enabled, value: false }
|
||||
- { section: analytics, option: check_for_updates, value: false }
|
||||
- { section: analytics, option: check_for_plugin_updates, value: false }
|
||||
- { section: news, option: news_feed_enabled, value: false }
|
||||
- { section: public_dashboards, option: enabled, value: false }
|
||||
- { section: server, option: root_url, value: "https://%(domain)s/" }
|
||||
- { section: analytics, option: check_for_updates, value: "false" }
|
||||
- { section: analytics, option: check_for_plugin_updates, value: "false" }
|
||||
- { section: analytics, option: enabled, value: "false" }
|
||||
- { section: analytics, option: feedback_links_enabled, value: "false" }
|
||||
- { section: analytics, option: reporting_enabled, value: "false" }
|
||||
- { section: news, option: news_feed_enabled, value: "false" }
|
||||
- { section: public_dashboards, option: enabled, value: "false" }
|
||||
notify: restart grafana
|
||||
|
||||
- name: Set up nginx site
|
||||
template:
|
||||
|
|
|
|||
|
|
@ -93,9 +93,9 @@
|
|||
sysctl_set: true
|
||||
|
||||
- name: Install user certificate expiry notification script
|
||||
copy:
|
||||
dest: /usr/local/bin/
|
||||
src: notify-expiring-certs
|
||||
template:
|
||||
dest: /usr/local/bin/notify-expiring-certs
|
||||
src: notify-expiring-certs.j2
|
||||
mode: "0755"
|
||||
|
||||
- name: Schedule user certificate expiry notification script
|
||||
|
|
|
|||
|
|
@ -24,11 +24,11 @@ for cert in /var/lib/ocserv/certs/*.crt ; do
|
|||
To: ${email}
|
||||
Bcc: root
|
||||
Date: $(date -R)
|
||||
Subject: Potek certifikata za FRI VPN
|
||||
Subject: Potek certifikata za {{ dns_name }}
|
||||
|
||||
Spoštovani,
|
||||
|
||||
čez ${validity} dni bo potekel FRI VPN certifikat za ${email}. Če dostop še potrebujete, kontaktirajte RC FRI za podaljšanje.
|
||||
čez ${validity} dni bo potekel certifikat ${email} za dostop do {{ dns_name }}. Če dostop še potrebujete, kontaktirajte RC FRI za podaljšanje.
|
||||
|
||||
Lep pozdrav,
|
||||
RC FRI
|
||||
|
|
@ -37,7 +37,7 @@ RC FRI
|
|||
|
||||
Hello,
|
||||
|
||||
in ${validity} days the FRI VPN certificate for ${email} will expire. If you still need access, contact RC FRI for renewal.
|
||||
in ${validity} days the certificate ${email} to access {{ dns_name }} will expire. If you still need access, contact RC FRI for renewal.
|
||||
|
||||
Best regards,
|
||||
RC FRI
|
||||
3
roles/prometheus/README.md
Normal file
@ -0,0 +1,3 @@
Install and configure prometheus.

Job definitions should be placed in /etc/prometheus/conf.d by roles using this one. Data retention time can be set with the `retention` property in a NetBox config context.
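For example, a role can drop a minimal scrape job into that directory (this mirrors the structure of `prometheus-snmp.yml` shipped by the collector role; the job name and target are illustrative), while a config context such as `{"retention": "90d"}` would set the retention:

    # /etc/prometheus/conf.d/example.yml
    scrape_configs:
      - job_name: "example"
        static_configs:
          - targets: ["server-1.example.org:9100"]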
2
roles/prometheus/files/prometheus.yml
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
scrape_config_files:
|
||||
- "conf.d/*.yml"
|
||||
11
roles/prometheus/handlers/main.yml
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
- name: reload prometheus
|
||||
service:
|
||||
name: prometheus
|
||||
state: reloaded
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
|
||||
- name: restart prometheus
|
||||
service:
|
||||
name: prometheus
|
||||
state: restarted
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
29
roles/prometheus/tasks/main.yml
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
- name: Install packages
|
||||
package:
|
||||
name:
|
||||
- prometheus
|
||||
|
||||
- name: Create directory for prometheus configs
|
||||
file:
|
||||
path: /etc/prometheus/conf.d
|
||||
state: directory
|
||||
|
||||
- name: Configure prometheus retention time
|
||||
when: retention is defined
|
||||
lineinfile:
|
||||
path: /etc/conf.d/prometheus
|
||||
regexp: "^prometheus_retention_time="
|
||||
line: "prometheus_retention_time={{ retention }}"
|
||||
notify: restart prometheus
|
||||
|
||||
- name: Configure prometheus
|
||||
copy:
|
||||
dest: /etc/prometheus/
|
||||
src: prometheus.yml
|
||||
notify: reload prometheus
|
||||
|
||||
- name: Enable prometheus service
|
||||
service:
|
||||
name: prometheus
|
||||
enabled: true
|
||||
state: started
|
||||
|
|
@ -1,12 +1,12 @@
|
|||
#!/bin/sh
|
||||
GARAZEDIR="~/garaze/tmp"
|
||||
GARAZEDIR="${HOME}/garaze/tmp"
|
||||
# RECIPIENTS="polz@fri.uni-lj.si"
|
||||
# RECIPIENTS="anita.strmole@fri.uni-lj.si"
|
||||
# RECIPIENTS="racunovodstvo@fri.uni-lj.si"
|
||||
RECIPIENTS="$(cat ~/garage_recipients.txt)"
|
||||
RECIPIENTS="$(cat ${HOME}/garaze_recipients.txt)"
|
||||
|
||||
# cd /home/registrator/siemens-spica-rilec
|
||||
|
||||
rm $GARAZEDIR/*.xlsx
|
||||
~/siemens-spica-rilec/garage_count.py --employee --calculation --format xlsx ~/data "$GARAZEDIR/garaze_{start}_{end}.xlsx"
|
||||
${HOME}/siemens-spica-rilec/garage_count.py --employee --calculation --format xlsx ~/data "$GARAZEDIR/garaze_{start}_{end}.xlsx"
|
||||
echo "Garaže za pretekli mesec" | mail -s "Garaže za pretekli mesec" "$RECIPIENTS" -A $GARAZEDIR/*.xlsx
|
||||
|
|
|
|||
2
roles/registrator/meta/main.yml
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
dependencies:
|
||||
- role: opensmtpd # for certificate expiry notifications
|
||||
|
|
@ -10,12 +10,14 @@
|
|||
home: /home/registrator
|
||||
shell: /sbin/nologin
|
||||
generate_ssh_key: yes
|
||||
groups: www-data
|
||||
append: true
|
||||
ssh_key_comment: "{{ inventory_hostname }}"
|
||||
ssh_key_type: ed25519
|
||||
|
||||
- name: Install packages
|
||||
package:
|
||||
name: git,py3-pip
|
||||
name: git,py3-pip,mailutils,py3-openpyxl
|
||||
|
||||
- name: Clone siemens-spica-rilec
|
||||
become: yes
|
||||
|
|
|
|||
|
|
@ -14,3 +14,5 @@ SPOOL_FNAME="new_events.csv"
|
|||
OLDEVENTS_FNAME="old_events.csv"
|
||||
NOCOMMIT_FNAME="nocommit.csv"
|
||||
FIX_FNAME="fixes.csv"
|
||||
FIX_OWNER="registrator"
|
||||
FIX_GROUP="www-data"
|
||||
|
|
|
|||
|
|
@ -1,5 +0,0 @@
|
|||
Set up a basic nginx reverse proxy.
|
||||
|
||||
NetBox config context should contain a proxy_pass property with the server address.
|
||||
|
||||
Custom error page can be placed in /srv/http/error/index.html.
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
- name: Set up nginx site
|
||||
template:
|
||||
dest: '/etc/nginx/http.d/{{ inventory_hostname }}.conf'
|
||||
src: 'nginx.conf.j2'
|
||||
notify: reload nginx
|
||||
|
|
@ -1,31 +0,0 @@
|
|||
server {
|
||||
server_name {{ ([dns_name] + tls_domains|default([])) | join(" ") }};
|
||||
|
||||
listen [::]:443 ssl ipv6only=off;
|
||||
ssl_certificate /etc/letsencrypt/live/{{ dns_name }}/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/{{ dns_name }}/privkey.pem;
|
||||
|
||||
error_page 500 501 502 503 504 505 506 507 508 510 511 /error/;
|
||||
|
||||
location / {
|
||||
proxy_pass {{ proxy_pass }};
|
||||
proxy_set_header Host $http_host;
|
||||
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
|
||||
proxy_set_header X-Real-IP $remote_addr;
|
||||
|
||||
proxy_connect_timeout 30s;
|
||||
proxy_read_timeout 800s;
|
||||
proxy_request_buffering off;
|
||||
proxy_max_temp_file_size 0;
|
||||
client_max_body_size 200M;
|
||||
|
||||
# TODO maybe
|
||||
#proxy_ssl_verify on;
|
||||
#proxy_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt;
|
||||
}
|
||||
|
||||
location /error/ {
|
||||
root /srv/http;
|
||||
try_files $uri $uri/index.html =503;
|
||||
}
|
||||
}
|
||||
1
roles/rilec/files/motd
Normal file
|
|
@ -0,0 +1 @@
|
|||
Apis rilec. Glug glug, njam njam.
|
||||
2
roles/rilec/files/uwsgi.ini
Normal file
|
|
@ -0,0 +1,2 @@
|
|||
[uwsgi]
|
||||
emperor = /etc/uwsgi/conf.d
|
||||
17
roles/rilec/handlers/main.yml
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
- name: reload nginx
|
||||
service:
|
||||
name: nginx
|
||||
state: reloaded
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
|
||||
- name: reload uwsgi
|
||||
service:
|
||||
name: uwsgi
|
||||
state: reloaded
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
|
||||
- name: restart uwsgi
|
||||
service:
|
||||
name: uwsgi
|
||||
state: restarted
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
174
roles/rilec/tasks/main.yml
Normal file
|
|
@ -0,0 +1,174 @@
|
|||
- name: Install dependencies
|
||||
package:
|
||||
name:
|
||||
- git
|
||||
- python3
|
||||
- python3-dev
|
||||
- py3-pip
|
||||
- py3-virtualenv
|
||||
- bash # for upgrade script
|
||||
- build-base # to build psycopg if not available
|
||||
- postgresql-dev # likewise
|
||||
- uwsgi
|
||||
- uwsgi-python3
|
||||
- openldap-dev
|
||||
|
||||
- name: Create group for web service
|
||||
group:
|
||||
name: '{{ user }}'
|
||||
system: yes
|
||||
|
||||
- name: Create user for web service
|
||||
user:
|
||||
name: '{{ user }}'
|
||||
group: '{{ user }}'
|
||||
home: '/srv/{{ user }}'
|
||||
shell: /bin/sh
|
||||
system: yes
|
||||
register: user_info
|
||||
|
||||
- name: Clone web files
|
||||
become: yes
|
||||
become_user: "{{ user }}"
|
||||
become_method: su
|
||||
become_flags: "-s /bin/sh"
|
||||
git:
|
||||
repo: 'https://github.com/polz113/apis-ad-rilec'
|
||||
dest: "{{ user_info.home }}/apis-rilec"
|
||||
force: yes
|
||||
notify: reload uwsgi
|
||||
|
||||
- name: Set django_app
|
||||
set_fact:
|
||||
django_app: "{{ user_info.home }}/apis-rilec/python/django/apis_rilec_fri/"
|
||||
django_venv: "{{ user_info.home }}/venv/apis-rilec/"
|
||||
public_root: "{{ user_info.home }}/www_data/apis-rilec/"
|
||||
|
||||
- name: Create directory for static files
|
||||
file:
|
||||
state: directory
|
||||
owner: "{{ user }}"
|
||||
path: "{{ public_root }}/static"
|
||||
|
||||
- name: Create directory for media
|
||||
file:
|
||||
state: directory
|
||||
owner: "{{ user }}"
|
||||
path: "{{ public_root }}/media"
|
||||
|
||||
- name: Install requirements
|
||||
become: yes
|
||||
become_user: "{{ user }}"
|
||||
become_method: su
|
||||
become_flags: '-s /bin/sh'
|
||||
pip:
|
||||
requirements: "{{ django_app }}/requirements.txt"
|
||||
virtualenv: "{{ django_venv }}"
|
||||
notify: restart uwsgi
|
||||
|
||||
- name: Install database driver
|
||||
become: yes
|
||||
become_user: "{{ user }}"
|
||||
become_method: su
|
||||
become_flags: '-s /bin/sh'
|
||||
pip:
|
||||
name: "psycopg"
|
||||
virtualenv: "{{ django_venv }}"
|
||||
notify: restart uwsgi
|
||||
|
||||
- name: Configure settings
|
||||
template:
|
||||
dest: "{{ django_app }}/apis_rilec_fri/.env"
|
||||
src: "environment.j2"
|
||||
owner: apis
|
||||
group: apis
|
||||
mode: 0600
|
||||
force: yes
|
||||
notify: restart uwsgi
|
||||
|
||||
- name: Run migrations
|
||||
become: yes
|
||||
become_user: '{{ user }}'
|
||||
command:
|
||||
cmd: '{{ django_venv }}/bin/python {{ django_app }}/manage.py migrate --noinput'
|
||||
|
||||
- name: Collect static files
|
||||
command:
|
||||
cmd: '{{ django_venv }}/bin/python {{ django_app }}/manage.py collectstatic --noinput'
|
||||
|
||||
- name: Create superuser / set password
|
||||
become: yes
|
||||
become_user: '{{ user }}'
|
||||
command:
|
||||
cmd: '{{ django_venv }}/bin/python {{ django_app }}/manage.py shell --interface python'
|
||||
stdin: |
|
||||
import sys
|
||||
from django.contrib.auth.models import User
|
||||
username = '{{ password.admin_user }}'
|
||||
u, created = User.objects.get_or_create(username=username)
|
||||
u.set_password('{{ password.admin_pass }}')
|
||||
u.is_staff = True
|
||||
u.is_superuser = True
|
||||
u.save()
|
||||
register: result
|
||||
|
||||
#- name: Create admin user
|
||||
# shell: ". {{django_venv}}/bin/activate; cd {{ django_app }}; ./manage.py createsuperuser --noinput"
|
||||
|
||||
- name: Configure uwsgi
|
||||
copy:
|
||||
dest: /etc/uwsgi/
|
||||
src: uwsgi.ini
|
||||
notify: restart uwsgi
|
||||
|
||||
- name: Configure uwsgi instance
|
||||
template:
|
||||
dest: /etc/uwsgi/conf.d/apis-rilec.ini
|
||||
src: apis-rilec.ini.j2
|
||||
owner: apis
|
||||
group: apis
|
||||
|
||||
- name: Enable uwsgi
|
||||
service:
|
||||
name: uwsgi
|
||||
enabled: yes
|
||||
state: started
|
||||
|
||||
- name: Configure nginx instance
|
||||
template:
|
||||
dest: /etc/nginx/http.d/apis-rilec.conf
|
||||
src: nginx.conf.j2
|
||||
notify: reload nginx
|
||||
|
||||
- name: Process data every day
|
||||
cron:
|
||||
name: "delete data older than 1 month"
|
||||
job: ". {{django_venv}}/bin/activate; cd {{ django_app }}; ./manage.py clean -d 32"
|
||||
user: apis
|
||||
hour: "02"
|
||||
minute: "15"
|
||||
|
||||
- name: Create data update script
|
||||
template:
|
||||
dest: "{{ user_info.home }}/apis_process_data.sh"
|
||||
src: apis_process_data.sh.j2
|
||||
mode: 0775
|
||||
owner: apis
|
||||
group: apis
|
||||
|
||||
- name: Periodically update data
|
||||
cron:
|
||||
name: "Process data from apis"
|
||||
job: "{{ user_info.home }}/rilec_process_data.sh"
|
||||
user: apis
|
||||
hour: "19,7"
|
||||
minute: "00"
|
||||
|
||||
- name: Periodically delete data older than 1 month
|
||||
cron:
|
||||
name: "delete data older than 1 month"
|
||||
job: ". {{django_venv}}/bin/activate; cd {{ django_app }}; ./manage.py clean -d 32"
|
||||
user: apis
|
||||
hour: "02"
|
||||
minute: "15"
|
||||
|
||||
32
roles/rilec/templates/apis-rilec.ini.j2
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
[uwsgi]
|
||||
plugin = python3
|
||||
|
||||
uid = {{ user }}
|
||||
gid = {{ user }}
|
||||
|
||||
# the socket (use the full path to be safe)
|
||||
socket = /run/apis-rilec.socket
|
||||
chown-socket = {{ user }}:nginx
|
||||
chmod-socket = 660
|
||||
|
||||
# log
|
||||
logto = /var/log/uwsgi/apis-rilec/apis-rilec.log
|
||||
|
||||
# Django-related settings
|
||||
# the base directory (full path)
|
||||
chdir = {{ django_app }}
|
||||
# Django's wsgi file
|
||||
module = apis_rilec_fri.wsgi
|
||||
# the virtualenv (full path)
|
||||
home = {{ django_venv }}
|
||||
|
||||
# process-related settings
|
||||
# master
|
||||
master = true
|
||||
# maximum number of worker processes
|
||||
processes = 16
|
||||
harakiri = 3600
|
||||
# clear environment on exit
|
||||
vacuum = true
|
||||
|
||||
|
||||
5
roles/rilec/templates/apis_process_data.sh.j2
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
#!/bin/sh
|
||||
. {{ django_venv }}/bin/activate
|
||||
{{ django_app }}/manage.py from_ldap
|
||||
{{ django_app }}/manage.py to_datasets
|
||||
{{ django_app }}/manage.py to_ldapobjects -gs -cd
|
||||
36
roles/rilec/templates/environment.j2
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
DEBUG=false
|
||||
# DATABASE_URL="mysql://apisrilec:TestBazeZaApisRilec@127.0.0.1:3306/apisrilecphp"
|
||||
DATABASE_URL="postgresql://{{ user }}@localhost/{{database | default(user)}}"
|
||||
# mysql://USER:PASSWORD@HOST:PORT/NAME
|
||||
ALLOWED_HOSTS=apis-rilec.fri.uni-lj.si,apis-rilec.fri1.uni-lj.si,apis-rilec-php.fri1.uni-lj.si
|
||||
SECRET_KEY="{{ password.secret_key }}"
|
||||
PUBLIC_ROOT={{ user_info.home }}/www_data/apis-rilec
|
||||
|
||||
# APIS_X_API_KEY="ToleJeSkrivnost"
|
||||
X_API_KEY="{{ password.x_api_key }}"
|
||||
|
||||
STUDIS_API_TOKEN="{{ password.studis_api_token }}"
|
||||
STUDIS_API_BASE_URL=https://studisfri.uni-lj.si/api
|
||||
|
||||
#LDAP_SERVER_URI="ldap://212.235.188.28:389"
|
||||
# LDAP_SERVER_URI="ldap://dcv1fri1.fri1.uni-lj.si:389"
|
||||
# LDAP_BIND_DN="CN=ldap test,OU=ServiceAccounts,DC=test,DC=nodomain",
|
||||
# LDAP_BIND_DN="CN=LDAP\\, RILEC,OU=System,OU=admin,DC=fri1,DC=uni-lj,DC=si"
|
||||
# LDAP_BIND_PASSWORD="rUn.n-9pNUMq>7[}{AAF2W)f"
|
||||
|
||||
|
||||
LDAP_SERVER_URI="{{ ldap_server }}"
|
||||
LDAP_BIND_DN="{{ password.ldap_bind_dn }}"
|
||||
LDAP_BIND_PASSWORD="{{ password.ldap_bind_pass }}"
|
||||
LDAP_USER_SEARCH_BASE="OU=FRI,DC=fri1,DC=uni-lj,DC=si"
|
||||
LDAP_USER_SEARCH_SCOPE="SUBTREE"
|
||||
LDAP_START_TLS=true
|
||||
LDAP_OPT_X_TLS_REQUIRE_CERT="NEVER"
|
||||
|
||||
# AUTH_LDAP_USER_DN_TEMPLATE = "userPrincipalName=%(user)s,OU=Users,OU=FRI,DC=fri1,DC=uni-lj,DC=si"
|
||||
|
||||
LDAP_GROUP_SEARCH_BASE="OU=FRI,DC=fri1,DC=uni-lj,DC=si"
|
||||
AUTH_LDAP_MIRROR_GROUPS=true
|
||||
AUTH_LDAP_ALWAYS_UPDATE_USER=true
|
||||
AUTH_LDAP_CACHE_GROUPS=true
|
||||
AUTH_LDAP_GROUP_CACHE_TIMEOUT=3600
|
||||
25
roles/rilec/templates/nginx.conf.j2
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
server {
|
||||
listen 443 ssl;
|
||||
listen [::]:443 ssl;
|
||||
server_name {{ dns_name }};
|
||||
|
||||
ssl_certificate /etc/letsencrypt/live/{{ dns_name }}/fullchain.pem;
|
||||
ssl_certificate_key /etc/letsencrypt/live/{{ dns_name }}/privkey.pem;
|
||||
|
||||
location @uwsgi {
|
||||
include uwsgi_params;
|
||||
uwsgi_pass unix:/run/apis-rilec.socket;
|
||||
}
|
||||
location / {
|
||||
# First attempt to serve request as file, then
|
||||
# as directory, then fall back to displaying a 404.
|
||||
alias {{ public_root }};
|
||||
try_files $uri @uwsgi;
|
||||
}
|
||||
location /media/ {
|
||||
alias {{ public_root }}/media/;
|
||||
}
|
||||
location /static/ {
|
||||
alias {{ public_root }}/static/;
|
||||
}
|
||||
}
|
||||
|
|
@ -1,3 +1,7 @@
|
|||
- name: reload smbd
|
||||
command: service smbd reload
|
||||
- name: reload samba
|
||||
command: service samba reload
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
|
||||
- name: reload nscd
|
||||
command: service nscd restart
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
|
|
|
|||
|
|
@ -1,31 +0,0 @@
|
|||
- name: Add influxdb repository
|
||||
deb822_repository:
|
||||
name: influxdata
|
||||
uris: https://repos.influxdata.com/debian
|
||||
suites: stable
|
||||
components: main
|
||||
architectures: amd64
|
||||
signed_by: https://repos.influxdata.com/influxdata-archive.key
|
||||
notify: update package cache
|
||||
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: Install telegraf
|
||||
package:
|
||||
name: telegraf
|
||||
|
||||
- name: Configure telegraf
|
||||
when: not ansible_check_mode
|
||||
template:
|
||||
dest: /etc/telegraf/telegraf.d/output.conf
|
||||
src: output.conf.j2
|
||||
owner: telegraf
|
||||
group: telegraf
|
||||
mode: 0640
|
||||
notify: restart telegraf
|
||||
|
||||
- name: Enable telegraf
|
||||
service:
|
||||
name: telegraf
|
||||
enabled: true
|
||||
state: started
|
||||
|
|
@ -1,11 +1,22 @@
|
|||
- name: Get influxdb info
|
||||
set_fact:
|
||||
influxdb_info: '{{ lookup("passwordstore", "vm/"~influxdb_host, returnall=true, missing="empty") | from_yaml }}'
|
||||
|
||||
- name: Create influxdb token for this host
|
||||
include_tasks: token.yml
|
||||
when: 'not ansible_check_mode and "influxdb_token" not in password'
|
||||
|
||||
- name: Install telegraf on Debian
|
||||
include_tasks: debian.yml
|
||||
- name: Add telegraf package repo on Debian
|
||||
when: ansible_os_family == "Debian"
|
||||
deb822_repository:
|
||||
name: influxdata
|
||||
uris: https://repos.influxdata.com/debian
|
||||
suites: stable
|
||||
components: main
|
||||
architectures: amd64
|
||||
signed_by: https://repos.influxdata.com/influxdata-archive.key
|
||||
notify: update package cache
|
||||
|
||||
- meta: flush_handlers
|
||||
|
||||
- name: Install telegraf
|
||||
package:
|
||||
name: telegraf
|
||||
|
||||
- name: Enable telegraf service
|
||||
service:
|
||||
name: telegraf
|
||||
enabled: true
|
||||
state: started
|
||||
|
|
|
|||
|
|
@ -1,53 +0,0 @@
|
|||
- name: Get influxdb organization ID
|
||||
delegate_to: localhost
|
||||
uri:
|
||||
url: '{{ influxdb_info.influxdb_url }}/api/v2/orgs'
|
||||
headers:
|
||||
Authorization: Token {{ influxdb_info.influxdb_operator_token }}
|
||||
register: response
|
||||
|
||||
- name: Parse influxdb orgID
|
||||
set_fact:
|
||||
influxdb_orgID: '{{ response.json.orgs | selectattr("name", "==", influxdb_info.influxdb_org) | map(attribute="id") | first }}'
|
||||
|
||||
- name: Get influxdb bucket ID
|
||||
delegate_to: localhost
|
||||
uri:
|
||||
url: '{{ influxdb_info.influxdb_url }}/api/v2/buckets?orgID={{ influxdb_orgID }}'
|
||||
headers:
|
||||
Authorization: Token {{ influxdb_info.influxdb_operator_token }}
|
||||
register: response
|
||||
|
||||
- name: Parse influxdb bucketID
|
||||
set_fact:
|
||||
influxdb_bucketID: '{{ response.json.buckets | selectattr("name", "==", "servers") | map(attribute="id") | first }}'
|
||||
|
||||
- name: Create influxdb token
|
||||
delegate_to: localhost
|
||||
uri:
|
||||
url: '{{ influxdb_info.influxdb_url }}/api/v2/authorizations'
|
||||
method: POST
|
||||
body_format: json
|
||||
status_code: 201
|
||||
headers:
|
||||
Authorization: Token {{ influxdb_info.influxdb_operator_token }}
|
||||
Content-Type: application/json
|
||||
body: |
|
||||
{
|
||||
"description": "{{ inventory_hostname }}",
|
||||
"orgID": "{{ influxdb_orgID }}",
|
||||
"permissions": [{ "action": "write", "resource": { "type": "buckets", "id": "{{ influxdb_bucketID }}" } }]
|
||||
}
|
||||
register: response
|
||||
|
||||
- name: Parse influxdb token
|
||||
set_fact:
|
||||
influxdb_token: '{{ response.json.token }}'
|
||||
|
||||
# Ansible’s passwordstore lookup plugin should be able to do that but is pretty broken,
|
||||
# so we do it manually.
|
||||
- name: Store influxdb token in password store
|
||||
delegate_to: localhost
|
||||
command:
|
||||
cmd: 'pass insert --force --multiline {{ ("vm/" if is_virtual else "host/")~inventory_hostname }}'
|
||||
stdin: '{{ password | to_nice_yaml(sort_keys=false) }}influxdb_token: {{ influxdb_token }}'
|
||||
|
|
@ -1,5 +0,0 @@
|
|||
[[outputs.influxdb_v2]]
|
||||
urls = ["{{ influxdb_info.influxdb_url }}"]
|
||||
organization = "{{ influxdb_info.influxdb_org }}"
|
||||
bucket = "{{ influxdb_info.influxdb_bucket }}"
|
||||
token = "{{ influxdb_token | default(password.influxdb_token) }}"
|
||||
|
|
@ -1,3 +1,9 @@
Set up a generic Windows host.

Rename and configure network interfaces. Configure the SSH server.

If `windows_exporter_version` is set, Prometheus [windows_exporter](https://github.com/prometheus-community/windows_exporter) will be installed and configured on port 9100.
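For example, a NetBox config context along these lines (the version number is illustrative) enables the exporter:

    { "windows_exporter_version": "0.25.1" }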

Before first run, set the network profile to private and start the SSH server manually with

    start-service sshd
|
|
|
|||
|
|
@ -3,3 +3,9 @@
|
|||
name: sshd
|
||||
state: restarted
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
|
||||
- name: restart windows_exporter
|
||||
win_service:
|
||||
name: windows_exporter
|
||||
state: restarted
|
||||
when: "'handler' not in ansible_skip_tags"
|
||||
|
|
|
|||
|
|
@ -45,3 +45,7 @@
|
|||
name: sshd
|
||||
start_mode: auto
|
||||
state: started
|
||||
|
||||
- name: Install windows-exporter for prometheus metrics
|
||||
when: 'windows_exporter_version is defined'
|
||||
include_tasks: windows_exporter.yml
|
||||
|
|
|
|||
25
roles/windows/tasks/windows_exporter.yml
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
- name: Check current windows_exporter version
|
||||
win_command: '"c:\program files\windows_exporter\windows_exporter.exe" --version'
|
||||
failed_when: false
|
||||
changed_when: false
|
||||
register: result
|
||||
|
||||
- when: '"stderr" not in result or "version "+windows_exporter_version not in result.stderr'
|
||||
block:
|
||||
- name: Download windows_exporter
|
||||
win_get_url:
|
||||
url: "https://github.com/prometheus-community/windows_exporter/releases/download/v{{ windows_exporter_version }}/windows_exporter-{{ windows_exporter_version }}-amd64.msi"
|
||||
dest: 'c:\windows\temp\windows_exporter.msi'
|
||||
force: true
|
||||
|
||||
- name: Install Windows exporter
|
||||
win_package:
|
||||
path: 'c:\windows\temp\windows_exporter.msi'
|
||||
arguments: "LISTEN_PORT=9100"
|
||||
notify: restart windows_exporter
|
||||
|
||||
- name: Enable windows_exporter service
|
||||
win_service:
|
||||
name: windows_exporter
|
||||
start_mode: delayed
|
||||
state: started
|
||||
11
setup.yml
|
|
@ -16,12 +16,12 @@
|
|||
- hosts: ceph-*
|
||||
roles:
|
||||
- frr
|
||||
- telegraf
|
||||
- ceph
|
||||
|
||||
- hosts: mgmt-gw
|
||||
roles:
|
||||
- radvd # we are router for mgmt networks
|
||||
- collector
|
||||
|
||||
- hosts: proxmox-backup
|
||||
roles:
|
||||
|
|
@ -96,3 +96,12 @@
|
|||
roles:
|
||||
- opensmtpd
|
||||
- friwall
|
||||
|
||||
- hosts: rilec
|
||||
roles:
|
||||
- postgres
|
||||
- rilec
|
||||
vars:
|
||||
user: apis
|
||||
ldap_server: ldap://dcv1fri1.fri1.uni-lj.si:389
|
||||
tls_domains: ['apis-rilec-php.fri1.uni-lj.si', "apis-rilec.fri1.uni-lj.si"]
|
||||
|
|
|
|||