Add ceph role
Just prepares the servers, all management is then done through cephadm.
This commit is contained in:
parent
832be31e21
commit
5038411af3
7
roles/ceph/handlers/main.yml
Normal file
7
roles/ceph/handlers/main.yml
Normal file
|
@@ -0,0 +1,7 @@
|
|||
---
# Handlers for the ceph role.

# Reboot the host; notified by tasks whose changes only take
# effect after a restart.
- name: reboot
  ansible.builtin.reboot:

# Reload the nftables service so a regenerated /etc/nftables.conf
# takes effect without flushing connection state.
- name: reload nftables
  ansible.builtin.service:
    name: nftables
    state: reloaded
|
16
roles/ceph/tasks/firewall.yml
Normal file
16
roles/ceph/tasks/firewall.yml
Normal file
|
@@ -0,0 +1,16 @@
|
|||
---
# Install, configure and enable nftables on a ceph host.

- name: Install nftables
  ansible.builtin.package:
    name: nftables
    state: present

- name: Configure nftables
  ansible.builtin.template:
    dest: /etc/nftables.conf
    src: nftables.conf.j2
    # Quoted: an unquoted 0644 is parsed by YAML as the octal
    # integer 420, not the file mode 0644.
    mode: "0644"
  notify: reload nftables

- name: Enable nftables
  ansible.builtin.service:
    name: nftables
    enabled: true
    state: started
|
38
roles/ceph/tasks/main.yml
Normal file
38
roles/ceph/tasks/main.yml
Normal file
|
@@ -0,0 +1,38 @@
|
|||
---
# Prepare the hosts of a ceph cluster. Only host-level setup happens
# here; all cluster management is done through cephadm afterwards.

- name: Configure /etc/hosts
  ansible.builtin.template:
    dest: /etc/hosts
    src: hosts.j2

- name: Get cluster
  # NOTE: this replaces the inventory variable `cluster` (a cluster
  # name string) with the full NetBox cluster object. The lookup on
  # the right-hand side still sees the original string because it is
  # evaluated before the fact is assigned.
  ansible.builtin.set_fact:
    cluster: "{{ query('netbox.netbox.nb_lookup', 'clusters', raw_data=true, api_filter='name='~cluster) | first }}"

- name: Generate my SSH key
  # cephadm needs passwordless root SSH between the cluster nodes.
  community.crypto.openssh_keypair:
    path: /root/.ssh/id_ed25519
    type: ed25519
    comment: "root@{{ ansible_hostname }}"
  register: my_key

- name: Deploy my key on other nodes
  ansible.posix.authorized_key:
    user: root
    key: "{{ my_key.public_key }}"
    comment: "{{ my_key.comment }}"
  delegate_to: "{{ item }}"
  # `map()` yields a generator; cast to a list so `loop` receives a
  # real sequence.
  loop: "{{ query('netbox.netbox.nb_lookup', 'devices', api_filter='cluster_id='~cluster.id, raw_data=true) | map(attribute='name') | list }}"

- name: Install required packages
  ansible.builtin.package:
    name:
      - lvm2
      - podman
    state: present

- name: Install cephadm script
  ansible.builtin.get_url:
    url: "https://download.ceph.com/rpm-{{ ceph_version }}/el9/noarch/cephadm"
    checksum: "sha512:dc9bfed5b56c3756720e935a1520ccffe4ecf5e37502fe60a6ff835b98da8afe23a0ecab3770698c10ce1278004c788091eed9b4651d1c52f8d05729b4036c12"
    dest: /usr/local/bin/cephadm
    # Quoted: unquoted 0755 would be parsed as the octal integer 493.
    mode: "0755"

- name: Set up the firewall
  ansible.builtin.include_tasks: firewall.yml
|
11
roles/ceph/templates/hosts.j2
Normal file
11
roles/ceph/templates/hosts.j2
Normal file
|
@@ -0,0 +1,11 @@
|
|||
127.0.0.1 localhost
::1 localhost ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

{# List every node of this cluster with the address of its loopback
   ('lo') interface, as registered in NetBox. #}
{# NOTE(review): the bare `ipaddr` filter resolves through the
   ansible.netcommon/ansible.utils collections on modern Ansible —
   confirm one of them is installed. #}
{% set ceph_cluster = query('netbox.netbox.nb_lookup', 'clusters', raw_data=true, api_filter='name='~cluster) | first %}
{% for host in query('netbox.netbox.nb_lookup', 'devices', raw_data=true, api_filter='cluster_id='~ceph_cluster.id) | map(attribute='name') %}
{% for address in (hostvars[host].interfaces | selectattr('name', 'equalto', 'lo') | map(attribute='ip_addresses') | first) %}
{{ address.address | ipaddr('address') }} {{ host }}
{% endfor %}
{% endfor %}
|
33
roles/ceph/templates/nftables.conf.j2
Normal file
33
roles/ceph/templates/nftables.conf.j2
Normal file
|
@@ -0,0 +1,33 @@
|
|||
#!/usr/sbin/nft -f

flush ruleset

# Temporary coarse filter until a proper network policy is in place.
table inet filter {
    # IPv4 source ranges allowed to reach this host.
    # NOTE(review): 193.2.76.176/24 has host bits set within the /24 —
    # confirm whether 193.2.76.0/24 was intended.
    set allowed {
        type ipv4_addr
        flags interval
        elements = { 10.32.0.0/14, 193.2.76.176/24, 192.168.19.0/24, 192.168.251.0/24 }
    }

    chain input {
        type filter hook input priority filter; policy drop

        ct state vmap { invalid : drop, established : accept, related : accept }
        iif lo accept

        # BGP / BFD sessions
        # NOTE(review): with policy drop and no general ICMPv6 accept,
        # IPv6 neighbour discovery is only possible on lan0/lan1 —
        # confirm that is intended.
        iif lan0 ip6 saddr fe80::/64 accept
        iif lan1 ip6 saddr fe80::/64 accept

        ip saddr @allowed accept
    }

    chain forward {
        type filter hook forward priority filter; policy accept
    }

    chain output {
        type filter hook output priority filter; policy accept
    }
}
|
Loading…
Reference in a new issue