update files for db configs and witness
This commit is contained in:
parent
6c2fa71f30
commit
7e283f76b5
4 changed files with 201 additions and 12 deletions
130
~host-setups/SQL_DB_VMs/VM_Setup.sh
Normal file
130
~host-setups/SQL_DB_VMs/VM_Setup.sh
Normal file
|
|
@ -0,0 +1,130 @@
|
|||
#!/usr/bin/env bash
# VM_Setup.sh — build Debian 12 cloud-init DB templates on PVE1/PVE2 and
# clone one Galera node from each.  Run only the section for the host you
# are on; the "Common pre-setup" runs on both.
set -euo pipefail

##Common pre-setup (both PVE hosts)
# Fetch the Debian 12 cloud image and bake the guest agent into it so the
# clones report IPs/status to Proxmox out of the box.
wget -O /var/lib/vz/template/iso/debian-12-generic-amd64.qcow2 \
  https://cloud.debian.org/images/cloud/bookworm/latest/debian-12-generic-amd64.qcow2
apt update && apt install -y libguestfs-tools
virt-customize -a /var/lib/vz/template/iso/debian-12-generic-amd64.qcow2 \
  --install qemu-guest-agent

##PVE1 steps — template VMID 800 on storage pve1-hdd01-01, clone VMID 151
qm create 800 --name "debian-12-db-template" --memory 4096 --cores 2 \
  --net0 virtio,bridge=vmbr0,tag=201
qm importdisk 800 /var/lib/vz/template/iso/debian-12-generic-amd64.qcow2 pve1-hdd01-01
qm set 800 --scsihw virtio-scsi-pci --scsi0 pve1-hdd01-01:vm-800-disk-0
qm set 800 --boot c --bootdisk scsi0
qm set 800 --ide2 pve1-hdd01-01:cloudinit   # cloud-init drive
qm set 800 --serial0 socket --vga serial0   # cloud images expect a serial console
qm set 800 --agent enabled=1
qm resize 800 scsi0 32G
qm template 800
# NOTE(review): .151 is named db-node-02 while .152 (on PVE2) is db-node-01 —
# confirm this name/IP pairing is intentional and not accidentally swapped.
qm clone 800 151 --name db-node-02
qm set 151 --ipconfig0 ip=172.16.201.151/24,gw=172.16.201.1
qm start 151

##PVE2 steps — template VMID 801 on storage pve2-ssd01-01, clone VMID 152
qm create 801 --name "debian-12-db-template" --memory 4096 --cores 2 \
  --net0 virtio,bridge=vmbr0,tag=201
qm importdisk 801 /var/lib/vz/template/iso/debian-12-generic-amd64.qcow2 pve2-ssd01-01
qm set 801 --scsihw virtio-scsi-pci --scsi0 pve2-ssd01-01:vm-801-disk-0
qm set 801 --boot c --bootdisk scsi0
qm set 801 --ide2 pve2-ssd01-01:cloudinit
qm set 801 --serial0 socket --vga serial0
qm set 801 --agent enabled=1
qm resize 801 scsi0 32G
qm template 801
qm clone 801 152 --name db-node-01
qm set 152 --ipconfig0 ip=172.16.201.152/24,gw=172.16.201.1
qm start 152
||||
##cloud-init setup on both clones (Proxmox UI -> VM -> Cloud-Init tab)
user: <admin username>
password: <password>
domain: <search domain>
dns server: <DNS server IP>
ssh key: paste the workstation's local SSH public key
Click the "Regenerate Image" button at the top of the Cloud-Init tab,
then start the VM.
||||
##Common Setup — run on both DB VMs
apt update && apt install -y mariadb-server

##.151 config file
# Write the Galera config on db node .151.  The quoted 'EOF' delimiter keeps
# the heredoc literal (no shell expansion of the file contents).
cat > /etc/mysql/mariadb.conf.d/50-galera.cnf <<'EOF'
[mysqld]
wsrep_on = ON
wsrep_provider = /usr/lib/libgalera_smm.so
wsrep_cluster_name = "mgdev1"
wsrep_cluster_address = "gcomm://172.16.201.151,172.16.201.152,172.16.201.250"
wsrep_node_address = "172.16.201.151"
binlog_format = row
default_storage_engine = InnoDB
innodb_autoinc_lock_mode = 2
bind-address = 0.0.0.0
EOF

##.152 config file — identical except for wsrep_node_address
cat > /etc/mysql/mariadb.conf.d/50-galera.cnf <<'EOF'
[mysqld]
wsrep_on = ON
wsrep_provider = /usr/lib/libgalera_smm.so
wsrep_cluster_name = "mgdev1"
wsrep_cluster_address = "gcomm://172.16.201.151,172.16.201.152,172.16.201.250"
wsrep_node_address = "172.16.201.152"
binlog_format = row
default_storage_engine = InnoDB
innodb_autoinc_lock_mode = 2
bind-address = 0.0.0.0
EOF

# FIX(review): the original notes placed a bare "wsrep_new_cluster" line inside
# the .152 config file, with a comment saying it is only needed on Node 1.
# That was doubly wrong: it sat in the wrong node's file, and
# "wsrep_new_cluster" is not a valid my.cnf variable — mysqld refuses to start
# on unknown options.  To bootstrap the very first launch, run
# "galera_new_cluster" on the primary node instead of "systemctl start mariadb";
# no config-file line is needed (and none has to be commented out afterwards).
||||
##Deploy steps
1. Bootstrap the primary node first (start it with the new-cluster flag,
   e.g. "sudo galera_new_cluster"), then enable the service:
   "sudo systemctl enable --now mariadb"
2. Run these to validate that it has joined the Pi witness:
-- systemctl status mariadb
-- mariadb -e "SHOW STATUS LIKE 'wsrep_cluster_size';"
3. Make sure the Pi has the correct cluster name and IPs
4. Use "systemctl restart garb" and "systemctl status garb" to confirm the arbitrator's status
5. Once the cluster contains the Pi and the primary node, remove the bootstrap
   flag from the primary node (comment out any "wsrep_new_cluster" line in its
   50-galera.cnf) so a plain restart rejoins rather than re-bootstraps
6. Run on the second node: "sudo systemctl enable --now mariadb" and use the
   commands from step 2 to validate that wsrep_cluster_size is 3
||||
##Layer on Keepalived on both VMs
# Floats VIP 172.16.201.150 between the two DB nodes (VRRP id 15).
# .151 is MASTER (priority 100), .152 is BACKUP (priority 90).

##Common keepalived install steps on both VMs
sudo apt update && sudo apt install -y keepalived

##.151 keepalived config
# NOTE(review): the interface is assumed to be eth0 — Debian 12 cloud images
# on Proxmox often use predictable names (e.g. ens18); verify with `ip link`.
sudo tee /etc/keepalived/keepalived.conf >/dev/null <<'EOF'
vrrp_instance VI_1 {
    state MASTER
    interface eth0
    virtual_router_id 15
    priority 100
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 42
    }
    virtual_ipaddress {
        172.16.201.150/24
    }
}
EOF

##.152 keepalived config — BACKUP with lower priority, same VRRP id/VIP
sudo tee /etc/keepalived/keepalived.conf >/dev/null <<'EOF'
vrrp_instance VI_1 {
    state BACKUP
    interface eth0
    virtual_router_id 15
    priority 90
    advert_int 1
    authentication {
        auth_type PASS
        auth_pass 42
    }
    virtual_ipaddress {
        172.16.201.150/24
    }
}
EOF

##keepalived start - both vms
sudo systemctl enable --now keepalived
sudo systemctl status keepalived

##test from anywhere else on VLAN 201
ping 172.16.201.150
|
||||
45
~host-setups/glusterFS_install.sh
Normal file
45
~host-setups/glusterFS_install.sh
Normal file
|
|
@ -0,0 +1,45 @@
|
|||
#!/usr/bin/env bash
# glusterFS_install.sh — 3-way replicated GlusterFS volume (2 data bricks on
# the PVE hosts + 1 arbiter brick on the Pi witness) mounted at /mnt/shared/git.
set -euo pipefail

##run on PVE1
# Thin LV for the brick, XFS-formatted, mounted persistently via fstab.
lvcreate -V 50G -T pve/data -n gluster_brick
mkfs.xfs /dev/pve/gluster_brick
mkdir -p /data/gluster
echo "/dev/pve/gluster_brick /data/gluster xfs defaults 0 0" >> /etc/fstab
mount -a
mkdir -p /data/gluster/git_vol

##run on PVE2
# Same layout, but the thin pool lives in VG pve-SSD1 here.
lvcreate -V 50G -T pve-SSD1/data -n gluster_brick
mkfs.xfs /dev/pve-SSD1/gluster_brick
mkdir -p /data/gluster
echo "/dev/pve-SSD1/gluster_brick /data/gluster xfs defaults 0 0" >> /etc/fstab
mount -a
mkdir -p /data/gluster/git_vol

##common tasks - run on both PVE's
apt update && apt install -y glusterfs-server
systemctl enable --now glusterd
systemctl status glusterd

##Connect PVE1 to PVE2
gluster peer probe 172.16.201.106

##Connect PVE1 (and 2 by proxy) to the Pi (Witness)
gluster peer probe 172.16.201.250

#Validate that the peers see each other
gluster peer status

##run on PVE1 to create the replica
# replica 3 / arbiter 1: the last brick listed (the Pi) holds metadata only.
# "force" is needed because the arbiter brick is not on a dedicated mount.
gluster volume create git_vol replica 3 arbiter 1 \
  172.16.201.206:/data/gluster/git_vol \
  172.16.201.106:/data/gluster/git_vol \
  172.16.201.250:/data/gluster/witness \
  force

##run on PVE1 to start the volume
gluster volume start git_vol

##final mounts on both PVEs
# _netdev delays the mount until networking (and glusterd) is up at boot.
mkdir -p /mnt/shared/git
echo "localhost:/git_vol /mnt/shared/git glusterfs defaults,_netdev 0 0" >> /etc/fstab
mount -a
df -h /mnt/shared/git
|
||||
26
~host-setups/pi5/PI_setup.sh
Normal file
26
~host-setups/pi5/PI_setup.sh
Normal file
|
|
@ -0,0 +1,26 @@
|
|||
#!/usr/bin/env bash
# PI_setup.sh — Raspberry Pi 5 witness node: GlusterFS arbiter brick plus the
# Galera arbitrator (garbd), reachable on VLAN 201 at 172.16.201.250.
set -euo pipefail

##Pi base config
# 1. Install Pi OS Lite
# 2. Set a static IP on the same subnet as the MariaDB nodes (VLAN 201):
sudo nmcli con add type vlan con-name vlan201 ifname vlan201 dev eth0 id 201 \
  ip4 172.16.201.250/24 ipv4.method manual ipv4.never-default yes
ip addr show vlan201
ping 172.16.201.151
ping 172.16.201.152

##glusterfs install
sudo apt install -y glusterfs-server
sudo systemctl enable --now glusterd
# FIX(review): mkdir -p creates /data and /data/gluster as needed; the
# original's later plain "mkdir /data" / "mkdir /data/gluster" were redundant
# and would fail (paths already exist, and they lacked sudo).
sudo mkdir -p /data/gluster/witness
sudo systemctl status glusterd --no-pager

##galera arbitrator install
# garbd votes in the Galera quorum but stores no data.
sudo apt install -y galera-arbitrator-4
sudo tee /etc/default/garb >/dev/null <<'EOF'
GALERA_NODES="172.16.201.151:4567 172.16.201.152:4567 172.16.201.250:4567"
GALERA_GROUP="mgdev1"
GALERA_OPTIONS="pc.wait_prim=no"
EOF
sudo systemctl enable --now garb
sudo systemctl status garb
|
||||
|
|
@ -1,12 +0,0 @@
|
|||
# DELETED in this commit: dockerized galera-arbitrator, superseded by the
# native galera-arbitrator-4 install in ~host-setups/pi5/PI_setup.sh.
services:
  garbd:
    image: mariadb:11.4
    container_name: galera-arbitrator
    restart: always
    # host networking so garbd is reachable on the Galera port directly
    network_mode: host
    command:
      - garbd
      - --group=mgdev1
      # Must list ALL cluster nodes: PVE1, PVE2, and ITSELF (optional but good practice)
      - --address=gcomm://172.16.201.206,172.16.201.106,172.16.201.250
      - --options=pc.wait_prim=no
|
||||
Loading…
Reference in a new issue