----komodo migration runbook: ferretdb -> mongodb replica set + lsyncd + keepalived-------
----first host setup-------
|
|
|
|
##backup the db
|
|
cd /docker/management
|
|
mkdir ./backup
|
|
chmod 777 ./backup
##NOTE: 777 is only so the mongodump container user can write here — tighten or delete ./backup once the migration is validated
|
|
docker run --rm --network container:ferretdb -v $(pwd)/backup:/backup mongo:6.0 mongodump --uri="mongodb://admin:admin@127.0.0.1:27017/komodo" --out=/backup
|
|
|
|
# CRITICAL SAFETY CHECK:
|
|
# Ensure these files actually exist and have size > 0
|
|
ls -lh ./backup/komodo/
|
|
# If you see empty folders or no files, STOP. Do not proceed to step 2.
|
|
|
|
##destroy the containers
|
|
docker compose down -v
|
|
|
|
##change compose to this
|
|
services:
|
|
mongo:
|
|
image: mongo:7.0
|
|
container_name: mongo
|
|
restart: always
|
|
network_mode: host
|
|
command: ["--replSet", "rs0", "--bind_ip_all", "--port", "27017"]
|
|
volumes:
|
|
- /docker/management/mongodb:/data/db
|
|
|
|
komodo:
|
|
# REVERT TO MOGHTECH
|
|
image: ghcr.io/moghtech/komodo-core:latest
|
|
container_name: komodo
|
|
cpus: 2.0
|
|
mem_limit: "2048m"
|
|
mem_reservation: "512m"
|
|
network_mode: host
|
|
env_file:
|
|
- /docker/management/.env
|
|
volumes:
|
|
- /var/run/docker.sock:/var/run/docker.sock:ro
|
|
- /root/.ssh:/home/komodo/.ssh:ro
|
|
- /docker/management/komodo/config:/config
|
|
- /docker/management/komodo/backups:/backups
|
|
- /docker/management/komodo/core-etc:/etc/komodo
|
|
- /docker/management/komodo/var:/var/lib/komodo
|
|
- /docker/management/komodo/repo-cache:/repo-cache
|
|
- /docker:/docker
|
|
environment:
|
|
# CHANGE TO 'URI'
|
|
# This tells Komodo: "Here is the full connection string, don't try to parse commas."
|
|
- KOMODO_DATABASE_URI=mongodb://127.0.0.1:27017/komodo?directConnection=true&replicaSet=rs0
|
|
|
|
# Explicitly unset ADDRESS to avoid conflicts
|
|
- KOMODO_DATABASE_ADDRESS=
|
|
depends_on:
|
|
- mongo
|
|
restart: unless-stopped
|
|
labels:
|
|
- "komodo.skip=true"
|
|
|
|
|
|
## up the mongodb service
|
|
docker compose up -d mongo
|
|
|
|
## restore the data to the container
|
|
docker run --rm --network host -v $(pwd)/backup:/backup mongo:6.0 mongorestore --uri="mongodb://127.0.0.1:27017/komodo" /backup/komodo
|
|
|
|
##make sure that mongo knows it's the primary
|
|
docker exec -it mongo mongosh --eval 'rs.initiate({_id: "rs0", members: [{ _id: 0, host: "172.16.201.201:27017" }]})'
##NOTE(review): verify this IP — the failover URI in the "komodo redeploy" section uses 172.16.201.206, not .201; the rs member host must match or clients will fail to resolve the primary
|
|
|
|
##make sure the framework is done
|
|
##make sure mongo connection string is active in compose
|
|
docker compose up -d komodo
|
|
|
|
##open gui to confirm it works
|
|
|
|
##restore the data into the framework in mongo
|
|
docker run --rm --user 0:0 --network host -v $(pwd)/backup:/backup mongo:6.0 mongorestore --uri="mongodb://127.0.0.1:27017/komodo?replicaSet=rs0" /backup/komodo
|
|
|
|
##compose up the komodo
|
|
docker compose up -d komodo
|
|
|
|
##validate
|
|
|
|
----second host base setup-------
|
|
|
|
##get the env setup
|
|
mkdir /docker/management/mongo
|
|
|
|
##deploy the same docker-compose.yml as above
|
|
##but only start the mongo for now
|
|
docker compose up -d mongo
|
|
|
|
----first host replica setup-------
|
|
|
|
##get into the original mongo
|
|
docker exec -it mongo mongosh
|
|
|
|
##add the new mongo member
|
|
rs.add("172.16.201.106:27017")
|
|
|
|
##check status
|
|
rs.status()
|
|
|
|
|
|
----komodo redeploy-------
|
|
|
|
##change komodo compose file on both to have this as DB URI
|
|
environment:
|
|
# LIST BOTH IPs
|
|
# This allows the driver to failover automatically.
|
|
- KOMODO_DATABASE_URI=mongodb://172.16.201.206:27017,172.16.201.106:27017/komodo?replicaSet=rs0
|
|
|
|
##redeploy the original komodo
|
|
docker compose down komodo && docker compose up -d komodo
|
|
|
|
##deploy the secondary
|
|
docker compose up -d komodo
|
|
|
|
----second host replica check-------
|
|
|
|
##get into the second mongo
|
|
docker exec -it mongo mongosh
|
|
|
|
##do a secondary check
|
|
db.getMongo().setReadPref('secondary')
|
|
|
|
#create a stack called "replica-test"
|
|
|
|
##check that the data exists
|
|
use komodo
|
|
db.Stack.find({name: "replica-test"})
|
|
|
|
----add arbiter-------
|
|
|
|
##already deployed on pve1-lxc2 - redeploy
|
|
-->destroy the container
|
|
-->delete the mongo-arbiter folder in pve-lxc2/docker
|
|
--> use komodo to redeploy the mongo-arbiter
|
|
|
|
##do the following on whichever mongo is primary to add the arbiter
|
|
docker exec -it mongo mongosh
|
|
db.adminCommand({
|
|
setDefaultRWConcern: 1,
|
|
defaultWriteConcern: { w: 1 }
|
|
})
|
|
##add the arbiter running on pve1-lxc2
|
|
rs.addArb("172.16.201.102:27017")
|
|
|
|
##check DB HA
|
|
rs.status()
|
|
|
|
----now do the lsyncd-------
|
|
|
|
##make sure pve1-lxc6 has rsync
|
|
apt update && apt install rsync -y
|
|
|
|
##do on PVE2-lxc6
|
|
##install lsyncd and rsync
|
|
apt update && apt install lsyncd rsync -y
|
|
|
|
##generate keys
|
|
ssh-keygen -t rsa -b 4096
|
|
|
|
##lock down the ssh client config - ensure root owns it (NOTE: this path is an ssh_config.d file, not the generated keys — confirm it is the intended target)
|
|
chown root:root /etc/ssh/ssh_config.d/20-systemd-ssh-proxy.conf
|
|
|
|
##lock down the ssh client config - ensure only root can write to it
|
|
chmod 644 /etc/ssh/ssh_config.d/20-systemd-ssh-proxy.conf
|
|
|
|
##May have to do a keygen on the target host
|
|
ssh-keygen -A
|
|
|
|
##send key to pve1-lxc6
|
|
ssh-copy-id root@172.16.201.101
|
|
|
|
## test
|
|
ssh root@172.16.201.101
|
|
|
|
##if it asks for password do this on the other lxc
|
|
chmod 700 /root/.ssh
|
|
chmod 600 /root/.ssh/authorized_keys
|
|
chown -R root:root /root/.ssh
|
|
|
|
##create config file
|
|
mkdir /etc/lsyncd
|
|
nano /etc/lsyncd/lsyncd.conf.lua
|
|
|
|
##paste this
|
|
settings {
|
|
logfile = "/var/log/lsyncd/lsyncd.log",
|
|
statusFile = "/var/log/lsyncd/lsyncd.status",
|
|
nodaemon = false,
|
|
}
|
|
|
|
-- The Sync Configuration
|
|
sync {
|
|
default.rsync,
|
|
|
|
-- The folder on THIS server to watch
|
|
-- CHECK: Is this mapped to /repo in your compose?
|
|
source = "/repo/mapletree-pve2lxc6/management/komodo",
|
|
|
|
-- The Destination
|
|
target = "root@172.16.201.106:/repo/mapletree-pve1lxc6/management/komodo",
-- NOTE(review): the ssh key above was copied to and tested against 172.16.201.101 (pve1-lxc6); confirm whether this target should be .101 rather than .106, or lsyncd will fail auth
|
|
|
|
-- Exclude temporary files
|
|
exclude = { '.git', '*.tmp', '*.bak' },
|
|
|
|
-- Rsync Options (Archive mode, Compress, Delete files on target if deleted on source)
|
|
rsync = {
|
|
archive = true,
|
|
compress = true,
|
|
verbose = true,
|
|
_extra = { "--omit-dir-times" }
|
|
}
|
|
}
|
|
|
|
##make sure the service is going to work
|
|
mkdir -p /var/log/lsyncd
|
|
touch /var/log/lsyncd/lsyncd.log
|
|
touch /var/log/lsyncd/lsyncd.status
|
|
|
|
set the stack compose variables to (respectively):
|
|
REPO_ROOT=/repo/mapletree-pve2lxc6/management/komodo
|
|
REPO_ROOT=/repo/mapletree-pve1lxc6/management/komodo
|
|
|
|
##change the komodo compose block to include the repo...
|
|
volumes:
|
|
- ${REPO_ROOT}:/repo
|
|
|
|
##redeploy the komodos
|
|
--> may have to do this cli
|
|
|
|
##start the sync
|
|
systemctl enable --now lsyncd
|
|
systemctl restart lsyncd
|
|
systemctl status lsyncd
|
|
|
|
##the check (run on pve2 - 2 sessions)
|
|
tail -f /var/log/lsyncd/lsyncd.log
|
|
touch /repo/mapletree-pve2lxc6/management/komodo/sync_test.txt
|
|
|
|
##check on PVE1
|
|
ls -l /repo/mapletree-pve1lxc6/management/komodo/
|
|
|
|
|
|
----now do the keepalived setup-------
|
|
|
|
##Make sure the LXC's are configured for the IP:
|
|
echo "net.ipv4.ip_nonlocal_bind=1" >> /etc/sysctl.conf
|
|
sysctl -p
|
|
|
|
##make sure the keepalive folder is created and mounted into the periphery
|
|
mkdir /docker/keepalived || true
|
|
nano /root/periphery/docker-compose.yml
|
|
|
|
##don't forget to compose down/up the periphery
|
|
docker compose down && docker compose up -d
|
|
|
|
##deploy the containers
|
|
##make sure the vars are done
|
|
--> in Komodo
|
|
|
|
|
|
|