bunch of stuff in initial commit

This commit is contained in:
Craig McDaniel
2025-09-23 10:54:10 -05:00
commit 9fbc496fa2
24 changed files with 1398 additions and 0 deletions

11
ansible/ansible.cfg Normal file
View File

@@ -0,0 +1,11 @@
[defaults]
# NOTE: the ansible.cfg option for the default login user is "remote_user".
# "ansible_user" is an inventory/play variable, not a config key, so the original
# line was silently ignored here.
remote_user = theboss
host_key_checking = False
inventory = inventory
retry_files_enabled = False

[ssh_connection]
# Multiplex SSH connections and skip all host-key bookkeeping (disposable lab hosts).
ssh_args = -o ControlMaster=auto -o ControlPersist=60s -o UserKnownHostsFile=/dev/null -o IdentitiesOnly=no -o StrictHostKeyChecking=no

[inventory]
enable_plugins = host_list, script, yaml, ini, community.docker.docker_containers, aws_ec2

71
ansible/build/Dockerfile Normal file
View File

@@ -0,0 +1,71 @@
# Dockerfile
# @author Craig McDaniel
#
# Set up an image that we can run Ansible inside of it for building and deploying.
#
FROM debian:trixie-slim

# Build-time only: stop apt config scripts from prompting. Declared as ARG (not ENV)
# so the setting does not leak into the runtime environment of containers.
ARG DEBIAN_FRONTEND=noninteractive

# Set timezone to UTC
ENV TZ=UTC
RUN ln -snf /usr/share/zoneinfo/$TZ /etc/localtime && echo $TZ > /etc/timezone

# busnet group/user
RUN groupadd -g 1001 busnet \
    && useradd -u 1001 -g 1001 -d /opt/busnet -s /bin/bash busnet \
    && mkdir /opt/busnet \
    && chown -R busnet:busnet /opt/busnet

# This sounds scarier than it is. It allows us to install Python libraries with pip directly.
# It makes more sense when you install this on your main OS, but I want full control over this
# container image. -CM
# (key=value form; the legacy space-separated ENV form is deprecated)
ENV PIP_BREAK_SYSTEM_PACKAGES=1

# Fail pipelines (curl | gpg below) when ANY stage fails, not just the last one.
SHELL ["/bin/bash", "-o", "pipefail", "-c"]

# Install packages that we need. update+install in ONE layer (a cached stand-alone
# "apt-get update" layer goes stale), and drop the apt lists to keep the layer small.
RUN apt-get update \
    && apt-get -y install --no-install-recommends \
        ca-certificates \
        curl \
        git \
        gpg \
        less \
        netcat-openbsd \
        python3-docker \
        python3-minimal \
        python3-pip \
        rsync \
        sshpass \
    && rm -rf /var/lib/apt/lists/*

# Use pip to get the latest version of Ansible and supporting libs.
RUN pip3 install --no-cache-dir requests botocore boto3 ansible cryptography

# Install some Ansible modules.
RUN ansible-galaxy collection install community.docker

# Install just the docker CLI, not anything else. We use the CLI to connect to the host OS docker daemon.
# /etc/os-release provides VERSION_CODENAME so we don't need lsb_release installed.
RUN . /etc/os-release \
    && curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg \
    && echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian ${VERSION_CODENAME} stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null \
    && apt-get update \
    && apt-get install -y --no-install-recommends docker-ce-cli \
    && pip install --no-cache-dir docker \
    && rm -rf /var/lib/apt/lists/*

# Remove docs and man pages
RUN rm -rf /usr/share/doc \
    && rm -rf /usr/share/man \
    && rm -rf /usr/share/locale

# Ansible environment variables
# Note: It is assumed the container mount point "/opt/busnet/git/" will be provided at runtime via a bind mount.
ENV ANSIBLE_ROLES_PATH=/opt/busnet/git/devops/ansible/roles
ENV ANSIBLE_INVENTORY=/opt/busnet/git/devops/ansible/inventory
# NOTE(review): image stays root because it drives the host docker daemon via
# /var/run/docker.sock and bind-mounted SSH keys — confirm before adding USER busnet.
WORKDIR /opt/busnet/git/devops/ansible

View File

@@ -0,0 +1,9 @@
# Default vars file for all hosts. These are used by all, and in many cases are just placeholder
# values that are overridden if and when needed.
---
###################################################################################################
# SSH information
###################################################################################################
# The SSH login user is normally supplied at runtime via --extra-vars (see orchestrate/lib/lib.sh).
#ansible_user: root
# Default SSH port for every host; override per host/group if a box listens elsewhere.
ansible_port: 22

2
ansible/inventory/hosts Normal file
View File

@@ -0,0 +1,2 @@
[all]
scanner.busnet

View File

@@ -0,0 +1,58 @@
# @author Craig McDaniel
#
# Bootstrap play: run once against a brand-new host (pass -e "hostname=<host>") to create the
# busnet user/group, the base directory tree, and install the baseline package set.
---
- name: Bootstrap a server for use for the first time.
  become: true
  gather_facts: true
  hosts: "{{ hostname }}"
  tasks:
    - name: set timezone to UTC
      ansible.builtin.timezone:
        name: UTC

    - name: Create busnet system group
      ansible.builtin.group:
        name: busnet
        gid: 31337
        state: present
        system: yes

    - name: Create busnet system user
      ansible.builtin.user:
        name: busnet
        group: busnet
        uid: 31337
        state: present
        system: yes
        home: /opt/busnet
        # The home directory is created with correct ownership by the task below.
        create_home: no

    - name: Create directories
      ansible.builtin.file:
        path: "{{ item }}"
        state: directory
        owner: busnet
        group: busnet
        mode: u=rwx,g=rwx,o=rx
      with_items:
        - /opt/busnet

    - name: Update all packages to the latest version
      ansible.builtin.apt:
        upgrade: yes
        update_cache: yes

    - name: Install base packages with apt
      ansible.builtin.apt:
        name: "{{ packages }}"
        state: present
      vars:
        packages:
          - build-essential
          - autoconf
          - whois
          - traceroute
          - mtr
          - python3
          - docker.io

View File

@@ -0,0 +1 @@
../../templates/

View File

@@ -0,0 +1,16 @@
# scanner.busnet
#
# Applies the scanner_direwolf role (and eventually hamlib) to the scanner host.
---
- name: Install and Configure Direwolf on scanner.busnet
  hosts: scanner.busnet
  become: true
  gather_facts: true
  tasks:
    #- name: Hamlib
    #  ansible.builtin.import_role:
    #    name: hamlib
    - name: Direwolf
      ansible.builtin.import_role:
        name: scanner_direwolf

View File

@@ -0,0 +1 @@
../../roles/

View File

@@ -0,0 +1 @@
../../templates/

View File

@@ -0,0 +1,5 @@
---
# By default, we use the ansible distribution release variable, but you might need to override it.
zabbix_distribution: "{{ansible_distribution}}" # 'Ubuntu', 'Debian', etc...
zabbix_distribution_release: "{{ansible_distribution_release}}" # 'focal', 'bionic', etc...

View File

@@ -0,0 +1,40 @@
# Bootstrap a docker container image
#
# Creates the busnet user/group inside the image, adds the stock "ubuntu" user to the busnet
# group, creates the base directory tree and brings all packages up to date.
- name: Create busnet system group
  ansible.builtin.group:
    name: busnet
    gid: 31337
    state: present
    system: yes

- name: Create busnet system user
  ansible.builtin.user:
    name: busnet
    group: busnet
    uid: 31337
    state: present
    system: yes
    home: /opt/busnet
    create_home: no

# NOTE(review): "group" sets the PRIMARY group of the ubuntu user; if secondary membership
# was intended, this should be "groups: busnet" with "append: yes" — confirm.
- name: Make sure ubuntu user has busnet group
  ansible.builtin.user:
    name: ubuntu
    group: busnet
    state: present

- name: Create directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: busnet
    group: busnet
    mode: u=rwx,g=rwx,o=rx
  with_items:
    - /opt/busnet

- name: Update all packages to the latest version
  ansible.builtin.apt:
    upgrade: yes
    update_cache: yes

View File

@@ -0,0 +1,36 @@
# https://github.com/dj-wasabi/ansible-zabbix-agent
# No longer maintained by the above, but now just keep this up to date manually when
# we upgrade Zabbix agent versions.
---
# Apt GPG keys. "34", "32" are short versions for 3.4, 3.2, etc...
# Mapping: zabbix version (two digits) -> distro codename -> apt signing key id.
sign_keys:
  "60":
    focal:
      sign_key: A14FE591
    bionic:
      sign_key: A14FE591
  "50":
    focal:
      sign_key: A14FE591
    bionic:
      sign_key: A14FE591
  "34":
    focal:
      sign_key: A14FE591
    bionic:
      sign_key: A14FE591
    sonya:
      sign_key: A14FE591
    serena:
      sign_key: A14FE591
    stretch:
      sign_key: A14FE591
    wheezy:
      sign_key: 79EA5ED4
    jessie:
      sign_key: 79EA5ED4
    trusty:
      sign_key: 79EA5ED4
    xenial:
      sign_key: E709712C

View File

@@ -0,0 +1,46 @@
# Install Hamlib
#
# Checks out the pinned Hamlib release, then runs the classic bootstrap/configure/make/
# make install sequence. "creates:" guards make the command steps idempotent on re-runs.
- name: Create various directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: busnet
    group: busnet
    mode: u=rwx,g=rwx,o=rx
  with_items:
    - /opt/busnet/hamlib
    - /opt/busnet/hamlib/source

#- name: Install correct prerequisite packages when running Ubuntu Noble
#  ansible.builtin.include_tasks: ubuntu_noble.yml
#  when: ansible_distribution == "Ubuntu" and ansible_distribution_release == "noble"

# Needed so git will operate on a checkout owned by a different user.
- name: Git stuff
  ansible.builtin.command:
    cmd: "git config --global --add safe.directory /opt/busnet/hamlib/source"
  # Pure configuration command; never report it as a change.
  changed_when: false

- name: Checkout Hamlib source
  ansible.builtin.git:
    repo: "https://github.com/Hamlib/Hamlib"
    dest: /opt/busnet/hamlib/source
    version: 4.6.5

- name: bootstrap
  ansible.builtin.command:
    cmd: "./bootstrap"
    chdir: /opt/busnet/hamlib/source
    # Skip once autotools has already generated the configure script.
    creates: /opt/busnet/hamlib/source/configure

- name: configure
  ansible.builtin.command:
    cmd: "./configure"
    chdir: /opt/busnet/hamlib/source
    # Skip once configure has already produced a Makefile.
    creates: /opt/busnet/hamlib/source/Makefile

- name: make
  community.general.make:
    chdir: /opt/busnet/hamlib/source

- name: make install
  community.general.make:
    chdir: /opt/busnet/hamlib/source
    target: install

View File

@@ -0,0 +1,125 @@
# Role: scanner_direwolf
#
# This installs and configures direwolf on the scanner.bus computer
# - Set up udev rules
# - Set up systemd services
# - Actually download, compile and install the direwolf program
# - Copy configuration files
- name: Make sure busnet user is in groups for audio and USB access
  ansible.builtin.user:
    name: busnet
    groups: "{{ item }}"
    append: yes
  with_items:
    - dialout
    - pulse-access
    - audio
    - plugdev

###################################################################################################
# UDEV RULES
###################################################################################################
- name: Copy udev rules for Yaesu FT-991a radio USB interface
  ansible.builtin.template:
    src: templates/99-ft991a-direwolf.rules.j2
    dest: /etc/udev/rules.d/99-ft991a-direwolf.rules
    owner: root
    group: root
    # Rules files only need to be world-readable; pin the mode instead of inheriting umask.
    mode: "0644"
  register: ft991a_udev_result

- name: Reload udev rules
  ansible.builtin.command:
    cmd: "udevadm control --reload"
  when: ft991a_udev_result.changed

- name: udevadm trigger
  ansible.builtin.command:
    cmd: "udevadm trigger"
  when: ft991a_udev_result.changed

###################################################################################################
# SYSTEMD
###################################################################################################
- name: Install systemd unit file for Yaesu FT-991a
  ansible.builtin.template:
    src: "templates/ft991a-direwolf.service.j2"
    dest: "/etc/systemd/system/ft991a-direwolf.service"
    owner: root
    group: root
    mode: "0644"

- name: Enable ft991a-direwolf service
  ansible.builtin.systemd:
    name: "ft991a-direwolf"
    daemon_reload: true
    enabled: true

- name: Create various directories
  ansible.builtin.file:
    path: "{{ item }}"
    state: directory
    owner: busnet
    group: busnet
    mode: u=rwx,g=rwx,o=rx
  with_items:
    - /opt/busnet/direwolf
    - /opt/busnet/direwolf/config
    - /opt/busnet/direwolf/logs

- name: Copy Direwolf config for Yaesu FT-991a
  ansible.builtin.template:
    src: "templates/ft991a-direwolf.conf.j2"
    dest: "/opt/busnet/direwolf/config/ft991a.conf"
    owner: busnet
    group: busnet

###################################################################################################
# Compile and install Direwolf
###################################################################################################
- name: Install correct prerequisite packages when running Ubuntu Noble
  ansible.builtin.include_tasks: ubuntu_noble.yml
  when: ansible_distribution == "Ubuntu" and ansible_distribution_release == "noble"

# Needed so git will operate on a checkout owned by a different user.
- name: Git stuff
  ansible.builtin.command:
    cmd: "git config --global --add safe.directory /opt/busnet/direwolf/source"
  # Pure configuration command; never report it as a change.
  changed_when: false

- name: Checkout direwolf source
  ansible.builtin.git:
    repo: "https://www.github.com/wb2osz/direwolf"
    dest: /opt/busnet/direwolf/source
    version: 1.7

- name: Create the build directory
  ansible.builtin.file:
    path: /opt/busnet/direwolf/source/build
    state: directory
    mode: '0755'

- name: Configure the build with cmake
  ansible.builtin.command:
    cmd: "cmake .."
    chdir: /opt/busnet/direwolf/source/build
    # Skip re-configuring once cmake has already generated the build files.
    creates: /opt/busnet/direwolf/source/build/Makefile

- name: Build direwolf
  ansible.builtin.command:
    cmd: "make -j4"
    chdir: /opt/busnet/direwolf/source/build

- name: make install
  community.general.make:
    chdir: /opt/busnet/direwolf/source/build
    target: install

#- name: Install/Update various software in the standard repos
#  apt:
#    name: "{{ packages }}"
#    state: latest
#  vars:
#    packages:
#      - direwolf

View File

@@ -0,0 +1,13 @@
# Prerequisite packages for compiling direwolf on Ubuntu 24.04 (noble).
- name: Install base packages with apt on Ubuntu Noble
  ansible.builtin.apt:
    name: "{{ packages }}"
    state: present
  vars:
    packages:
      - alsa-utils
      - libasound2-dev
      - libudev-dev
      - libavahi-client-dev
      - libgps-dev
      - build-essential
      - cmake

View File

@@ -0,0 +1,25 @@
# udev rule to provide fixed device name for the Yaesu FT-991a
# This information was retrieved by the following. Assuming the radio USB is hooked up to /dev/ttyUSB0
#
# craig@scanner:~$ udevadm info /dev/ttyUSB0 | grep ID_SERIAL
# E: ID_SERIAL=Silicon_Labs_CP2105_Dual_USB_to_UART_Bridge_Controller_01A9F7D6
# E: ID_SERIAL_SHORT=01A9F7D6
#
# This creates /dev/radio/ft991a-00 (CAT control) and /dev/radio/ft991a-01 (sound)
# It does not matter what USB port it's plugged into, or how many other USB devices are also plugged
# in.
# ---------------------------------------------------------------------------------------------------
SUBSYSTEM=="tty", ENV{ID_SERIAL_SHORT}=="01A9F7D6", SYMLINK+="radio/ft991a-$env{ID_USB_INTERFACE_NUM}", MODE="660", GROUP="plugdev"
# This works by triggering systemd service when the Yaesu 991a USB device USB appears. We identify
# the device by the unique serial number.
# FIX: udev requires a comma between key assignments; the original 'TAG+="systemd" ENV{...}'
# (no comma) is a syntax error and udev skips the whole rule.
# ---------------------------------------------------------------------------------------------------
ACTION=="add", SUBSYSTEM=="tty", ENV{ID_SERIAL_SHORT}=="01A9F7D6", TAG+="systemd", ENV{SYSTEMD_WANTS}+="ft991a-direwolf.service"
# This tells systemd to shut down the direwolf service when the Yaesu 991a device disappears. We
# identify the device by the unique serial number. Remove the service from SYSTEMD_WANTS ENV.
# ---------------------------------------------------------------------------------------------------
ACTION=="remove", SUBSYSTEM=="tty", ENV{ID_SERIAL_SHORT}=="01A9F7D6", TAG+="systemd", ENV{SYSTEMD_WANTS}-="ft991a-direwolf.service"

View File

@@ -0,0 +1,22 @@
# This file is managed by BusNet Ansible
#
# This is the Direwolf configuration for connecting Direwolf to the Yaesu FT-991a radio when it is
# plugged in via USB.
#
# There is a udev rule which creates "/dev/radio/ft991a-00" and "/dev/radio/ft991a-01" so that we
# do not have to worry about what USB port or tty number is assigned when this radio is connected
# to the computer.
# We can reference this audio device by name already.
ADEVICE plughw:CODEC,0
# The custom udev rule makes this device available by name.
PTT /dev/radio/ft991a-01 RTS
#PTT RIG 1035 /dev/radio/ft991a-00
MYCALL K0BIT-1
# TCP ports for KISS and AGW clients.
KISSPORT 8001
AGWPORT 8011
# This will start an APRS beacon
#PBEACON every=1 overlay=S symbol="bus" lat=29.958260914551104 long=-90.05442788530239 comment="I'm like testing APRS, man" via=WIDE1-1,WIDE2-1

View File

@@ -0,0 +1,21 @@
# This file is managed by BusNet Ansible.
#
# This service starts Direwolf when the Yaesu FT-991A radio USB is connected to this computer.
#
# There is a corresponding udev rule that triggers this service automatically when it detects that
# the radio's USB devices are connected. There is also a udev rule that shuts it down when the USB
# is disconnected.
[Unit]
# FIX: "%i" only expands in template units (name@instance.service); in this non-template unit
# it expanded to an empty string, so name the radio explicitly instead.
Description=Direwolf TNC for the Yaesu FT-991a
[Service]
# Give udev a moment to finish creating the /dev/radio/* symlinks before direwolf opens them.
ExecStartPre=/bin/sleep 5
ExecStart=/usr/local/bin/direwolf -t 0 -c /opt/busnet/direwolf/config/ft991a.conf
Restart=on-failure
RestartSec=5
User=busnet
Group=busnet
[Install]
WantedBy=multi-user.target

124
orchestrate/ansible.sh Executable file
View File

@@ -0,0 +1,124 @@
#!/bin/bash
#
# Busnet Ansible
# @author Craig McDaniel
#
# This script uses Ansible to provision computers in the BusNet.
#
# Hard-coded repo checkout location; lib.sh and config.sh are sourced from here.
INCLUDE_DIR="/opt/busnet/git/devops/orchestrate/lib"
# Read in libs
source ${INCLUDE_DIR}/lib.sh
# First CLI argument selects the subcommand (see show_help below).
COMMAND=$1
# Show help text.
# Uses a single heredoc instead of a pile of echo statements; $0 still expands inside it.
show_help()
{
    cat <<EOF

BusNet ansible script.

Usage: $0 <command>

General Ansible stuff:
---------------------------------------------------------------------------------------------------------------
  --build              Build the Ansible docker image. We use this everywhere.
  --run                Start bash shell inside the ansible container.
  --bootstrap-server   Bootstrap a brand new server for the first time. This executes the Ansible bootstrap
                       role on it. Run this once.

scanner.busnet:
---------------------------------------------------------------------------------------------------------------
  --scanner-direwolf   Install and configure direwolf on scanner.busnet

EOF
}
# Main entry point. Dispatches on the subcommand captured in ${COMMAND}.
main()
{
    case "${COMMAND}" in
        --run)
            docker_run_it 'ansible-busnet:latest' runner bash
            ;;
        --bootstrap-server)
            ansible_bootstrap_server
            ;;
        --build)
            ansible_build
            ;;
        --scanner-direwolf)
            scanner_direwolf
            ;;
        # Anything else (including no argument): show the usage text.
        *)
            show_help
            ;;
    esac
}
# Interactively bootstrap a brand-new server: copies the busnet SSH key to the host, then runs
# the Ansible bootstrap playbook against it. Returns 1 on any failure.
ansible_bootstrap_server()
{
# Check if the private key exists before proceeding.
local PRIVATE_KEY_PATH="${HOME}/.ssh/${CONFIG_ANSIBLE_KEY_NAME}"
local PUBLIC_KEY_PATH="${PRIVATE_KEY_PATH}.pub"
if [[ ! -f "$PUBLIC_KEY_PATH" ]]; then
echo "Error: Public key not found at $PUBLIC_KEY_PATH"
echo "The function generate_busnet_key() should have generated one already. Something did not work correctly. Doing nothing."
return 1 # Exit the function with an error code
fi
echo "Bootstrap a new server!"
echo "This will copy the SSH key and execute the ansible bootstrap role."
echo
echo "The hostname you enter below MUST be added to Ansible inventory first. If it's not, go add it now!"
echo
# Prompt for the hostname and the user to connect as.
# NOTE(review): this overwrites the shell's own $HOSTNAME, which config.sh exports as
# REAL_HOSTNAME for containers -- confirm that is intended.
read -e -p "Enter the hostname of the server: " HOSTNAME
read -e -p "Enter the remote username (e.g., ec2-user): " USERNAME
echo
# Copy the SSH public key to the remote server.
# ssh-copy-id is handed the PRIVATE key path; per its documentation it appends ".pub" itself,
# so the public key is what actually gets copied.
echo "Attempting to copy SSH key to ${USERNAME}@${HOSTNAME}..."
if ssh-copy-id -i "${PRIVATE_KEY_PATH}" "${USERNAME}"@"${HOSTNAME}"; then
echo "SSH key copied successfully."
else
echo "Error: Failed to copy SSH key. Please check the hostname, username, and your SSH connection."
return 1
fi
echo
# Hand off to the containerized ansible-playbook run, passing the target host as an extra var.
echo "Running the Ansible playbook..."
ansible_playbook playbooks/general/bootstrap.yml "hostname=${HOSTNAME}"
if [ $? != 0 ]; then
echo
echo "Ansible playbook execution failed."
echo
else
echo
echo "Ansible playbook execution complete."
echo
fi
}
# Run the scanner direwolf playbook and report the outcome.
scanner_direwolf()
{
    if ansible_playbook playbooks/scanner/direwolf.yml; then
        echo
        echo "Ansible playbook execution complete."
        echo
    else
        echo
        echo "Ansible playbook execution failed."
        echo
    fi
}
# Start the script
main

View File

@@ -0,0 +1,137 @@
# Docker compose for our backend system (REST API/Websocket/LDAP)
# @author Craig McDaniel
#
# This runs a backend server in your development environment. This gets you a running API server.
# You get:
#
# * BorgDrone PublicAPI application. This is the web server for the REST API and RealTimeAPI.
#   - REST API: API endpoint, CB endpoint and SystemAPI endpoint.
#   - RealTime API: A Websocket server for receiving events in real time.
#
# * BorgDrone LDAP server application.
#
# * Local redis cache.
#
# * Multiple PHP-FPM daemons, each running in their own docker container and on separate TCP ports
#   so that we can serve up PHP code that needs different PHP versions.
#
version: "3.5"
services:
  # BorgDrone acts as a web server
  borgdrone:
    image: ${APISERVER_IMAGE_NAME}
    hostname: bg-${HOSTNAME}
    container_name: borgdrone
    # Run nodemon for BorgDrone hot reload. Console displays all logs to stdout.
    command: "node_modules/.bin/nodemon -i node_modules --signal SIGTERM borgdrone.js --console"
    #command: "/opt/omilia/borgdrone/node_modules/.bin/forever --minUptime 1000 --spinSleepTime 1000 --workingDir /opt/omilia/borgdrone borgdrone.js --console"
    working_dir: "/opt/omilia/borgdrone"
    # This starts a mini init program that properly conveys SIGTERM to node when stopping the container.
    init: true
    stop_signal: SIGTERM
    environment:
      - RELEASE_MODE=dev
      # Borgdrone needs to know what the host IP address is. It will look for this var.
      - IP_ADDRESS=${IPADDRESS_API}
    volumes:
      - /opt/omilia/svn:/opt/omilia/svn
      - /opt/omilia/git:/opt/omilia/git
      # Core and PublicAPI must share the same directory here
      - /opt/omilia/core:/opt/omilia/core
      # Map core source tree to current
      - ${SVN_DIR}/core/src:/opt/omilia/core/${SVN_CODE_NAME}/current
      - ${SVN_DIR}/lib/ttcore:/opt/omilia/core/${SVN_CODE_NAME}/current/ttcore
      # Borgdrone source tree map
      - ${SVN_DIR}/borgdrone/src:/opt/omilia/borgdrone
      # PublicAPI will move CORE versions into the local storage sometimes
      - php-core-store:/opt/omilia/mnt/php-core-store
      # Shared App Data
      - shared-app-data:/opt/omilia/mnt/shared-app-data
    restart: on-failure
    network_mode: "host"

  # Redis short term (local) memory cache.
  shorttermcache:
    image: redis:6
    hostname: shorttermcache
    container_name: shorttermcache
    command: "redis-server"
    restart: on-failure
    network_mode: "host"

  # PHP-FPM 7.3 listens on port 9000
  php-fpm-7.3:
    # See lib/build-php.sh to see where this variable comes from.
    image: ${PHP_FPM_IMAGE_73}
    hostname: php-fpm-73
    container_name: php-fpm-73
    # Run PHP-FPM and tell it to use the dev config.
    command: "php-fpm -e --nodaemonize --fpm-config /usr/local/etc/php-fpm.dev.conf"
    #command: "php-fpm --nodaemonize --fpm-config /usr/local/etc/php-fpm.prod.conf"
    working_dir: "/opt/omilia"
    environment:
      - COOKIE_DOMAIN=omilia.io
    volumes:
      # Core and PublicAPI must share the same directory here
      - /opt/omilia/core:/opt/omilia/core
      # Map core source tree to current
      - /opt/omilia/svn:/opt/omilia/svn
      - ${SVN_DIR}/core/src:/opt/omilia/core/${SVN_CODE_NAME}/current
      - ${SVN_DIR}/lib/ttcore:/opt/omilia/core/${SVN_CODE_NAME}/current/ttcore
      # PublicAPI will move CORE versions into the local storage sometimes
      - php-core-store:/opt/omilia/mnt/php-core-store
      # Shared App Data
      - shared-app-data:/opt/omilia/mnt/shared-app-data
      # Account audio files uploaded or recorded by customers
      - account-audio-files:/opt/omilia/mnt/account-audio-files
    restart: on-failure
    network_mode: "host"

  # PHP-FPM 7.4 listens on port 9001
  php-fpm-7.4:
    # See lib/build-php.sh to see where this variable comes from.
    image: ${PHP_FPM_IMAGE_74}
    hostname: php-fpm-74
    container_name: php-fpm-74
    # Run PHP-FPM and tell it to use the dev config.
    command: "php-fpm -e --nodaemonize --fpm-config /usr/local/etc/php-fpm.dev.conf"
    #command: "php-fpm --nodaemonize --fpm-config /usr/local/etc/php-fpm.prod.conf"
    working_dir: "/opt/omilia"
    environment:
      - COOKIE_DOMAIN=omilia.io
    volumes:
      # Core and PublicAPI must share the same directory here
      - /opt/omilia/core:/opt/omilia/core
      # Map core source tree to current
      - /opt/omilia/svn:/opt/omilia/svn
      - ${SVN_DIR}/core/src:/opt/omilia/core/${SVN_CODE_NAME}/current
      - ${SVN_DIR}/lib/ttcore:/opt/omilia/core/${SVN_CODE_NAME}/current/ttcore
      # PublicAPI will move CORE versions into the local storage sometimes
      - php-core-store:/opt/omilia/mnt/php-core-store
      # Shared App Data
      - shared-app-data:/opt/omilia/mnt/shared-app-data
      # Account audio files uploaded or recorded by customers
      - account-audio-files:/opt/omilia/mnt/account-audio-files
    restart: on-failure
    network_mode: "host"

# Volumes, NFS shares
volumes:
  data:
  php-core-store:
    driver_opts:
      type: nfs
      o: "addr=fs-007e461671b517080.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport"
      device: ":/"
  shared-app-data:
    driver_opts:
      type: nfs
      o: "addr=fs-09e63d68dea814115.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport"
      device: ":/"
  account-audio-files:
    driver_opts:
      type: nfs
      o: "addr=fs-09d0eb0a28f3cb6b3.efs.us-east-1.amazonaws.com,nfsvers=4.1,rsize=1048576,wsize=1048576,hard,timeo=600,retrans=2,noresvport"
      device: ":/account-audio"

View File

@@ -0,0 +1,54 @@
# Billing stack: Apache front end plus a PHP-FPM backend on a dedicated bridge network.
#version: "3.5"
services:
  apache2:
    image: ${BILLING_APACHE2_IMAGE_NAME}
    hostname: billing-apache2
    container_name: billing-apache2
    restart: on-failure
    networks:
      billingnet:
        ipv4_address: 10.77.93.10
    volumes:
      - /opt/neutopia/git/whmcsphp/src:/opt/neutopia/whmcs
      - ssl-certs:/opt/neutopia/mnt/ssl-certs

  billing-phpfpm:
    image: ${BILLING_PHPFPM_IMAGE_NAME}
    hostname: billing-phpfpm
    container_name: billing-phpfpm
    # Run PHP-FPM and tell it to use the dev config.
    #command: "php-fpm -e --nodaemonize --fpm-config /usr/local/etc/php-fpm.dev.conf"
    #working_dir: "/opt/omilia"
    #environment:
    #  - COOKIE_DOMAIN=omilia.io
    networks:
      billingnet:
        ipv4_address: 10.77.93.11
        ipv6_address: 10:77:93::11
    volumes:
      - /opt/neutopia/git/whmcsphp/src:/opt/neutopia/whmcs
      - ssl-certs:/opt/neutopia/mnt/ssl-certs

# These NFS shares require an existing Wireguard VPN to be set up and running on your computer or
# on your network.
volumes:
  ssl-certs:
    driver_opts:
      type: nfs
      o: "addr=${ACCOUNT_NFS_SERVER},nfsvers=4.2,rw,hard,timeo=600"
      device: ":/opt/neutopia/mnt/ssl-certs"

# Create a network called "billingnet"
networks:
  billingnet:
    driver: bridge
    name: "billingnet"
    enable_ipv6: true
    ipam:
      config:
        - subnet: 10.77.93.0/24
        - subnet: 10:77:93::/64

26
orchestrate/lib/config.sh Normal file
View File

@@ -0,0 +1,26 @@
# Configuration variables
# By default, platform. Override this when you want to build for another platform.
CONFIG_DOCKER_BUILD_PLATFORM="linux/amd64,linux/arm64"
# These are SSH keys used by Ansible to connect to hosts on BusNet. This key pair will be auto
# generated.
CONFIG_ANSIBLE_KEY_NAME="ansiblebusnet"
# This is the location of the ssh keys on your computer.
CONFIG_ANSIBLE_KEY_DIR="${HOME}/.ssh"
# This is where we mount the ssh key dir inside of a container.
CONFIG_ANSIBLE_KEY_DIR_INSIDE="/opt/busnet/ssh-keys"
# Mount these volumes on all containers we run
# NOTE(review): the inner double quotes below do not nest in shell (they close and reopen the
# string); the value only works because callers expand it UNQUOTED in docker run — confirm
# before re-quoting or adding paths containing spaces.
CONFIG_DOCKER_MOUNTS="--mount src="/opt/busnet/git,target=/opt/busnet/git,type=bind"
--mount src="/opt/busnet/data,target=/opt/busnet/data,type=bind"
--mount src="${CONFIG_ANSIBLE_KEY_DIR},target=${CONFIG_ANSIBLE_KEY_DIR_INSIDE},type=bind"
-v /var/run/docker.sock:/var/run/docker.sock
"
# Include this environment variables on all containers we run
CONFIG_DOCKER_ENV="--env ANSIBLE_DIR=$ANSIBLE_DIR
--env REAL_HOSTNAME=$HOSTNAME
"

363
orchestrate/lib/lib.sh Normal file
View File

@@ -0,0 +1,363 @@
#!/bin/bash
# General lib for devops scripts
# @author Craig McDaniel
#
# Source config data
# NOTE: the sourcing script must set INCLUDE_DIR before sourcing this file.
source ${INCLUDE_DIR}/config.sh
# Save all arguments to the script here.
BASH_ARGS=$@
# Ensure dir exists
mkdir -p /opt/busnet/data
# Make sure there is an ssh key pair for the current user. This will be distributed to servers and
# used for Ansible SSH connections. It's not fancy at all. This is a simple solution for the BusNet
# system.
generate_busnet_key()
{
    local key_file="${HOME}/.ssh/${CONFIG_ANSIBLE_KEY_NAME}"
    # Only generate a new key pair when none exists yet.
    if [[ ! -f "${key_file}" ]]; then
        echo "No SSH private key detected for ansible-busnet. Generating one. DO NOT REMOVE THIS!"
        ssh-keygen -t ed25519 -f "${key_file}" -N "" -C "ansible-busnet@$(hostname)"
    fi
}
# Run at source time so every caller of this lib has a key available.
generate_busnet_key
# Make sure the docker image specified by $1 is built and available for use.
# $1 image name/tag, $2 Dockerfile path, $3 build context directory.
# Honors --no-cache / --pull from the outer script's arguments (via $BASH_ARGS).
buildx_image()
{
    IMAGE_NAME=$1
    DOCKERFILE=$2
    DOCKERDIR=$3
    # If --no-cache was specified, pass that on and ALSO add --pull to get the latest images.
    if [[ "${BASH_ARGS}" =~ "--no-cache" ]]; then
        DOCKERARGS="${DOCKERARGS} --pull --no-cache"
    # If --pull was specified, pass just that on.
    elif [[ "${BASH_ARGS}" =~ "--pull" ]]; then
        DOCKERARGS="${DOCKERARGS} --pull"
    fi
    echo "Building docker image ${IMAGE_NAME} using ${DOCKERFILE} in directory ${DOCKERDIR}"
    echo "Docker args: ${DOCKERARGS}"
    # This uses the new docker builder. It can build multi-platform images. The default-load=true
    # will load the image into the local machine so that docker image ls shows it.
    # FIX: only create the builder when it does not already exist -- "docker buildx create" fails
    # with an "existing instance" error on every run after the first.
    if ! docker buildx inspect busnet-builder &>/dev/null; then
        docker buildx create --name busnet-builder --driver-opt=default-load=true
    fi
    docker buildx build --builder busnet-builder --tag $IMAGE_NAME $DOCKERARGS --platform $CONFIG_DOCKER_BUILD_PLATFORM --file $DOCKERFILE $DOCKERDIR
}
# Execute Ansible inside the specified container. In order for this to work, the container image
# must have Ansible and required Ansible modules already installed.
# $1 playbook to execute
# $2 extra vars to pass to Ansible in --extra-vars
#
# Set the VARIABLE $DOCKER_OPTIONS with any custom options you want, and we'll pass this to the docker
# program verbatim.
ansible_playbook()
{
ANSIBLE_IMAGE_NAME="ansible-busnet"
PLAYBOOK=$1
EXTRA_VARS=$2
echo "Running Ansible playbook ${PLAYBOOK} in a container using ansible image ${ANSIBLE_IMAGE_NAME} ..."
# "docker inspect" exits non-zero when the image does not exist locally.
docker inspect $ANSIBLE_IMAGE_NAME &>/dev/null
if [ $? != 0 ]; then
if [ "${ANSIBLE_IMAGE_NAME}" == "ansible-busnet" ]; then
echo "You need to build the ansible docker image first with --ansible-build"
exit 1
else
echo "I just attempted to run the container $ANSIBLE_IMAGE_NAME, but I could not because it doesn't exist on your system."
exit 1
fi;
fi
# SSH remote user
read -e -p "Enter username for remote SSH: " ansible_user
# SSH key file
# Path as seen INSIDE the container (the key dir is bind-mounted there by CONFIG_DOCKER_MOUNTS).
ansible_ssh_private_key_file=${CONFIG_ANSIBLE_KEY_DIR_INSIDE}/${CONFIG_ANSIBLE_KEY_NAME}
EXTRA_VARS="${EXTRA_VARS} ansible_user=${ansible_user} ansible_ssh_private_key_file=${ansible_ssh_private_key_file}"
echo "EXTRA VARS (if any): ${EXTRA_VARS}"
echo
# CONFIG_DOCKER_ENV / CONFIG_DOCKER_MOUNTS are expanded unquoted on purpose (word-splitting
# produces the individual docker flags).
docker run \
--rm \
-it \
--network host \
$CONFIG_DOCKER_ENV \
$CONFIG_DOCKER_MOUNTS \
--name ansible-busnet-runner \
$DOCKER_OPTIONS \
$ANSIBLE_IMAGE_NAME \
ansible-playbook $PLAYBOOK --extra-vars "${EXTRA_VARS}"
}
# Regular 'ole docker run, non-interactively.
#
# $1 docker image name to run
# $2 name of the container
# $3 command to run. Defaults to sleep infinity (what's the point in that)
# $4 optional workdir
#
# Set the variable DOCKER_OPTIONS with any custom options you want, and we'll pass this to the docker
# program verbatim.
#
docker_run()
{
IMAGE_NAME=$1
CONTAINER_NAME=$2
# Default the command to an idle loop when the caller did not pass one.
if [[ -z "$3" ]]; then
COMMAND="sleep infinity"
else
COMMAND=$3
fi
# Optional working directory inside the container.
if [[ -z "$4" ]]; then
WORKDIR=""
else
WORKDIR="-w ${4}"
fi
# "docker inspect" exits non-zero when the image does not exist locally.
docker inspect $IMAGE_NAME &>/dev/null
if [ $? != 0 ]; then
if [ "${IMAGE_NAME}" == "ansible-builder" ]; then
echo "You need to build the ansible docker image first with --ansible-build"
exit 1
else
echo "I just attempted to run the container $IMAGE_NAME, but I could not because it doesn't exist on your system. You may need to build it first with build.sh"
exit 1
fi;
fi
# Stop it if it's already running.
docker_stop $CONTAINER_NAME
docker run --rm --network host ${WORKDIR} \
$CONFIG_DOCKER_MOUNTS \
$CONFIG_DOCKER_ENV \
--name $CONTAINER_NAME \
$DOCKER_OPTIONS \
$IMAGE_NAME \
$COMMAND
}
# Run a container in interactive mode. When you exit, the container exits and is removed automatically.
#
# $1 docker image name to run
# $2 name of the container
# $3 command to run. Defaults to sleep infinity (what's the point in that)
# $4 optional workdir
#
# Set the variable DOCKER_OPTIONS with any custom options you want, and we'll pass this to the docker
# program verbatim.
#
docker_run_it()
{
    IMAGE_NAME=$1
    CONTAINER_NAME=$2
    # Default the command to an idle loop when the caller did not pass one.
    if [[ -z "$3" ]]; then
        COMMAND="sleep infinity"
    else
        COMMAND=$3
    fi
    # Optional working directory inside the container.
    if [[ -z "$4" ]]; then
        WORKDIR=""
    else
        WORKDIR="-w ${4}"
    fi
    # "docker inspect" exits non-zero when the image does not exist locally.
    docker inspect $IMAGE_NAME &>/dev/null
    if [ $? != 0 ]; then
        if [ "${IMAGE_NAME}" == "ansible-builder" ]; then
            echo "You need to build the ansible docker image first with --ansible-build"
            exit 1
        else
            echo "I just attempted to run the container $IMAGE_NAME, but I could not because it doesn't exist on your system. You may need to build it first with build.sh"
            exit 1
        fi;
    fi
    # FIX: message previously read "COmmand".
    echo "Command: ${COMMAND}"
    # Stop it if it's already running.
    docker_stop $CONTAINER_NAME
    docker run --rm -it --network host ${WORKDIR} \
        $CONFIG_DOCKER_MOUNTS \
        $CONFIG_DOCKER_ENV \
        --name $CONTAINER_NAME \
        $DOCKER_OPTIONS \
        $IMAGE_NAME \
        $COMMAND
}
# Same as docker_run_it() but do not check if the image exists. This works only if the image is
# publicly available or in a repo that we have access to. Docker will just fetch the image automatically,
# whereas docker_run_it() will throw an error if it doesn't exist locally. Ugh.
# $1 docker image name to run
# $2 name of the container
# $3 command to run. Defaults to sleep infinity (what's the point in that)
# $4 optional workdir
#
# Set the variable DOCKER_OPTIONS with any custom options you want, and we'll pass this to the docker
# program verbatim.
#
docker_run_it_nocheck()
{
IMAGE_NAME=$1
CONTAINER_NAME=$2
# Default the command to an idle loop when the caller did not pass one.
if [[ -z "$3" ]]; then
COMMAND="sleep infinity"
else
COMMAND=$3
fi
# Optional working directory inside the container.
if [[ -z "$4" ]]; then
WORKDIR=""
else
WORKDIR="-w ${4}"
fi
# Stop it if it's already running.
docker_stop $CONTAINER_NAME
docker run --rm -it --network host ${WORKDIR} \
$CONFIG_DOCKER_MOUNTS \
$CONFIG_DOCKER_ENV \
--name $CONTAINER_NAME \
$DOCKER_OPTIONS \
$IMAGE_NAME \
$COMMAND
}
# Run and detach a container with the command "sleep infinity". We'll then connect to this container
# with Ansible and do some tasks on it, then save its state.
# $1 docker image name to run
# $2 name of the container
# $3 command to run. Defaults to sleep infinity
#
# Set the variable DOCKER_OPTIONS with any custom options you want, and we'll pass this to the docker
# program verbatim.
#
# Set the variable DOCKER_FORCE_NEW=1 to always start a new container each time. The default is to re-use
# already running containers.
#
docker_run_detach()
{
IMAGE_NAME=$1
CONTAINER_NAME=$2
# Default the command to an idle loop when the caller did not pass one.
if [[ -z "$3" ]]; then
COMMAND="sleep infinity"
else
COMMAND=$3
fi
# "docker inspect" exits non-zero when the image does not exist locally.
docker inspect $IMAGE_NAME &>/dev/null
if [ $? != 0 ]; then
if [ "${IMAGE_NAME}" == "ansible-builder" ]; then
echo "You need to build the ansible docker image first with --ansible-build"
exit 1
else
echo "I just attempted to run the image $IMAGE_NAME, but I could not because it doesn't exist on your system."
exit 1
fi;
fi
# Always create a new container if this flag is set.
# This stops any previously running containers. If none are running, then move on.
if [ "${DOCKER_FORCE_NEW}" == "1" ]; then
docker_stop $CONTAINER_NAME
fi
# Run the image in a container if it's not already running.
# NOTE(review): "grep $CONTAINER_NAME" is a substring match -- a running container whose name
# merely CONTAINS this one will be mistaken for it; consider an exact-name filter.
if [ $( docker ps | grep $CONTAINER_NAME | wc -l ) == 0 ]; then
echo "Running docker image ${IMAGE_NAME} in container ${CONTAINER_NAME} ..."
docker run \
--rm \
--detach \
--network host \
$CONFIG_DOCKER_MOUNTS \
$CONFIG_DOCKER_ENV \
--name $CONTAINER_NAME \
$DOCKER_OPTIONS \
$IMAGE_NAME \
$COMMAND
else
echo "Docker image ${IMAGE_NAME} already running in container ${CONTAINER_NAME}."
fi
}
# Same as docker_run_detach() but do not check if the image exists. This works only if the image is
# publicly available or in a repo that we have access to. Docker will just fetch the image automatically,
# whereas docker_run_detach() will throw an error if it doesn't exist locally. Ugh.
# $1 docker image name to run
# $2 name of the container
# $3 command to run. Defaults to sleep infinity
#
# Set the variable DOCKER_OPTIONS with any custom options you want, and we'll pass this to the docker
# program verbatim.
#
# Set the variable DOCKER_FORCE_NEW=1 to always start a new container each time. The default is to re-use
# already running containers.
#
docker_run_detach_nocheck()
{
IMAGE_NAME=$1
CONTAINER_NAME=$2
# Default the command to an idle loop when the caller did not pass one.
if [[ -z "$3" ]]; then
COMMAND="sleep infinity"
else
COMMAND=$3
fi
# Always create a new container if this flag is set.
# This stops any previously running containers. If none are running, then move on.
if [ "${DOCKER_FORCE_NEW}" == "1" ]; then
docker_stop $CONTAINER_NAME
fi
# Run the image in a container if it's not already running.
# NOTE(review): substring grep, same caveat as docker_run_detach().
if [ $( docker ps | grep $CONTAINER_NAME | wc -l ) == 0 ]; then
echo "Running docker image ${IMAGE_NAME} in container ${CONTAINER_NAME} ..."
docker run \
--rm \
--detach \
--network host \
$CONFIG_DOCKER_MOUNTS \
$CONFIG_DOCKER_ENV \
--name $CONTAINER_NAME \
$DOCKER_OPTIONS \
$IMAGE_NAME \
$COMMAND
else
echo "Docker image ${IMAGE_NAME} already running in container ${CONTAINER_NAME}."
fi
}
# Stop a docker container.
# $1 name of the container to stop. No-op when no container with exactly that name is running.
docker_stop()
{
    CONTAINER_NAME=$1
    # FIX: match the container name exactly. The old 'grep $CONTAINER_NAME' was a substring
    # match, so e.g. "runner" would also match (and stop nothing useful for) "runner-2".
    if docker container ls --format '{{.Names}}' | grep -qx "${CONTAINER_NAME}"; then
        echo "Stopping docker container ${CONTAINER_NAME}"
        docker stop $CONTAINER_NAME
    fi
}

191
orchestrate/run.sh Executable file
View File

@@ -0,0 +1,191 @@
#!/bin/bash
# @author Craig McDaniel
#
# Frontend script to docker compose
#
# This is how we run various services in development environments for development.
#
INCLUDE_DIR="/opt/neutopia/git/devops/orchestrate/lib"
# Read in libs
source ${INCLUDE_DIR}/lib.sh
source ${INCLUDE_DIR}/build-lib.sh
# Set some default values. We'll fill these out a few lines down.
COMMAND=""
COMPOSE_FILE=""
AUTO_PREPARE=1
# If the user specifies this flag, then don't run the prepare Ansible playbooks.
# Match the flag as a whole word so something like "--no-prepare-x" doesn't trigger it.
if [[ " $* " == *" --no-prepare "* ]]; then
    AUTO_PREPARE=0
fi
# Use getopts to get the value of the -c/-y arguments so we know what command to run and what
# compose file to use. Quote "$@" so arguments containing spaces survive intact.
OPTERR=0
while getopts "c:y:" OPTION "$@";
do
    if [ "${OPTION}" == "c" ]; then
        COMMAND=$OPTARG
    elif [ "${OPTION}" == "y" ]; then
        COMPOSE_FILE=$OPTARG
    fi
done
# Build the $DOCKERARGS variable. Exclude any options that we don't want to pass directly to the
# docker-compose script. When we're done, this string will be passed literally to docker-compose.
ARGSORIG=($@)
DOCKER_COMMAND_ARGS=""
i=0
# Walk every argument. (This loop used to stop at a hard-coded 25 args, silently
# dropping anything past that point.)
while [ $i -lt ${#ARGSORIG[@]} ]; do
    # Skip the next 2 args: the arg and the value.
    if [ "${ARGSORIG[$i]}" == "-c" ]; then
        ((i=i+2))
    # Skip the next 2 args: the arg and the value.
    elif [ "${ARGSORIG[$i]}" == "-y" ]; then
        ((i=i+2))
    # Skip the next arg.
    elif [ "${ARGSORIG[$i]}" == "--no-prepare" ]; then
        ((i=i+1))
    else
        DOCKER_COMMAND_ARGS="${DOCKER_COMMAND_ARGS} ${ARGSORIG[$i]}"
        ((i++))
    fi;
done
# docker-compose expects arguments in this order
DOCKERARGS="-f ${COMPOSE_FILE} ${COMMAND} ${DOCKER_COMMAND_ARGS}"
# Set these ENV variables so we can use them inside the compose YAML files
# Get the image names
# whmcsphp
export BILLING_APACHE2_TAG=`cd /opt/neutopia/git/whmcsphp; git log -n 1 --pretty=format:"%H" .`
export BILLING_APACHE2_IMAGE_NAME="billing-apache2:${BILLING_APACHE2_TAG}"
export BILLING_PHPFPM_TAG=`cd /opt/neutopia/git/whmcsphp; git log -n 1 --pretty=format:"%H" .`
export BILLING_PHPFPM_IMAGE_NAME="billing-phpfpm:${BILLING_PHPFPM_TAG}"
# You have to do this in order for docker-compose to be able to use $HOSTNAME
export HOSTNAME=$HOSTNAME
# Show help text.
# Prints usage information for this script to stdout. $0 (the script name) is
# expanded inside the heredoc, just like it was in the old echo version.
show_help()
{
    cat <<EOF

Run services in a development environment using docker compose.

Usage: 
$0 -c <command> -y <compose-yaml-file> [command-options]

Options that are used by this script only:
 -c The command to pass to docker compose. 'run', 'start', 'stop', 'logs', so on...
 -y Specify compose YAML file to use.
 --no-prepare Optionally. Do not run the Ansible prepare playbook. You will do the preparing yourself!

Commands are as follows:
 prepare Run the Ansible prepare playbook to prepare the dev environment for the matching compose YML file.
 up Bring all services up and connect stdin/stdout to it.
 logs View your container logs. Combine with -f to continually display logs as they are generated.
 down Bring services down.
 * Run docker compose --help for an exhaustive list of commands.

Command options are passed directly to docker-compose and depend on the command.
 * For an example of options to the 'up' command, run docker compose up --help
 * For an example of options to the 'down' command, run docker compose down --help
 * And so on ...

EOF
}
# Show help? Substring match on the joined argument list, same semantics as the
# old [[ $@ =~ "--help" ]] test.
case "$*" in
    *--help*)
        show_help
        exit 0
        ;;
esac
# Main entry point. Dispatches on the global $COMMAND parsed from the command line:
# 'build' is rejected, 'prepare' runs the prepare playbooks, any other non-empty
# command is handed to docker compose, and an empty command shows the help text.
main()
{
    case "${COMMAND}" in
        build)
            echo
            echo "The build command is not supported here. You must use the build.sh script to build images.";
            echo
            exit 1
            ;;
        prepare)
            check_for_valid_args
            prepare
            ;;
        "")
            show_help
            ;;
        *)
            check_for_valid_args
            echo "Using compose file: ${COMPOSE_FILE}"
            #prepare
            docker compose $DOCKERARGS
            ;;
    esac
}
# Make sure the docker compose file and the command are both specified. Show error messages if not.
# Reads the globals $COMMAND and $COMPOSE_FILE; exits 1 with a hint if either is empty.
check_for_valid_args()
{
    # Quote the expansions: unquoted, a value containing whitespace would make the
    # [ ] test blow up with "too many arguments" instead of validating it.
    if [ -z "$COMMAND" ]; then
        echo "No command was specified. Please specify a command to pass to docker compose with -c"
        echo " --help for details."
        exit 1
    fi
    if [ -z "$COMPOSE_FILE" ]; then
        echo "Please specify a docker compose file with the -y argument. None was specified."
        echo " --help for details."
        exit 1
    fi
}
# ** CURRENTLY NOT USED **
# Run the correct ansible playbook to prepare the environment, depending on what compose file we're running.
# If --no-prepare was specified, we don't execute this.
prepare()
{
    # Only prepare if AUTO_PREPARE=1
    if [ "${AUTO_PREPARE}" != 1 ]; then
        return
    fi
    # Only run prepare for 'prepare', 'up' and 'start' commands.
    if [[ "${COMMAND}" != "prepare" && "${COMMAND}" != "up" && "${COMMAND}" != "start" ]]; then
        return
    fi
    if [ "${COMPOSE_FILE}" = "docker-compose-one.yml" ]; then
        prepare_billing
    elif [ "${COMPOSE_FILE}" = "docker-compose-two.yml" ]; then
        prepare_hehe
        # Capture the exit code immediately: each echo below resets $?, so the old
        # 'exit $?' at the end of the error branch always exited with 0 and callers
        # never saw the failure.
        RC=$?
        if [ $RC != 0 ]; then
            echo
            echo "Just tried to create a docker container with docker image '${IMAGE_NAME}', but that operation failed."
            echo "You may need to build the apiserver image again if you have increased the borgdrone SVN revision since the last time you built the image.";
            echo "Run 'docker image ls' to see what tags you have for the appserver image, if any."
            exit $RC;
        fi
    else
        echo "Can't prepare anything because I don't know that docker compose YML file ${COMPOSE_FILE}. Make sure you edit run.sh and add the code for this compose file."
        exit 1
    fi
}
# ** CURRENTLY NOT USED **
# Prepare the billing system to run in a dev environment.
prepare_billing()
{
    ansible_playbook playbooks/build/prepare_billing.yml
    # Save the playbook's exit code before testing it: in the original
    # 'if [ $? != 0 ]; then exit $?; fi' the second $? was the status of the
    # (successful) [ ] test itself, so a failed playbook exited with code 0.
    RC=$?
    if [ $RC != 0 ]; then exit $RC; fi
    docker_stop apiserver-php
}
# Start the script. Option parsing and the env exports above have already run,
# so main() only has to dispatch on $COMMAND.
main