Compare commits
108 Commits
SHA1
85dd5aa4ad
81dd98b952
7125f17b22
bd0753b4db
779f28a5f7
9878fa9ce3
aeb679a898
e40b900750
ae83abef53
0e05eaa531
8c345020f7
10a6bc704c
ba921a53a2
7bb96a1863
d0f65b8841
72e14d7199
566aed3752
3ab1b3ff54
dafd8f345a
56e2f6f05d
e343bc2cb5
6d267dfd04
b049833844
fbf15c52b5
3e1fee6022
27c5be7964
7b2ac2e56a
9407f7caaf
12cfe301e3
c2a1fbd9d8
d43536b67b
da5521295d
e39e1033c6
ec9d4c6e4f
6876c9351b
4df278bb0b
550661c205
131982b37f
e6e772055e
2af0300c5a
5b2e3436f5
274e9456ea
c1717a06e7
e28d6e47a8
2e0129402d
3c1e396b19
96c9dddf89
62f9ff63a8
0102fc9241
5601eaf8b7
33d154eccc
77079a019c
adb579572c
b535f52493
33014294b0
b9eab36585
279c886c07
415d65ac1c
6d00aefb21
2c782808ed
ba8af23f42
c5da839c5b
a575bcbf46
06658b59d3
2f914cf8d9
b362f2e37f
8eb3d1eef1
2b91706d86
933d182244
153249211a
c5765ca952
2bcf430dfd
bc7d30ea59
e23001223c
4a7a854f6f
5804346e42
43fcc62014
a9ba3698bd
8e3a28334e
f808a394aa
97398388d6
0839a78d41
43b529d2d0
81cc2b14ab
42c3275e19
283b42bce1
fe5ffd3add
bc87393fba
f9d43d43f9
ee0b103593
6cd309eb9a
93aebc5251
513b3f9f6a
dd9f1a1c2f
de3896d5ca
6bc1120a64
f7c6ce03ea
e1e69d5212
ca0fbeea73
f2c82d24e2
0fdbac0ea8
638f51eaa6
0f9850d6f8
c26a2f6efb
5af97a3714
6d73464c70
ea9c55b6bf
3088f3904f
.drone.yml (44 changed lines)
@@ -3,7 +3,7 @@ type: kubernetes
 name: default
 
 node_selector:
-  physical-node: dev2
+  physical-node: dev1
 
 trigger:
   event:
@@ -14,26 +14,6 @@ workspace:
   path: /drone/src
 
 steps:
-- name: build multiarch from dev
-  image: docker.io/owncloudci/drone-docker-buildx:4
-  privileged: true
-  settings:
-    cache-from: [ "registry.dev.format.hu/framework-scheduler" ]
-    registry: registry.dev.format.hu
-    repo: registry.dev.format.hu/framework-scheduler
-    tags: latest
-    dockerfile: Dockerfile
-    username:
-      from_secret: dev-hu-registry-username
-    password:
-      from_secret: dev-hu-registry-password
-    platforms:
-      - linux/amd64
-      - linux/arm64
-  when:
-    event:
-    - push
-
 - name: pull image to dockerhub
   image: docker.io/owncloudci/drone-docker-buildx:4
   privileged: true
@@ -51,3 +31,25 @@ steps:
   when:
     event:
     - tag
+
+- name: build multiarch from dev
+  image: docker.io/owncloudci/drone-docker-buildx:4
+  privileged: true
+  #environment:
+  #  DOCKER_PLUGIN_MIRROR: "https://mirror.dev.format.hu"
+  settings:
+    cache-from: [ "registry.dev.format.hu/framework-scheduler" ]
+    registry: registry.dev.format.hu
+    repo: registry.dev.format.hu/framework-scheduler
+    tags: latest
+    dockerfile: Dockerfile
+    username:
+      from_secret: dev-hu-registry-username
+    password:
+      from_secret: dev-hu-registry-password
+    platforms:
+      - linux/amd64
+      - linux/arm64
+  when:
+    event:
+    - push
@@ -14,10 +14,10 @@ FROM alpine:latest
 # COPY --from=redis-source /usr/src/redis/src/redis-cli /usr/bin/redis-cli
 # RUN chmod +x /usr/bin/redis-cli
 
-RUN apk add --update --no-cache docker-cli wget curl dos2unix jq openssl git coreutils inotify-tools acl
+RUN apk add --update --no-cache docker-cli wget curl dos2unix jq openssl git coreutils inotify-tools acl apache2-utils
 
 COPY scripts/scheduler/*.sh /scripts/
 RUN find ./scripts -name "*.sh" | xargs dos2unix
 RUN ["chmod", "+x", "-R", "/scripts/"]
 
-ENTRYPOINT ["/scripts/entrypoint.sh"]
+CMD /scripts/entrypoint.sh
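The Dockerfile hunk above switches the image from a fixed ENTRYPOINT to a CMD, so the start command becomes a default that `docker run` arguments replace outright instead of being appended to an entrypoint. A minimal sketch of the difference, with an illustrative image name only:

```sh
# With CMD: the arguments simply replace the default start command.
docker run --rm registry.example/framework-scheduler:latest sh -c 'echo maintenance shell'

# With ENTRYPOINT: arguments are appended to the entrypoint, so bypassing the
# start script requires --entrypoint (as the scheduler's debug path does).
docker run --rm --entrypoint=sh registry.example/framework-scheduler:latest -c 'echo maintenance shell'
```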
scripts/scheduler/backup_challenge_clients.sh (new file, 89 lines)
@@ -0,0 +1,89 @@
+# Get MY IP
+
+# Get VPN network if exists
+
+# Define port
+
+# Define local IP range
+
+# Define VPN IP range
+
+# Store results
+
+
+scan_network(){
+  MyIP=$(ifconfig ${Interface}|grep inet |awk '{ print $2 }');
+  TargetIP=$(echo $MyIP|cut -d . -f1-3);
+  X=0
+  OpenIP=""
+  for i in $(seq 1 255); do
+    nc -w 1 -z $TargetIP.$i 60022;
+    if [ $? -eq 0 ]
+    then
+      if [ $MyIP != $TargetIP.$i ]
+      then
+        if [ $X = 1 ]
+        then
+          # more than one open IP found
+          echo "Found more than one IP addresses"
+          echo "MAILKULDES"
+          echo "">OpenIP.txt;
+          # TODO: send mail where needed
+          exit 1;
+        else
+          OpenIP=$TargetIP.$i;
+        fi
+        X=1;
+      fi
+    fi
+  done
+  if [ $X = 1 ]
+  then
+    echo $OpenIP>OpenIP.txt;
+    echo "start LVM SYNC";
+    echo "OpenIP mukodik = "$OpenIP;
+    lvm_sync_create $OpenIP;
+  else
+    echo "No available local IP address found!"
+    try_target_VPN;
+  fi
+
+}
+
+try_target_IP(){
+  MyIP=$(ifconfig ${Interface}|grep inet |awk '{ print $2 }');
+  nc -w 1 -z $OpenIP 60022;
+  if [ $? -eq 0 ]
+  then
+    if [ $MyIP = $OpenIP ]
+    then
+      echo "Only own IP address found = "$OpenIP
+      scan_network;
+    fi
+  else
+    scan_network;
+  fi
+}
+
+try_target_VPN(){
+  nc -w 1 -z $VPN 60022;
+  if [ $? -eq 0 ]
+  then
+    for i in {0..99}; do
+      MyVPN=$(ifconfig tun$i 2>/dev/null |grep inet |awk '{ print $2 }');
+      echo "My VPN="$MyVPN;
+      echo "Found VPN="$VPN;
+      if [ $VPN != $MyVPN ]
+      then
+        echo "VPN accessible="$VPN;
+        lvm_sync_create $VPN;
+      else
+        echo "Only own VPN accessible="$VPN;
+        exit 1;
+      fi
+    done
+  else
+    echo "No available server"
+  fi
+}
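The new script probes the local /24 for another host listening on port 60022 with a short nc connect test and falls back to the VPN address when nothing local answers. A hedged sketch of how a single probe could be exercised by hand; Interface and VPN are placeholders that the script itself expects its caller to provide:

```sh
# Assumed values for a manual test only.
Interface=eth0

MyIP=$(ifconfig "$Interface" | grep inet | awk '{ print $2 }')
TargetIP=$(echo "$MyIP" | cut -d . -f1-3)

# Same probe scan_network runs in its loop: a 1-second TCP connect test
# against the backup SSH port on one candidate address.
if nc -w 1 -z "$TargetIP.10" 60022; then
  echo "peer $TargetIP.10 is reachable on 60022"
fi
```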
@@ -3,8 +3,23 @@
 cd /scripts
 DEBUG_MODE=${DEBUG_MODE:-false}
+
+VERSION="1.1.4"
+
 #DOCKER_REGISTRY_URL=${DOCKER_REGISTRY_URL:-registry.format.hu}
 DOCKER_REGISTRY_URL=${DOCKER_REGISTRY_URL:-safebox}
+DOCKER_REGISTRY_USERNAME=$DOCKER_REGISTRY_USERNAME
+DOCKER_REGISTRY_PASSWORD=$DOCKER_REGISTRY_PASSWORD
+
+if [ -n "$DOCKER_REGISTRY_USERNAME" ] && [ -n "$DOCKER_REGISTRY_PASSWORD" ]; then
+  echo "Logging in to Docker registry $DOCKER_REGISTRY_URL"
+  echo "$DOCKER_REGISTRY_PASSWORD" | docker login $DOCKER_REGISTRY_URL --username $DOCKER_REGISTRY_USERNAME --password-stdin
+  DOCKER_REGISTRY_ENVS="--env DOCKER_REGISTRY_USERNAME=$DOCKER_REGISTRY_USERNAME --env DOCKER_REGISTRY_PASSWORD=$DOCKER_REGISTRY_PASSWORD"
+else
+  echo "No Docker registry credentials provided, skipping login."
+fi
+
 USER_INIT_PATH=$USER_INIT_PATH
 GLOBAL_VERSION=${GLOBAL_VERSION:-latest}
 SERVICE_DIR=${SERVICE_DIR:-/etc/user/config/services}
@@ -19,6 +34,14 @@ FRAMEWORK_SCHEDULER_NETWORK_SUBNET=${FRAMEWORK_SCHEDULER_NETWORK_SUBNET:-"172.19
 FRAMEWORK_SCHEDULER_VERSION=${FRAMEWORK_SCHEDULER_VERSION:-latest}
 RUN_FORCE=${RUN_FORCE:-false}
 
+if [ "$DEBUG_MODE" == "true" ]; then
+  DOCKER_START="--entrypoint=sh $DOCKER_REGISTRY_URL/$FRAMEWORK_SCHEDULER_IMAGE:$FRAMEWORK_SCHEDULER_VERSION -c 'sleep 86400'"
+  SET_DEBUG_MODE="--env DEBUG_MODE=true"
+else
+  DOCKER_START="$DOCKER_REGISTRY_URL/$FRAMEWORK_SCHEDULER_IMAGE:$FRAMEWORK_SCHEDULER_VERSION"
+  SET_DEBUG_MODE=""
+fi
+
 WEB_SERVER=${WEB_SERVER:-webserver}
 WEB_IMAGE=${WEB_IMAGE:-web-installer}
 WEBSERVER_PORT=${WEBSERVER_PORT:-8080}
@@ -30,6 +53,7 @@ REDIS_VERSION=${REDIS_VERSION:-latest}
 
 SOURCE=${SOURCE:-user-config}
 SMARTHOST_PROXY_PATH=$SMARTHOST_PROXY_PATH
+HTPASSWD_FILE=${HTPASSWD_FILE:-/etc/system/config/smarthost-proxy/nginx/htpasswd}
 
 GIT_URL=${GIT_URL:-git.format.hu}
 REPO=$REPO
@@ -79,6 +103,8 @@ $VOLUME_MOUNTS \
 --env SETUP_VERSION=$SETUP_VERSION \
 --env GLOBAL_VERSION=$GLOBAL_VERSION \
 --env HOST_FILE=$HOST_FILE \
+$SET_DEBUG_MODE \
+$DOCKER_REGISTRY_ENVS \
 $DOCKER_REGISTRY_URL$SETUP:$SETUP_VERSION"
 
 DEBUG=1
@@ -90,14 +116,316 @@ debug() {
   fi
 }
 
-## Start prevously deployed firewall rules depend on framework scheduler startup at first time
-
-if [ -d /etc/user/config/services ]; then
-cd /etc/user/config/services
-for FIREWALL in $(ls firewall*.json); do
-$service_exec $FIREWALL start &
-done
-fi
+add_json_target(){
+  local TASK_NAME=$1
+
+  if [ -n "$TASK_NAME" ]; then
+    TASK="upgrade-$TASK_NAME"
+  fi
+
+  if [ ! -f $SHARED/output/$TASK.json ]; then
+    install -m 664 -g 65534 /dev/null $SHARED/output/$TASK.json
+  fi
+  echo $JSON_TARGET | base64 -d >$SHARED/output/$TASK.json
+}
+
+backup_query_state() {
+
+  echo "backup_query_state"
+
+}
+
+generate_backup_server_secrets () {
+
+  mkdir -p $SECRET_DIR/backup/server
+
+  echo '{
+    "backupserver":{
+      "SSH_HOST":"'$SSH_HOST'",
+      "SSH_USER":"'$SSH_USER'",
+      "SSH_PORT":"'$SSH_PORT'",
+      "SSH_PASSWORD":"'$SSH_PASSWORD'",
+      "BACKUP_PASSWORD":"'$BACKUP_PASSWORD'",
+      "PERIOD":"'$PERIOD'",
+      "COMPRESSION":"'$COMPRESSION'",
+      "DIRECTORIES":"'$DIRECTORIES'",
+      "SERVICES":"'$SERVICES'"
+    }
+  }' | jq -r . > $SECRET_DIR/backup/server/backup.json
+}
+
+defaulting_missing_paramaters() {
+
+  if [ "$SSH_HOST" == "" ] || [ "$SSH_HOST" == "null" ]; then
+    SSH_HOST="localhost"
+  fi
+
+  if [ "$SSH_PORT" == "" ] || [ "$SSH_PORT" == "null" ]; then
+    SSH_PORT="20022"
+  fi
+
+  if [ "$SSH_USER" == "" ] || [ "$SSH_USER" == "null" ]; then
+    SSH_USER="backup"
+  fi
+
+  if [ "$SSH_PASSWORD" == "" ] || [ "$SSH_PASSWORD" == "null" ]; then
+    SSH_PASSWORD="backup"
+  fi
+}
+
+create_backup_service () {
+
+  ADDITIONAL=""
+  ADDITIONAL='"EXTRA":"--rm","PRE_START":[],"DEPEND": [],"CMD":""'
+
+  BACKUP_SERVER='{
+    "main": {
+      "SERVICE_NAME": "backup-server"
+    },
+    "containers": [
+      {
+        "IMAGE": "alpine:latest",
+        "NAME": "backup-init",
+        "NETWORK": "host",
+        "UPDATE": "true",
+        "MEMORY": "64M",
+        "EXTRA": "--rm",
+        "VOLUMES":[
+          {
+            "SOURCE": "USER_DATA",
+            "DEST": "/etc/user/data/",
+            "TYPE": "rw"
+          }
+        ],
+        "ENTRYPOINT": "sh -c",
+        "CMD": "mkdir -p /etc/user/data/backup/server/ssh",
+        "POST_START": []
+      },
+      {
+        "IMAGE": "safebox/backup-server:latest",
+        "NAME": "backupserver",
+        "NETWORK": "'$NETWORK'",
+        "UPDATE": "true",
+        "MEMORY": "64M",
+        "VOLUMES":[
+          {
+            "SOURCE": "USER_DATA",
+            "DEST": "/etc/user/data/",
+            "TYPE": "ro"
+          },
+          {
+            "SOURCE": "USER_CONFIG",
+            "DEST": "/etc/user/config/",
+            "TYPE": "ro"
+          },
+          {
+            "SOURCE": "USER_SECRET",
+            "DEST": "/etc/user/secret/",
+            "TYPE": "ro"
+          },
+          {
+            "SOURCE": "/etc/user/data/backup/server/ssh",
+            "DEST": "/home/'$SSH_USER'/",
+            "TYPE": "rw"
+          }
+        ],
+        "ENV_FILES":["/etc/user/secret/backup/server/backup.json"],
+        '$ADDITIONAL',
+        "POST_START": []
+      }
+    ]
+  }'
+  # create backup server secrets from variables
+  generate_backup_server_secrets
+}
+
+backup_set_service() {
+
+  local BACKUP_PASSWORD="$1"
+  local PERIOD="$2"
+  local COMPRESSION="$3"
+  local PLANNED_TIME="$(echo "$4" | base64 -d)"
+  local DIRECTRIES="$5"
+  local SERVICES="$6"
+  local SSH_HOST="$7"
+  local VPN="$8"
+  local SSH_PORT="$9"
+  local SSH_USER="${10}"
+  local SSH_PASSWORD="${11}"
+  local OPERATION="${12}"
+
+  defaulting_missing_paramaters
+
+  if [ "$OPERATION" == "DELETE" ]; then
+
+    sed -i '/service-backup/d' /etc/user/data/cron/crontab.txt
+    # delete service
+    rm -f /etc/user/config/services/service-backup-server*
+    rm -rf /etc/user/data/backup/server
+    rm -rf /etc/user/secret/backup/server
+    debug "Service backup server service deleted."
+
+  elif [ "$OPERATION" == "MODIFY" ]; then
+
+    # modify only secrets for backup server, it will be affected at the next cron job
+    generate_backup_server_secrets
+
+  else
+
+    if [ -z "$SSH_PORT" ] ; then
+      SSH_PORT=20022
+    fi
+
+    if [ "$VPN" == "true" ]; then
+      NETWORK=$VPN_NETWORK
+      create_backup_service
+    else
+      NETWORK="host"
+      create_backup_service
+    fi
+
+  fi
+
+  if [ -n "$PLANNED_TIME" ]; then
+    if [ "$VPN" == "true" ]; then
+      if [ -n "$BACKUP_SERVER" ] ; then
+        echo "$BACKUP_SERVER" | jq -r . >/etc/user/config/services/service-backup-server-vpn.json
+      fi
+      echo "'$PLANNED_TIME' service service-backup-server-vpn" >> /etc/user/data/cron/crontab.txt
+    else
+      if [ -n "$BACKUP_SERVER" ] ; then
+        echo "$BACKUP_SERVER" | jq -r . >/etc/user/config/services/service-backup-server-local.json
+      fi
+      echo "'$PLANNED_TIME' service service-backup-server-local" >> /etc/user/data/cron/crontab.txt
+    fi
+  fi
+
+}
+
+backup_set_client() {
+
+  local NAME="$1"
+  local SIZE="$2"
+  local VPN="$3"
+  local SSH_PORT="$4"
+  local SSH_USER="$5"
+  local SSH_PASSWORD="$6"
+  local OPERATION="$7"
+  local VPN_KEY="$8"
+
+  defaulting_missing_paramaters
+
+  if [ "$OPERATION" == "DELETE" ]; then
+    # delete service
+    if [ -f "/etc/user/config/services/service-backup-client-$NAME.json" ]; then
+
+      debug "service-backup-client-$NAME.json stop force dns-remove"
+      $service_exec service-backup-client-$NAME.json stop force dns-remove
+      rm -f /etc/user/config/services/service-backup-client-$NAME.json
+      debug "Service backup client $NAME deleted."
+
+    fi
+
+  else
+
+    if [ -z "$SSH_PORT" ] ; then
+      SSH_PORT=20022
+    fi
+
+    if [ "$VPN" == "true" ]; then
+      NETWORK=$NAME
+      PORT='"PORTS": [{"SOURCE":"null","DEST":"'$SSH_PORT'","TYPE":"tcp"}],'
+    else
+      NETWORK="host"
+      PORT='"PORTS": [{"SOURCE":"'$SSH_PORT'","DEST":"'$SSH_PORT'","TYPE":"tcp"}],'
+    fi
+
+    ADDITIONAL=""
+    ADDITIONAL='"EXTRA":"--restart=always","PRE_START":[],"DEPEND":[],"CMD": ""'
+    ENVS='"ENVS":[{"SSH_USER":"'$SSH_USER'"},{"SSH_PORT":"'$SSH_PORT'"},{"SSH_PASSWORD":"'$SSH_PASSWORD'"},{"VPN_CLIENT_KEY":"'$VPN_KEY'"}],'
+
+    echo '{
+      "main": {
+        "SERVICE_NAME": "'$NAME'"
+      },
+      "containers": [
+        {
+          "IMAGE": "alpine:latest",
+          "NAME": "'$NAME'-init",
+          "NETWORK": "host",
+          "UPDATE": "true",
+          "MEMORY": "64M",
+          "EXTRA": "--rm",
+          "VOLUMES":[
+            {
+              "SOURCE": "USER_DATA",
+              "DEST": "/etc/user/data/",
+              "TYPE": "rw"
+            }
+          ],
+          "ENTRYPOINT": "sh -c",
+          "CMD": "mkdir -p /etc/user/data/backup/clients/'$NAME'/backup && mkdir -p /etc/user/data/backup/clients/'$NAME'/ssh && chmod -R '$SSH_USER':'$SSH_USER' /etc/user/data/backup/clients/'$NAME'",
+          "POST_START": []
+        },
+        {
+          "IMAGE": "safebox/backup-client:latest",
+          "NAME": "'$NAME'",
+          "UPDATE": "true",
+          "MEMORY": "64M",
+          "NETWORK": "'$NETWORK'",
+          '$ADDITIONAL',
+          '$ENVS'
+          '$PORT'
+          "VOLUMES":[
+            {
+              "SOURCE": "/etc/user/data/backup/clients/'$NAME'/backup",
+              "DEST": "/home/'$SSH_USER'/backup",
+              "TYPE": "rw"
+            },
+            {
+              "SOURCE": "/etc/user/data/backup/clients/'$NAME'/ssh",
+              "DEST": "/home/'$SSH_USER'/.ssh",
+              "TYPE": "rw"
+            }
+          ],
+          "POST_START": []
+        }
+      ]
+    }' | jq -r . >/etc/user/config/services/service-backup-client-$NAME.json
+
+    debug "service-backup-client-$NAME.json start info"
+    $service_exec service-backup-client-$NAME.json start info &
+
+  fi
+
+}
+
+backup_challenge_clients() {
+
+  echo "backup_challenge_clients"
+
+}
+
+restore_from_backup() {
+
+  echo "restore_from_backup"
+
+}
+
+create_htpasswd_file() {
+
+  local USER="$1"
+  local PASSWD="$2"
+
+  if [ ! -f "$HTPASSWD_FILE" ]; then
+    install -m 664 -g 65534 /dev/null $HTPASSWD_FILE
+    htpasswd -cb $HTPASSWD_FILE $USER $PASSWD
+  fi
+}
 
 deploy_additionals() {
 
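The new functions build both the backup-server service definition and its secret file by splicing shell variables into a JSON string and normalizing it with `jq -r .`. A small stand-alone sketch of that technique with made-up values; the real values come from the decoded task payload and only a subset of fields is shown:

```sh
# Illustrative values and a temporary secret directory for the demo only.
SSH_HOST=localhost; SSH_USER=backup; SSH_PORT=20022
SECRET_DIR=/tmp/secret-demo

mkdir -p "$SECRET_DIR/backup/server"
# Same pattern as generate_backup_server_secrets: single-quoted JSON skeleton,
# shell variables spliced in, then pretty-printed (and validated) by jq.
echo '{ "backupserver": { "SSH_HOST": "'$SSH_HOST'", "SSH_USER": "'$SSH_USER'", "SSH_PORT": "'$SSH_PORT'" } }' \
  | jq -r . > "$SECRET_DIR/backup/server/backup.json"
cat "$SECRET_DIR/backup/server/backup.json"
```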
@@ -146,14 +474,80 @@ remove_additionals() {
 
   debug "UNINSTALL: $NAME"
 
+  # delete firewall rules
+  FIREWALLS=""
+  FIREWALLS="$(ls $SERVICE_DIR/firewall-*.json | grep $NAME)"
+  for FIREWALL in $(echo $FIREWALLS); do
+    cat $FIREWALL | jq '.containers[] |= (
+      if (.ENVS | map(has("OPERATION")) | any) then
+        # If any entry has OPERATION key, update it
+        .ENVS = [.ENVS[] | if has("OPERATION") then {"OPERATION": "DELETE"} else . end]
+      else
+        # If no entry has OPERATION key, add new entry
+        .ENVS += [{"OPERATION": "DELETE"}]
+      end
+    )' >$FIREWALL.tmp
+    debug "$service_exec $FIREWALL.tmp start info"
+    $service_exec $FIREWALL.tmp start info
+    rm $FIREWALL.tmp
+  done
+
+  # delete domains
+  DOMMAINS=""
+  DOMAINS="$(ls $SERVICE_DIR/domain-*.json | grep $NAME)"
+  for DOMAIN in $(echo $DOMAINS); do
+    cat $DOMAIN | jq '.containers[] |= (
+      if (.ENVS | map(has("OPERATION")) | any) then
+        # If any entry has OPERATION key, update it
+        .ENVS = [.ENVS[] | if has("OPERATION") then {"OPERATION": "DELETE"} else . end]
+      else
+        # If no entry has OPERATION key, add new entry
+        .ENVS += [{"OPERATION": "DELETE"}]
+      end
+    )' >$DOMAIN.tmp
+    debug "$service_exec $DOMAIN.tmp start info"
+    $service_exec $DOMAIN.tmp start info
+    rm $DOMAIN.tmp
+  done
+
+  # remove related directories and files
+  # get volume destinations
+  DESTINATIONS=""
+  VOLUMES=""
+  DESTINATIONS=$(cat $SERVICE_DIR/service-$NAME.json | jq -r '[.containers[] | select(has("VOLUMES")) | .VOLUMES[] | select(.SHARED != "true") | .SOURCE] | unique[]' | grep $NAME)
+  for DESTINATION in $(echo $DESTINATIONS); do
+    if [ -d "$DESTINATION" ] || [ -f "$DESTINATION" ]; then
+      rm -rf $DESTINATION
+      debug "deleted directory or file: $DESTINATION"
+    fi
+  done
+
+  ENV_FILES=$(cat $SERVICE_DIR/service-$NAME.json | jq -r '[.containers[] | select(has("ENV_FILES")) | .ENV_FILES[]] | unique[]')
+  for ENV_FILE in $(echo $ENV_FILES); do
+    if [ -f "$ENV_FILE" ]; then
+      rm -rf $ENV_FILE
+      debug "deleted enviroment file: $ENV_FILE"
+    fi
+  done
+
+  VOLUMES=$(cat $SERVICE_DIR/service-$NAME.json | jq -r '[.containers[] | select(has("VOLUMES")) | .VOLUMES[] | select(.SHARED != "true") | .SOURCE] | unique[]' | grep -vE 'USER|SYSTEM')
+
   # stop service
   # force - remove stopped container, docker rm
   debug "$service_exec service-$NAME.json stop force dns-remove"
   $service_exec service-$NAME.json stop force dns-remove
+
+  for VOLUME in $(echo $VOLUMES | grep -vE 'USER|SYSTEM|SHARED'); do
+    if [ "$(echo $VOLUME | cut -d '/' -f1)" ]; then
+      docker volume rm $VOLUME
+      debug "deleted volume: $VOLUME"
+    fi
+  done
+
   # remove service files
   rm $SERVICE_DIR/*"-"$NAME.json # service, domain, etc.
-  rm $SECRET_DIR/$NAME/$NAME.json
 }
 
 get_repositories() {
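remove_additionals now rewrites every matching firewall-*.json and domain-*.json so that each container's ENVS list carries OPERATION=DELETE before the service is run one last time. The jq filter can be tried on a stand-alone sample; the file name and its structure here are illustrative only:

```sh
cat > /tmp/firewall-sample.json <<'EOF'
{ "containers": [ { "NAME": "fw", "ENVS": [ { "PORT": "443" } ] } ] }
EOF

# Same filter as in the diff: update an existing OPERATION entry or append one.
jq '.containers[] |= (
  if (.ENVS | map(has("OPERATION")) | any) then
    .ENVS = [.ENVS[] | if has("OPERATION") then {"OPERATION": "DELETE"} else . end]
  else
    .ENVS += [{"OPERATION": "DELETE"}]
  end
)' /tmp/firewall-sample.json
```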
@@ -163,15 +557,20 @@ get_repositories() {
   local TREES=""
   local REPO
 
+  if [ ! -f "/etc/user/config/repositories.json" ]; then
+    create_repositories_json
+  fi
+
   REPOS=$(jq -r .repositories[] /etc/user/config/repositories.json) # list of repos, delimiter by space
   for REPO in $REPOS; do
 
     BASE=$(basename $REPO | cut -d '.' -f1)
     if [ ! -d "/tmp/$BASE" ]; then
-      git clone $REPO /tmp/$BASE >/dev/null
+      GIT_HTTP_CONNECT_TIMEOUT=10 GIT_HTTP_TIMEOUT=30 git clone $REPO /tmp/$BASE >/dev/null
     else
       cd /tmp/$BASE
-      git pull >/dev/null
+      date >> /tmp/pull.log
+      GIT_HTTP_CONNECT_TIMEOUT=10 GIT_HTTP_TIMEOUT=30 git pull >> /tmp/pull.log
     fi
     if [ -f "/tmp/$BASE/applications-tree.json" ]; then
       TREES=$TREES" /tmp/$BASE/applications-tree.json"
@@ -187,6 +586,9 @@ check_volumes() {
 if [ ! -d "/var/tmp/shared" ]; then
   /usr/bin/docker volume create SHARED
   RET=0
+else
+  rm -rf /var/tmp/shared/input/*
+  rm -rf /var/tmp/shared/output/*
 fi
 
 if [ ! -d "/etc/system/data/" ]; then
@@ -429,30 +831,30 @@ check_update() {
 
 REPOSITORY_URL=$(echo $IMAGE | cut -d '/' -f1)
 
 # if image repository url doesn't contain dot (safebox)
 if [[ "$(echo "$REPOSITORY_URL" | grep '\.')" == "" ]]; then
   REMOTE_URL="registry.hub.docker.com"
   TEMP_PATH=$IMAGE
-  TEMP_IMAGE=$(echo $TEMP_PATH | cut -d ':' -f1)
-  TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:{$TEMP_IMAGE}:pull" | jq -r .token)
-  TOKEN_HEADER='-H "Authorization: Bearer '$TOKEN'"'
-else
-  REMOTE_URL=""
-  # -f2- IMAGE can contain subdirectories
-  TEMP_PATH=$(echo $IMAGE | cut -d '/' -f2-)
-  TOKEN=""
-  TOKEN_HEADER=""
-fi
-
-debug "TEMP PATH: $TEMP_PATH"
 TEMP_IMAGE=$(echo $TEMP_PATH | cut -d ':' -f1)
-TEMP_VERSION=$(echo $TEMP_PATH | cut -d ':' -f2)
-if [ "$TEMP_VERSION" == "$TEMP_IMAGE" ]; then # version is not set
-  TEMP_VERSION="latest"
-fi
-REMOTE_URL="https://$REMOTE_URL/v2/$TEMP_IMAGE/manifests/$TEMP_VERSION"
-debug "REMOTE_URL: $REMOTE_URL"
+  TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:{$TEMP_IMAGE}:pull" | jq -r .token)
+  TOKEN_HEADER='-H "Authorization: Bearer '$TOKEN'"'
+else
+  REMOTE_URL=""
+  # -f2- IMAGE can contain subdirectories
+  TEMP_PATH=$(echo $IMAGE | cut -d '/' -f2-)
+  TOKEN=""
+  TOKEN_HEADER=""
+fi
+
+debug "TEMP PATH: $TEMP_PATH"
+TEMP_IMAGE=$(echo $TEMP_PATH | cut -d ':' -f1)
+TEMP_VERSION=$(echo $TEMP_PATH | cut -d ':' -f2)
+if [ "$TEMP_VERSION" == "$TEMP_IMAGE" ]; then # version is not set
+  TEMP_VERSION="latest"
+fi
+
+REMOTE_URL="https://$REMOTE_URL/v2/$TEMP_IMAGE/manifests/$TEMP_VERSION"
+debug "REMOTE_URL: $REMOTE_URL"
 
 # Check whether repository url is available
 #CURL_CHECK="curl -m 5 -s -o /dev/null -w "%{http_code}" https://$REPOSITORY_URL/v2/"
@@ -460,12 +862,12 @@ check_update() {
 CURL_CHECK_CODE=$(eval $CURL_CHECK)
 
 # if valid accessible url
-if [[ "$CURL_CHECK_CODE" == "200" ]] ; then
+if [[ "$CURL_CHECK_CODE" == "200" ]]; then
   debug "$REMOTE_URL repository accessed successfully"
 
   #digest=$(curl --silent -H "Accept: application/vnd.docker.distribution.manifest.v2+json" "$REMOTE_URL" | jq -r '.config.digest');
   # Digest for the whole manifest, which includes all architectures.
   CURL_DIGEST='curl -s -I '"$TOKEN_HEADER"' -H "Accept: application/vnd.oci.image.index.v1+json" '"$REMOTE_URL"' | grep -i Docker-Content-Digest | cut -d " " -f2 | tr -d "\r\n"'
   digest=$(eval $CURL_DIGEST)
 
   #debug "docker images -q --no-trunc $REPOSITORY_URL/$TEMP_IMAGE:$TEMP_VERSION";
@@ -497,7 +899,7 @@ check_update() {
   debug "$REMOTE_URL not accessible, http error code: $CURL_CHECK_CODE"
 
   echo "Force image pull has started without digest check..."
-  DOCKER_PULL="docker pull $IMAGE"
+  DOCKER_PULL="/usr/bin/docker pull $IMAGE"
   eval $DOCKER_PULL
   STATUS=$?
   debug "PULL STATUS: $STATUS"
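check_update decides whether to pull by asking the registry for the manifest digest and falling back to a forced pull when the registry is unreachable. A hedged sketch of the two requests behind that check for a Docker Hub image; library/alpine is only an example, the scheduler builds the URL from $TEMP_IMAGE and $TEMP_VERSION:

```sh
IMAGE=library/alpine
TAG=latest

# 1. Anonymous pull token for the repository.
TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$IMAGE:pull" | jq -r .token)

# 2. HEAD request for the manifest; the Docker-Content-Digest header names the
#    multi-arch manifest list, which is what the update check records.
curl -s -I -H "Authorization: Bearer $TOKEN" \
  -H "Accept: application/vnd.oci.image.index.v1+json" \
  "https://registry.hub.docker.com/v2/$IMAGE/manifests/$TAG" \
  | grep -i Docker-Content-Digest
```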
@@ -511,7 +913,11 @@ check_update() {
 
 upgrade_scheduler() {
 
-DOCKER_START="--entrypoint=sh $DOCKER_REGISTRY_URL/$FRAMEWORK_SCHEDULER_IMAGE:$FRAMEWORK_SCHEDULER_VERSION -c '/scripts/upgrade.sh'"
+# Upgrading framework scheduler
+debug "Upgrading framework scheduler..."
+/usr/bin/docker pull "$DOCKER_REGISTRY_URL/$FRAMEWORK_SCHEDULER_IMAGE:$FRAMEWORK_SCHEDULER_VERSION"
+
+FRAMEWORK_SCHEDULER_NAME="$FRAMEWORK_SCHEDULER_NAME-$(head /dev/urandom | tr -dc '0-9' | head -c 6)"
+
 DOCKER_RUN="/usr/bin/docker run -d \
 -v SHARED:/var/tmp/shared \
@@ -523,40 +929,48 @@ upgrade_scheduler() {
 -v USER_CONFIG:/etc/user/config \
 -v USER_SECRET:/etc/user/secret \
 --restart=always \
+--name $FRAMEWORK_SCHEDULER_NAME \
+$DOCKER_REGISTRY_ENVS \
+$SET_DEBUG_MODE \
 --env WEBSERVER_PORT=$WEBSERVER_PORT \
 --network $FRAMEWORK_SCHEDULER_NETWORK \
 --env RUN_FORCE=$RUN_FORCE \
 --env DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL \
 $DOCKER_START"
+echo "DOCKER RUN: $DOCKER_RUN"
 eval "$DOCKER_RUN"
 }
 
 upgrade() {
   local NAME=$1
 
-  if [ "$NAME" == "web-installer" ]; then
-    debug "$service_exec service-framework-scheduler.containers.webserver start info"
-    $service_exec service-framework-scheduler.containers.webserver stop force
-    $service_exec service-framework-scheduler.containers.webserver start info &
-  else
-    debug "$service_exec service-$NAME.json start info"
-    $service_exec service-$NAME.json stop force
-    $service_exec service-$NAME.json start info &
-  fi
+  if [ "$NAME" == "webserver" ]; then
+    debug "$service_exec service-framework.containers.$NAME stop force"
+    $service_exec service-framework.containers.$NAME stop force
+    debug "$service_exec service-framework.containers.$NAME start info"
+    $service_exec service-framework.containers.$NAME start info &
+  else
+    debug "$service_exec $NAME.json stop force"
+    $service_exec $NAME.json stop force
+    debug "$service_exec $NAME.json start info"
+    $service_exec $NAME.json start info &
+  fi
+
+  PID=$!
 }
 
 execute_task() {
   TASK="$1"
   B64_JSON="$2"
   DATE=$(date +"%Y%m%d%H%M")
 
   # Executing task
-  debug "TASK: $(echo $TASK | cut -d ':' -f1)"
   TASK_NAME=$(echo $TASK | cut -d ':' -f1)
+  if [ "$TASK_NAME" != "check_vpn" ]; then
+    debug "TASK: $(echo $TASK_NAME | cut -d ':' -f1)"
+  fi
+
   # checking sytem status
   SYSTEM_STATUS=$(ls /etc/user/config/services/*.json | grep -v service-framework.json)
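upgrade_scheduler now pulls the target image, starts a replacement container under a randomized name, and leaves it to the caller to remove the current container via $HOSTNAME (inside a container, the hostname defaults to the container ID). A compressed sketch of that hand-over pattern with placeholder names; it is not the scheduler's exact invocation:

```sh
# Placeholder image/name; the real values come from the registry variables above.
NEW_NAME="framework-scheduler-$(head /dev/urandom | tr -dc '0-9' | head -c 6)"

/usr/bin/docker pull registry.example/framework-scheduler:latest
/usr/bin/docker run -d --restart=always --name "$NEW_NAME" \
  -v /var/run/docker.sock:/var/run/docker.sock \
  registry.example/framework-scheduler:latest

# The old instance removes itself last; $HOSTNAME is the running container's ID.
sleep 1
/usr/bin/docker rm -f "$HOSTNAME"
```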
@@ -579,6 +993,24 @@ execute_task() {
   #fi;
   JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "INSTALL_STATUS": "'$INSTALL_STATUS'" }' | jq -r . | base64 -w0)
 
+elif [ "$TASK_NAME" == "request_letsencrypt" ]; then
+  DOMAINS=$(echo $B64_JSON | base64 -d | jq -r 'keys[]')
+  for DOMAIN in $(echo $DOMAINS); do
+    REQUEST=$(echo $B64_JSON | base64 -d | jq -r ".[\"$DOMAIN\"].status")
+
+    if [ "$REQUEST" == "requested" ]; then
+      echo "New certificate for $DOMAIN is requested."
+      echo "Modifying $DOMAIN_FILE.json for $DOMAIN"
+      jq '.containers[0].ENVS |= map(if has("OPERATION") then .OPERATION = "MODIFY" else . end) | \
+        .containers[0].ENVS |= map(if has("DOMAIN") then .DOMAIN = "'$DOMAIN'" else . end)' \
+        /etc/user/config/services/$DOMAIN_FILE.json > /tmp/$DOMAIN_FILE.json && \
+        mv /tmp/$DOMAIN_FILE.json /etc/user/config/services/$DOMAIN_FILE.json
+      debug "$service_exec $DOMAIN_FILE.json start info"
+      $service_exec $DOMAIN_FILE.json start info &
+    fi
+  done
+  JSON_TARGET=$B64_JSON
+
 elif [ "$TASK_NAME" == "system" ]; then
   #SYSTEM_LIST="core-dns.json cron.json domain-local-backend.json firewall-letsencrypt.json firewall-local-backend.json firewall-localloadbalancer-dns.json firewall-localloadbalancer-to-smarthostbackend.json firewall-smarthost-backend-dns.json firewall-smarthost-loadbalancer-dns.json firewall-smarthost-to-backend.json firewall-smarthostloadbalancer-from-publicbackend.json letsencrypt.json local-backend.json local-proxy.json service-framework.json smarthost-proxy-scheduler.json smarthost-proxy.json"
   SYSTEM_LIST="core-dns.json cron.json letsencrypt.json local-proxy.json service-framework.json smarthost-proxy-scheduler.json smarthost-proxy.json"
@@ -704,18 +1136,21 @@ execute_task() {
 elif [ "$TASK_NAME" == "deployments" ]; then
   DEPLOYMENTS=""
   TREES=$(get_repositories)
-  for TREE in $TREES; do
+  for TREE in "$TREES"; do
     APPS=$(jq -rc '.apps[]' $TREE)
-    for APP in $APPS; do
+    #for APP in "$APPS"; do #space problem
+    while IFS= read -r APP; do
       APP_NAME=$(echo "$APP" | jq -r '.name')
+      APP_SUBTITLE="$(echo "$APP" | jq -r '.subtitle')"
       APP_VERSION=$(echo "$APP" | jq -r '.version')
+      APP_ICON=$(echo "$APP" | jq -r '.icon')
       if [ "$DEPLOYMENTS" != "" ]; then
         SEP=","
       else
         SEP=""
       fi
-      DEPLOYMENTS=$DEPLOYMENTS$SEP'"'$APP_NAME'": "'$APP_VERSION'"'
-    done
+      DEPLOYMENTS="$DEPLOYMENTS"$SEP'"'$APP_NAME'":{"subtitle":"'"$APP_SUBTITLE"'","version":"'"$APP_VERSION"'","icon":"'"$APP_ICON"'"}'
+    done < <(echo "$APPS") # preserve DEPLOYMENTS variable
   done
   if [ "$DEPLOYMENTS" == "" ]; then
     DEPLOYMENTS='"deployments": "NONE"'
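The deployments loop switches from `for APP in $APPS` to `while IFS= read -r APP` fed by process substitution for two reasons: the JSON objects printed by `jq -rc` contain spaces (the "#space problem" note), and a pipeline would run the loop body in a subshell and discard the DEPLOYMENTS string it accumulates. A small stand-alone sketch of the pattern, assuming a bash-compatible shell since process substitution is not plain POSIX sh:

```sh
APPS='{"name":"App One","version":"1.0"}
{"name":"App Two","version":"2.0"}'

OUT=""
# Reading from a process substitution keeps the loop in the current shell,
# so OUT still holds both names after the loop; piping into the loop would not.
while IFS= read -r APP; do
  OUT="$OUT $(echo "$APP" | jq -r '.name')"
done < <(echo "$APPS")
echo "collected:$OUT"
```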
@@ -749,20 +1184,22 @@ execute_task() {
 
   for TREE in $TREES; do
     APPS=$(jq -rc '.apps[]' $TREE)
-    for APP in $APPS; do
+    #for APP in $APPS; do
+    while IFS= read -r APP; do
       APP_NAME=$(echo "$APP" | jq -r '.name' | awk '{print tolower($0)}')
+      APP_SUBTITLE=$(echo "$APP" | jq -r '.subtitle')
       APP_VERSION=$(echo "$APP" | jq -r '.version')
       APP_DIR=$(dirname $TREE)"/"$APP_NAME
-      debug "$APP_TEMPLATE"
       if [ "$APP_NAME" == "$DEPLOY_NAME" ]; then
         if [ "$DEPLOY_ACTION" == "ask" ]; then
           APP_TEMPLATE=$APP_DIR"/template.json"
           TEMPLATE=$(cat $APP_TEMPLATE | base64 -w0)
-          JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "0", "TEMPLATE": "'$TEMPLATE'" }' | jq -r . | base64 -w0)
+          debug "$APP_TEMPLATE"
+          JSON_TARGET=$(echo '{"DATE":"'$DATE'","STATUS": "0","TEMPLATE":"'$TEMPLATE'"}' | jq -r . | base64 -w0)
         elif [ "$DEPLOY_ACTION" == "reinstall" ]; then
           APP_TEMPLATE=$APP_DIR"/template.json"
           TEMPLATE=$(cat $APP_TEMPLATE)
-          for LINE in $(cat $SERVICE_DIR/service-$DEPLOY_NAME.json | jq -rc '.containers[].ENVS[] | to_entries[]'); do
+          for LINE in $(cat $SERVICE_DIR/service-$DEPLOY_NAME.json | jq -rc '.containers[].ENVS[] | to_entries[]' 2>/dev/null); do
            KEY=$(echo $LINE | jq -r .key)
            VALUE=$(echo $LINE | jq -r .value)
            debug "$KEY: $VALUE"
@@ -771,14 +1208,14 @@ execute_task() {
            TEMPLATE=$(echo "$TEMPLATE" | jq -r '.fields |= map(if .key == "'$KEY'" then .value = "'$VALUE'" else . end)')
           done
           # write ENV value from domain file to template value by key name
-          for LINE in $(cat $SERVICE_DIR/domain-$DEPLOY_NAME.json | jq -rc '.containers[].ENVS[] | to_entries[]'); do
+          for LINE in $(cat $SERVICE_DIR/domain-$DEPLOY_NAME.json | jq -rc '.containers[].ENVS[] | to_entries[]' 2>/dev/null); do
            KEY=$(echo $LINE | jq -r .key)
            VALUE=$(echo $LINE | jq -r .value)
            debug "$KEY: $VALUE"
            TEMPLATE=$(echo "$TEMPLATE" | jq -r '.fields |= map(if .key == "'$KEY'" then .value = "'$VALUE'" else . end)')
           done
           # write ENV value from secret file to template value by key name
-          for LINE in $(cat $SECRET_DIR/$DEPLOY_NAME/$DEPLOY_NAME.json | jq -rc '.[] | to_entries[]'); do
+          for LINE in $(cat $SECRET_DIR/$DEPLOY_NAME/$DEPLOY_NAME.json | jq -rc '.[] | to_entries[]' 2>/dev/null); do
            KEY=$(echo $LINE | jq -r .key)
            VALUE=$(echo $LINE | jq -r .value)
            debug "$KEY: $VALUE"
@@ -788,6 +1225,7 @@ execute_task() {
 
           TEMPLATE=$(echo "$TEMPLATE" | base64 -w0)
           JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "0", "TEMPLATE": "'$TEMPLATE'" }' | jq -r . | base64 -w0)
+
         elif [ "$DEPLOY_ACTION" == "deploy" ]; then
           JSON_TARGET=""
           #JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "1" }' | jq -r . | base64 -w0) # deployment has started
@@ -803,6 +1241,18 @@ execute_task() {
           DEPLOY_PAYLOAD=$(echo "$JSON" | jq -r .PAYLOAD) # base64 list of key-value pairs in JSON
           deploy_additionals "$APP_DIR" "$DEPLOY_NAME" "$DEPLOY_PAYLOAD"
           sh /scripts/check_pid.sh "$PID" "$SHARED" "deploy-$DEPLOY_NAME" "$DATE" "$DEBUG" &
+
+        elif [ "$DEPLOY_ACTION" == "edit" ]; then
+          JSON_TARGET=""
+          DEPLOY_PAYLOAD=$(echo "$JSON" | jq -r .PAYLOAD) # base64 list of key-value pairs in JSON
+
+          # stop service before edit
+          debug "$service_exec service-$DEPLOY_NAME.json stop force"
+          $service_exec service-$DEPLOY_NAME.json stop force
+
+          deploy_additionals "$APP_DIR" "$DEPLOY_NAME" "$DEPLOY_PAYLOAD"
+          sh /scripts/check_pid.sh "$PID" "$SHARED" "deploy-$DEPLOY_NAME" "$DATE" "$DEBUG" &
+
         elif [ "$DEPLOY_ACTION" == "uninstall" ]; then
           remove_additionals "$APP_DIR" "$DEPLOY_NAME"
           # uninstall has finished
@@ -812,7 +1262,7 @@ execute_task() {
           JSON_TARGET=""
         fi
       fi
-    done
+    done < <(echo "$APPS") # preserve variables
   done
 
 elif [ "$TASK_NAME" == "repositories" ]; then
@@ -839,20 +1289,25 @@ execute_task() {
 
   VPN_STATUS="0"
   VPN_RESULT=""
-  CONTAINERS=$(docker ps -a --format '{{.Names}} {{.Status}}' | grep -w wireguardproxy)
-  if [ "$CONTAINERS" != "" ]; then
-    UP=$(echo $CONTAINERS | grep -w 'Up')
-    if [ "$UP" != "" ]; then
-      VPN_STATUS="2"
-    else
-      VPN_STATUS="1"
-    fi
-    VPN_RESULT=$(echo "$CONTAINERS" | base64 -w0)
-  fi
+  if [ -f $SECRET_DIR/vpn-proxy/wg0.conf ]; then
+    CONTAINERS=$(docker ps -a --format '{{.Names}} {{.Status}}' | grep -w wireguardproxy)
+    if [ "$CONTAINERS" != "" ]; then
+      UP=$(echo $CONTAINERS | grep -w 'Up')
+      if [ "$UP" != "" ]; then
+        VPN_STATUS="1"
+      else
+        VPN_STATUS="2"
+      fi
+      VPN_RESULT=$(echo "$CONTAINERS" | base64 -w0)
+    fi
+  fi
   JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "'$VPN_STATUS'", "RESULT": "'$VPN_RESULT'" }' | jq -r . | base64 -w0)
 
 elif [ "$TASK_NAME" == "save_vpn" ]; then
+
+  JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "0", "RESULT": "" }' | jq -r . | base64 -w0)
+  add_json_target
+
   VPN_PROXY_REPO="wireguard-proxy-client"
   if [ ! -d "/tmp/$VPN_PROXY_REPO" ]; then
     git clone https://git.format.hu/safebox/$VPN_PROXY_REPO.git /tmp/$VPN_PROXY_REPO >/dev/null
@@ -870,29 +1325,101 @@ execute_task() {
   # install vpn only
   sh /scripts/install.sh "$B64_JSON" "$service_exec" "vpn" "$GLOBAL_VERSION"
 
-  JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "'$VPN_STATUS'", "RESULT": "'$VPN_RESULT'" }' | jq -r . | base64 -w0)
+  JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "1", "RESULT": "'$VPN_RESULT'" }' | jq -r . | base64 -w0)
 
 elif [ "$TASK_NAME" == "containers" ]; then # not in use
   CONTAINERS=$(docker ps -a --format '{{.Names}} {{.Status}}' | grep -v framework-scheduler)
   RESULT=$(echo "$CONTAINERS" | base64 -w0)
   JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "RESULT": "'$RESULT'" }' | jq -r . | base64 -w0)
+
+elif [ "$TASK_NAME" == "backup" ]; then
+
+  TASK_TYPE=$(echo $B64_JSON | base64 -d | jq -r '.TASK_TYPE')
+
+  if [ "$TASK_TYPE" == "backup_query_state" ]; then
+    echo "task type is backup_query_state"
+
+  elif [ "$TASK_TYPE" == "backup_set_service" ]; then
+
+    BACKUP_PASSWORD="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_SERVER_PASSWORD')"
+    PERIOD="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_PERIOD')"
+    COMPRESSION="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_COMPRESSION')"
+    PLANNED_TIME="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_PLANNED_TIME')"
+    DIRECTRIES="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_DIRECTORIES')"
+    SERVICES="$(echo $B64_JSON | base64 -d | jq -r '.SERVICES')"
+    BACKUP_LOCAL_CLIENTS="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_LOCAL_CLIENTS')"
+    BACKUP_VPN_CLIENTS="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_VPN_CLIENTS')"
+    VPN="$(echo $B64_JSON | base64 -d | jq -r '.VPN')"
+    SSH_HOST="$(echo $B64_JSON | base64 -d | jq -r '.SSH_HOST')"
+    SSH_PORT="$(echo $B64_JSON | base64 -d | jq -r '.SSH_PORT')"
+    SSH_USER="$(echo $B64_JSON | base64 -d | jq -r '.SSH_USER')"
+    SSH_PASSWORD="$(echo $B64_JSON | base64 -d | jq -r '.SSH_PASSWORD')"
+    OPERATION="$(echo $B64_JSON | base64 -d | jq -r '.OPERATION')"
+
+    echo "task type is backup_set_service"
+    backup_set_service "$BACKUP_PASSWORD" "$PERIOD" "$COMPRESSION" "$PLANNED_TIME" "$DIRECTRIES" "$SERVICES" "$SSH_HOST" "$VPN" "$SSH_PORT" "$SSH_USER" "$SSH_PASSWORD" "$OPERATION"
+
+  elif [ "$TASK_TYPE" == "backup_set_client" ]; then
+
+    NAME="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_CLIENT_NAME')"
+    SIZE="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_CLIENT_SIZE')"
+    VPN="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_CLIENT_VPN')"
+    SSH_PORT="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_CLIENT_SSH_PORT')"
+    SSH_USER="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_CLIENT_SSH_USER')"
+    SSH_PASSWORD="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_CLIENT_SSH_PASSWORD')"
+    OPERATION="$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_CLIENT_OPERATION')"
+
+    debug "task type is backup_set_client for $NAME"
+    backup_set_client "$NAME" "$SIZE" "$VPN" "$SSH_PORT" "$SSH_USER" "$SSH_PASSWORD" "$OPERATION"
+
+  elif [ "$TASK_TYPE" == "backup_challenge_clients" ]; then
+    echo "task type is backup_challenge_clients"
+
+  elif [ "$TASK_TYPE" == "restore_from_backup" ]; then
+    echo "task type is restore_from_backup"
+
+  else
+    echo "Unknown task type: $TASK_TYPE"
+  fi
+
+  RESULT=$(echo "$CONTAINERS" | base64 -w0)
+  JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "RESULT": "'$RESULT'" }' | jq -r . | base64 -w0)
+
 elif [ "$TASK_NAME" == "upgrade" ]; then
   JSON="$(echo $B64_JSON | base64 -d)"
   NAME=$(echo "$JSON" | jq -r .NAME | awk '{print tolower($0)}')
   if [ "$NAME" == "framework" ]; then
-    upgrade_scheduler
-    upgrade "web-installer"
-  else
-    upgrade "$NAME"
-  fi
+    JSON_TARGET=$(echo '{"DATE":"'$DATE'","STATUS":1}' | jq -r . | base64 -w0)
+    add_json_target $NAME
+    echo "Upgrading service: webserver"
+    upgrade webserver
+
+    echo "Upgrading framework scheduler..."
+    upgrade_scheduler
+    echo "Removing old framework scheduler container..."
+    JSON_TARGET=$(echo '{"DATE":"'$DATE'","STATUS":2,"VERSION":"'$VERSION'"}' | jq -r . | base64 -w0)
+    add_json_target $NAME
+    sleep 1
+    /usr/bin/docker rm -f $HOSTNAME
+
+    JSON_TARGET="" # do not create upgrade.json
+
+    #CONTAINERS=$(docker ps -a --format '{{.Names}} {{.Status}}' | grep -E 'framework-scheduler|webserver')
+  else
+    echo "Upgrading service: $NAME"
+    upgrade "$NAME"
+    #CONTAINERS=$(docker ps -a --format '{{.Names}} {{.Status}}' | grep -w "$NAME")
+  fi
+  #RESULT=$(echo "$CONTAINERS" | base64 -w0)
+  sh /scripts/check_pid.sh "$PID" "$SHARED" "$TASK_NAME-$NAME" "$DATE" "$DEBUG" &
 fi
 
-debug "JSON_TARGET: $JSON_TARGET"
+# if [ "$TASK_NAME" != "check_vpn" ]; then
+#   debug "JSON_TARGET: $JSON_TARGET"
+# fi
+
 if [ "$JSON_TARGET" != "" ]; then
-  #redis-cli -h $REDIS_SERVER -p $REDIS_PORT SET $TASK "$JSON_TARGET"
-  install -m 664 -g 65534 /dev/null $SHARED/output/$TASK.json
-  echo $JSON_TARGET | base64 -d >$SHARED/output/$TASK.json
+  add_json_target
 fi
 
 }
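The new backup branch shows the task protocol used throughout execute_task: a JSON file dropped into $SHARED/input is base64-encoded and individual fields are pulled out with `base64 -d | jq -r`. A stand-alone sketch of that round trip; the field names follow the diff, the values are made up:

```sh
# Build a payload the way the web installer would.
B64_JSON=$(printf '%s' '{ "TASK_TYPE": "backup_set_client", "BACKUP_CLIENT_NAME": "laptop", "BACKUP_CLIENT_SSH_PORT": "20022" }' | base64 -w0)

# Extract fields the way the scheduler does.
TASK_TYPE=$(echo $B64_JSON | base64 -d | jq -r '.TASK_TYPE')
NAME=$(echo $B64_JSON | base64 -d | jq -r '.BACKUP_CLIENT_NAME')
echo "task=$TASK_TYPE client=$NAME"
```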
@@ -962,39 +1489,15 @@ check_redis_availability() {
 done
 }
 
-start_framework_scheduler() {
-
-if [ "$DEBUG_MODE" == "true" ]; then
-DOCKER_START="--entrypoint=sh $DOCKER_REGISTRY_URL/$FRAMEWORK_SCHEDULER_IMAGE:$FRAMEWORK_SCHEDULER_VERSION -c 'sleep 86400'"
-else
-DOCKER_START="$DOCKER_REGISTRY_URL/$FRAMEWORK_SCHEDULER_IMAGE:$FRAMEWORK_SCHEDULER_VERSION"
-fi
-DOCKER_RUN="/usr/bin/docker run -d \
--v SHARED:/var/tmp/shared \
--v /var/run/docker.sock:/var/run/docker.sock \
--v SYSTEM_DATA:/etc/system/data \
--v SYSTEM_CONFIG:/etc/system/config \
--v SYSTEM_LOG:/etc/system/log \
--v USER_DATA:/etc/user/data \
--v USER_CONFIG:/etc/user/config \
--v USER_SECRET:/etc/user/secret \
---restart=always \
---name $FRAMEWORK_SCHEDULER_NAME \
---env WEBSERVER_PORT=$WEBSERVER_PORT \
---network $FRAMEWORK_SCHEDULER_NETWORK \
---env RUN_FORCE=$RUN_FORCE \
---env DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL \
-$DOCKER_START"
-eval "$DOCKER_RUN"
-
-}
-
 ### SYSTEM INITIALIZATION ###
-## DOCKER NETWORK VARIABLES
-## FILESYSTEM VARIABLES
-## PORTS VARIABLES
-### RESTART SCHEDULER IF NEEDED
+## Start prevously deployed firewall rules depend on framework scheduler startup at first time
+
+if [ -d /etc/user/config/services ]; then
+  cd /etc/user/config/services
+  for FIREWALL in $(ls firewall*.json); do
+    $service_exec $FIREWALL start &
+  done
+fi
+
 SN=$(check_subnets)
 if [ "$SN" != "1" ]; then
@@ -1011,7 +1514,7 @@ fi
 
 VOL=$(check_volumes)
 if [ "$VOL" != "1" ]; then
-  start_framework_scheduler
+  upgrade_scheduler
   /usr/bin/docker rm -f $HOSTNAME
 fi
 
@@ -1023,15 +1526,12 @@ if [ "$DF" != "1" ]; then
 fi
 
 #RS=$(docker ps | grep redis-server)
-WS=$(docker ps | grep webserver)
+WS=$(/usr/bin/docker ps | grep -o webserver)
 
-#if [[ "$WS" == "" && "$RS" == "" ]]; then
-if [ "$WS" == "" ]; then
-
+if [ "$WS" == "" ] && [ ! -f $SHARED/output/upgrade-framework.json ]; then
   # START SERVICES
-  #$service_exec service-framework.containers.redis-server start &
+  echo "Starting webserver"
   $service_exec service-framework.containers.webserver start &
-  sleep 5
-
 fi
 
@@ -1043,13 +1543,21 @@ DATE=$(date +%F-%H-%M-%S)
 DIR=$SHARED/input
 
 # Triggers by certificate or domain config changes
+# Set installed version number
+echo '{}' | jq --arg VERSION "$VERSION" '.VERSION = $VERSION' > $SHARED/output/version.json
+############################
+
+if [ "$DEBUG_MODE" == "true" ]; then
+  rm $DIR/*
+fi
 unset IFS
 
 inotifywait --exclude "\.(swp|tmp)" -m -e CREATE,CLOSE_WRITE,DELETE,MOVED_TO -r $DIR |
 while read dir op file; do
   if [ "${op}" == "CLOSE_WRITE,CLOSE" ]; then
-    echo "new file created: $file"
+    if [ "$file" != "check_vpn.json" ]; then
+      echo "new file created: $file"
+    fi
     B64_JSON=$(cat $DIR/$file | base64 -w0)
     TASK=$(echo $file | cut -d '.' -f1)
     execute_task "$TASK" "$B64_JSON"
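The main loop remains an inotifywait watch on $SHARED/input: every JSON file written there becomes a task named after the file. A sketch of how a task would be triggered from outside the loop, assuming the SHARED volume is mounted at /var/tmp/shared as in the docker run commands above:

```sh
# Writing upgrade.json into the watched input directory fires execute_task
# with TASK=upgrade and the file's content as the base64 payload.
cat > /var/tmp/shared/input/upgrade.json <<'EOF'
{ "NAME": "framework" }
EOF
```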
@@ -200,14 +200,18 @@ elif [ "$FIRST_INSTALL" == "vpn" ]; then
 
   get_vpn_key
 
-  edit_user_json $LETSENCRYPT_MAIL $LETSENCRYPT_SERVERNAME
-  $SERVICE_EXEC vpn-proxy stop force
-  $SERVICE_EXEC vpn-proxy start
-  echo "$INIT_SERVICE_PATH/vpn-proxy.json" >>$AUTO_START_SERVICES/.init_services
-  echo "$INIT_SERVICE_PATH/firewall-vpn-smarthost-loadbalancer" >>$AUTO_START_SERVICES/.init_services
-  echo "$INIT_SERVICE_PATH/firewall-vpn-proxy-postrouting" >>$AUTO_START_SERVICES/.init_services
-  echo "$INIT_SERVICE_PATH/firewall-vpn-proxy-prerouting" >>$AUTO_START_SERVICES/.init_services
+  if [ "$VPN_PROXY" != "no" ]; then
+
+    edit_user_json $LETSENCRYPT_MAIL $LETSENCRYPT_SERVERNAME
+    $SERVICE_EXEC vpn-proxy stop force
+    $SERVICE_EXEC vpn-proxy start
+    echo "$INIT_SERVICE_PATH/vpn-proxy.json" >>$AUTO_START_SERVICES/.init_services
+    echo "$INIT_SERVICE_PATH/firewall-vpn-smarthost-loadbalancer" >>$AUTO_START_SERVICES/.init_services
+    echo "$INIT_SERVICE_PATH/firewall-vpn-proxy-postrouting" >>$AUTO_START_SERVICES/.init_services
+    echo "$INIT_SERVICE_PATH/firewall-vpn-proxy-prerouting" >>$AUTO_START_SERVICES/.init_services
+
+  fi;
 
 exit