Compare commits
5a7c67c3f7 ... 1.0.5
83 commits (SHA1):

b362f2e37f
8eb3d1eef1
2b91706d86
933d182244
153249211a
c5765ca952
2bcf430dfd
bc7d30ea59
e23001223c
4a7a854f6f
5804346e42
43fcc62014
a9ba3698bd
8e3a28334e
f808a394aa
97398388d6
0839a78d41
43b529d2d0
81cc2b14ab
42c3275e19
283b42bce1
fe5ffd3add
bc87393fba
f9d43d43f9
ee0b103593
6cd309eb9a
93aebc5251
513b3f9f6a
dd9f1a1c2f
de3896d5ca
6bc1120a64
f7c6ce03ea
e1e69d5212
ca0fbeea73
f2c82d24e2
0fdbac0ea8
638f51eaa6
0f9850d6f8
c26a2f6efb
5af97a3714
6d73464c70
ea9c55b6bf
3088f3904f
c8bd49dab9
d62075e479
102b924007
682ee53d19
541ecbbd5e
3325ffeda0
53c8ff14da
9c5bc54452
30a3584a08
894925c5d9
e0c4065824
e9597802b6
7ac614714c
8f1b66e637
9663bb400f
24695b1688
e39a7494b3
782f2fa7bb
4bce6e9731
7c25aea06a
5a2ead04cc
a3296361d6
ddc57f72bc
9d8d690d1a
ef05ce2a60
18e178d519
7cd01799e2
8016e2ebe0
580866a0e6
1eb082cc47
b74ac56acd
e3c4a3e622
e37a64af25
642ae39dac
917f9fee87
bb089578c1
d00626a5e7
ae6812e8fa
ecb95ab194
cdfd9df079
.drone.yml (46 changed lines)

@@ -3,21 +3,42 @@ type: kubernetes
name: default

node_selector:
physical-node: dev2
physical-node: dev1

trigger:
branch:
- main
event:
- push
- tag

workspace:
path: /drone/src

steps:
- name: build multiarch from dev
- name: pull image to dockerhub
image: docker.io/owncloudci/drone-docker-buildx:4
privileged: true
settings:
cache-from: [ "safebox/framework-scheduler" ]
repo: safebox/framework-scheduler
tags: latest
username:
from_secret: dockerhub-username
password:
from_secret: dockerhub-password
platforms:
- linux/amd64
- linux/arm64
when:
event:
- tag

- name: build multiarch from dev
image: docker.io/owncloudci/drone-docker-buildx:4
privileged: true
#environment:
# DOCKER_PLUGIN_MIRROR: "https://mirror.dev.format.hu"
settings:
cache-from: [ "registry.dev.format.hu/framework-scheduler" ]
registry: registry.dev.format.hu
repo: registry.dev.format.hu/framework-scheduler
tags: latest

@@ -29,17 +50,6 @@ steps:
platforms:
- linux/amd64
- linux/arm64

- name: pull image to dockerhub
image: docker.io/owncloudci/drone-docker-buildx:4
privileged: true
settings:
repo: safebox/framework-scheduler
tags: latest
username:
from_secret: dockerhub-username
password:
from_secret: dockerhub-password
platforms:
- linux/amd64
- linux/arm64
when:
event:
- push
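For orientation, the dockerhub step above drives the drone-docker-buildx plugin; it corresponds roughly to the manual invocation below. This is a sketch only: the builder setup and docker login are assumptions, while the repo, tag and platform values are taken from the settings block above.

    # rough manual equivalent of the plugin step; assumes docker login and a buildx builder already exist
    docker buildx build \
      --platform linux/amd64,linux/arm64 \
      --tag safebox/framework-scheduler:latest \
      --push .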
@@ -14,10 +14,10 @@ FROM alpine:latest
# COPY --from=redis-source /usr/src/redis/src/redis-cli /usr/bin/redis-cli
# RUN chmod +x /usr/bin/redis-cli

RUN apk add --update --no-cache docker-cli wget curl dos2unix jq openssl git coreutils inotify-tools acl
RUN apk add --update --no-cache docker-cli wget curl dos2unix jq openssl git coreutils inotify-tools acl apache2-utils

COPY scripts/scheduler/*.sh /scripts/
RUN find ./scripts -name "*.sh" | xargs dos2unix
RUN ["chmod", "+x", "-R", "/scripts/"]

ENTRYPOINT ["/scripts/entrypoint.sh"]
CMD /scripts/entrypoint.sh
@@ -3,7 +3,8 @@
cd /scripts
DEBUG_MODE=${DEBUG_MODE:-false}

DOCKER_REGISTRY_URL=${DOCKER_REGISTRY_URL:-registry.format.hu}
#DOCKER_REGISTRY_URL=${DOCKER_REGISTRY_URL:-registry.format.hu}
DOCKER_REGISTRY_URL=${DOCKER_REGISTRY_URL:-safebox}
USER_INIT_PATH=$USER_INIT_PATH
GLOBAL_VERSION=${GLOBAL_VERSION:-latest}
SERVICE_DIR=${SERVICE_DIR:-/etc/user/config/services}

@@ -29,6 +30,7 @@ REDIS_VERSION=${REDIS_VERSION:-latest}

SOURCE=${SOURCE:-user-config}
SMARTHOST_PROXY_PATH=$SMARTHOST_PROXY_PATH
HTPASSWD_FILE=${HTPASSWD_FILE:-/etc/system/config/smarthost-proxy/nginx/htpasswd}

GIT_URL=${GIT_URL:-git.format.hu}
REPO=$REPO

@@ -98,6 +100,19 @@ if [ -d /etc/user/config/services ]; then
done
fi

create_htpasswd_file() {

local USER="$1"
local PASSWD="$2"

if [ ! -f "$HTPASSWD_FILE" ]; then
install -m 664 -g 65534 /dev/null $HTPASSWD_FILE
htpasswd -cb $HTPASSWD_FILE $USER $PASSWD
fi
}

install -m 664 -g 65534 /dev/null

deploy_additionals() {

local DIR="$1"
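apache2-utils, added in the Dockerfile above, provides the htpasswd binary that create_htpasswd_file() calls with -cb to create the file on first use. A hedged sketch of follow-up operations against the same file; the user name and password are placeholders, only the file path comes from the HTPASSWD_FILE default above.

    # placeholders for user/password; the path is the HTPASSWD_FILE default
    htpasswd -b /etc/system/config/smarthost-proxy/nginx/htpasswd alice 'S3cret'    # add or update a user (no -c, file already exists)
    htpasswd -vb /etc/system/config/smarthost-proxy/nginx/htpasswd alice 'S3cret'   # verify the stored password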
@@ -140,13 +155,85 @@ deploy_additionals() {
}

remove_additionals() {
NAME="$1"
local DIR="$1"
local NAME="$2"

debug "UNINSTALL: $NAME"

# delete firewall rules
FIREWALLS=""
FIREWALLS="$(ls $SERVICE_DIR/firewall-*.json | grep $NAME)"
for FIREWALL in $(echo $FIREWALLS); do
cat $FIREWALL | jq '.containers[] |= (
if (.ENVS | map(has("OPERATION")) | any) then
# If any entry has OPERATION key, update it
.ENVS = [.ENVS[] | if has("OPERATION") then {"OPERATION": "DELETE"} else . end]
else
# If no entry has OPERATION key, add new entry
.ENVS += [{"OPERATION": "DELETE"}]
end
)' >$FIREWALL.tmp
debug "$service_exec $FIREWALL.tmp start info"
$service_exec $FIREWALL.tmp start info
rm $FIREWALL.tmp
done

# delete domains
DOMMAINS=""
DOMAINS="$(ls $SERVICE_DIR/domain-*.json | grep $NAME)"
for DOMAIN in $(echo $DOMAINS); do
cat $DOMAIN | jq '.containers[] |= (
if (.ENVS | map(has("OPERATION")) | any) then
# If any entry has OPERATION key, update it
.ENVS = [.ENVS[] | if has("OPERATION") then {"OPERATION": "DELETE"} else . end]
else
# If no entry has OPERATION key, add new entry
.ENVS += [{"OPERATION": "DELETE"}]
end
)' >$DOMAIN.tmp
debug "$service_exec $DOMAIN.tmp start info"
$service_exec $DOMAIN.tmp start info
rm $DOMAIN.tmp
done

# remove related directories and files
# get volume destinations
DESTINATIONS=""
VOLUMES=""
DESTINATIONS=$(cat $SERVICE_DIR/service-$NAME.json | jq -r '[.containers[] | select(has("VOLUMES")) | .VOLUMES[] | select(.SHARED != "true") | .SOURCE] | unique[]' | grep $NAME)
for DESTINATION in $(echo $DESTINATIONS); do
if [ -d "$DESTINATION" ] || [ -f "$DESTINATION" ]; then
rm -rf $DESTINATION
debug "deleted directory or file: $DESTINATION"
fi
done

ENV_FILES=$(cat $SERVICE_DIR/service-$NAME.json | jq -r '[.containers[] | select(has("ENV_FILES")) | .ENV_FILES[]] | unique[]')
for ENV_FILE in $(echo $ENV_FILES); do
if [ -f "$ENV_FILE" ]; then
rm -rf $ENV_FILE
debug "deleted enviroment file: $ENV_FILE"
fi
done

VOLUMES=$(cat $SERVICE_DIR/service-$NAME.json | jq -r '[.containers[] | select(has("VOLUMES")) | .VOLUMES[] | select(.SHARED != "true") | .SOURCE] | unique[]' | grep -vE 'USER|SYSTEM')

# stop service
debug "$service_exec service-$NAME.json stop force dns-remove &"
$service_exec service-$NAME.json stop force dns-remove &
# force - remove stopped container, docker rm
debug "$service_exec service-$NAME.json stop force dns-remove"
$service_exec service-$NAME.json stop force dns-remove

for VOLUME in $(echo $VOLUMES | grep -vE 'USER|SYSTEM|SHARED'); do
if [ "$(echo $VOLUME | cut -d '/' -f1)" ]; then
docker volume rm $VOLUME
debug "deleted volume: $VOLUME"

fi
done

# remove service files
rm $SERVICE_DIR/*"-"$NAME.json # service, domain, etc.

}

get_repositories() {
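The jq filter in remove_additionals() either rewrites an existing OPERATION entry to DELETE or appends one. A worked example on an invented firewall JSON; the container name and ENVS content are for illustration only.

    # worked example of the OPERATION rewrite; the input JSON is invented
    echo '{"containers":[{"NAME":"myapp","ENVS":[{"PORT":"80"}]}]}' | jq '.containers[] |= (
      if (.ENVS | map(has("OPERATION")) | any) then
        .ENVS = [.ENVS[] | if has("OPERATION") then {"OPERATION": "DELETE"} else . end]
      else
        .ENVS += [{"OPERATION": "DELETE"}]
      end
    )'
    # result: ENVS becomes [{"PORT":"80"},{"OPERATION":"DELETE"}]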
@@ -156,16 +243,15 @@ get_repositories() {
local TREES=""
local REPO

REPOS=$(jq -r .repositories[] /etc/user/config/repositories.json) # list of repos, delimiter by space
for REPO in $REPOS; do

BASE=$(basename $REPO | cut -d '.' -f1)
if [ ! -d "/tmp/$BASE" ]; then
git clone $REPO /tmp/$BASE >/dev/null
GIT_HTTP_CONNECT_TIMEOUT=10 GIT_HTTP_TIMEOUT=30 git clone $REPO /tmp/$BASE >/dev/null
else
cd /tmp/$BASE
git pull >/dev/null
GIT_HTTP_CONNECT_TIMEOUT=10 GIT_HTTP_TIMEOUT=30 git pull >/dev/null
fi
if [ -f "/tmp/$BASE/applications-tree.json" ]; then
TREES=$TREES" /tmp/$BASE/applications-tree.json"

@@ -338,7 +424,7 @@ create_framework_json() {

ADDITIONAL=""
ADDITIONAL='"EXTRA": "--label logging=promtail_user --label logging_jobname=containers --restart=always", "PRE_START": [], "DEPEND": [], "CMD": ""'
ENVS='"ENVS": [{"RUN_FORCE": "'$RUN_FORCE'"}, {"WEBSERVER_PORT": "'$WEBSERVER_PORT'"}],'
ENVS='"ENVS": [{"RUN_FORCE": "'$RUN_FORCE'"}, {"DOCKER_REGISTRY_URL": "'$DOCKER_REGISTRY_URL'"}, {"WEBSERVER_PORT": "'$WEBSERVER_PORT'"}],'
echo '{
"main": {
"SERVICE_NAME": "framework"

@@ -423,46 +509,56 @@ check_update() {

REPOSITORY_URL=$(echo $IMAGE | cut -d '/' -f1)

# Check whether repository url is available

CURL_CHECK="curl -m 5 -s -o /dev/null -w "%{http_code}" https://$REPOSITORY_URL/v2/"
CURL_CHECK_CODE=$(eval $CURL_CHECK)
if [[ "$CURL_CHECK_CODE" == "200" ]]; then
debug "$REPOSITORY_URL accessed successful"

# if repository url is not set
if [[ "$(echo "$REPOSITORY_URL" | grep '\.')" == "" ]]; then
REPOSITORY_URL="docker.io"
TEMP_PATH=$IMAGE
else
# -f2- IMAGE can contain subdirectories
TEMP_PATH=$(echo $IMAGE | cut -d '/' -f2-)
fi

debug "TEMP PATH: $TEMP_PATH"
# if image repository url doesn't contain dot (safebox)
if [[ "$(echo "$REPOSITORY_URL" | grep '\.')" == "" ]]; then
REMOTE_URL="registry.hub.docker.com"
TEMP_PATH=$IMAGE
TEMP_IMAGE=$(echo $TEMP_PATH | cut -d ':' -f1)
TEMP_VERSION=$(echo $TEMP_PATH | cut -d ':' -f2)
if [ "$TEMP_VERSION" == "$TEMP_IMAGE" ]; then # version is not set
TEMP_VERSION="latest"
fi
TOKEN=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:{$TEMP_IMAGE}:pull" | jq -r .token)
TOKEN_HEADER='-H "Authorization: Bearer '$TOKEN'"'
else
REMOTE_URL=""
# -f2- IMAGE can contain subdirectories
TEMP_PATH=$(echo $IMAGE | cut -d '/' -f2-)
TOKEN=""
TOKEN_HEADER=""
fi

debug "TEMP PATH: $TEMP_PATH"
TEMP_IMAGE=$(echo $TEMP_PATH | cut -d ':' -f1)
TEMP_VERSION=$(echo $TEMP_PATH | cut -d ':' -f2)
if [ "$TEMP_VERSION" == "$TEMP_IMAGE" ]; then # version is not set
TEMP_VERSION="latest"
fi

REMOTE_URL="https://$REMOTE_URL/v2/$TEMP_IMAGE/manifests/$TEMP_VERSION"
debug "REMOTE_URL: $REMOTE_URL"

# Check whether repository url is available
#CURL_CHECK="curl -m 5 -s -o /dev/null -w "%{http_code}" https://$REPOSITORY_URL/v2/"
CURL_CHECK='curl -m 5 -s -o /dev/null -I '"$TOKEN_HEADER"' -w "%{http_code}" '"$REMOTE_URL"
CURL_CHECK_CODE=$(eval $CURL_CHECK)

# if valid accessible url
if [[ "$CURL_CHECK_CODE" == "200" ]]; then
debug "$REMOTE_URL repository accessed successfully"

REMOTE_URL="https://$REPOSITORY_URL/v2/$TEMP_IMAGE/manifests/$TEMP_VERSION"
debug "$REMOTE_URL"
#digest=$(curl --silent -H "Accept: application/vnd.docker.distribution.manifest.v2+json" "$REMOTE_URL" | jq -r '.config.digest');
# Digest for the whole manifest, which includes all architectures.
digest=$(curl -s -I -H "Accept: application/vnd.oci.image.index.v1+json" "$REMOTE_URL" | grep Docker-Content-Digest | cut -d ' ' -f2 | tr -d '\r\n')
CURL_DIGEST='curl -s -I '"$TOKEN_HEADER"' -H "Accept: application/vnd.oci.image.index.v1+json" '"$REMOTE_URL"' | grep -i Docker-Content-Digest | cut -d " " -f2 | tr -d "\r\n"'
digest=$(eval $CURL_DIGEST)

#debug "docker images -q --no-trunc $REPOSITORY_URL/$TEMP_IMAGE:$TEMP_VERSION";
#local_digest=$(docker images -q --no-trunc $REPOSITORY_URL/$TEMP_IMAGE:$TEMP_VERSION)
debug "docker image inspect $REPOSITORY_URL/$TEMP_IMAGE:$TEMP_VERSION --format '{{index .RepoDigests 0}}' | cut -d '@' -f2"
debug "docker image inspect $IMAGE --format '{{index .RepoDigests 0}}' | cut -d '@' -f2"
# Digest for the whole manifest, which includes all architectures.
local_digest=$(docker image inspect $REPOSITORY_URL/$TEMP_IMAGE:$TEMP_VERSION --format '{{index .RepoDigests 0}}' | cut -d '@' -f2)
local_digest=$(docker image inspect $IMAGE --format '{{index .RepoDigests 0}}' | cut -d '@' -f2)

debug "REMOTE DIGEST: $digest"
debug "LOCAL DIGEST: $local_digest"

if [ "$digest" != "$local_digest" ]; then
echo "Update available. Executing update command..."
echo "Update available. You can execute update command..."
UPDATE="1"
#DOCKER_PULL="docker pull $REPOSITORY_URL/$TEMP_IMAGE:$TEMP_VERSION"
#eval $DOCKER_PULL
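Stripped of the registry/dockerhub branching, the update check above compares the manifest digest announced by the registry with the RepoDigest of the locally present image. A minimal sketch, assuming a registry that needs no auth token; the registry host and image name are placeholders.

    # minimal sketch of the digest comparison; host and image are placeholders, no token handling
    IMAGE=registry.example.com/framework-scheduler:latest
    remote=$(curl -s -I -H "Accept: application/vnd.oci.image.index.v1+json" \
      "https://registry.example.com/v2/framework-scheduler/manifests/latest" \
      | grep -i Docker-Content-Digest | cut -d ' ' -f2 | tr -d '\r\n')
    local_digest=$(docker image inspect "$IMAGE" --format '{{index .RepoDigests 0}}' | cut -d '@' -f2)
    [ "$remote" != "$local_digest" ] && echo "update available" || echo "already up to date"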
@@ -474,10 +570,11 @@ check_update() {
# UPDATE="1";
#fi
else
UPDATE="0"
echo "Already up to date. Nothing to do."
fi
else
debug "$REPOSITORY_URL not accessible, http error code: $CURL_CHECK_CODE"
debug "$REMOTE_URL not accessible, http error code: $CURL_CHECK_CODE"

echo "Force image pull has started without digest check..."
DOCKER_PULL="docker pull $IMAGE"

@@ -492,14 +589,58 @@ check_update() {
fi
}

upgrade_scheduler() {

DOCKER_START="--entrypoint=sh $DOCKER_REGISTRY_URL/$FRAMEWORK_SCHEDULER_IMAGE:$FRAMEWORK_SCHEDULER_VERSION -c '/scripts/upgrade.sh'"

DOCKER_RUN="/usr/bin/docker run -d \
-v SHARED:/var/tmp/shared \
-v /var/run/docker.sock:/var/run/docker.sock \
-v SYSTEM_DATA:/etc/system/data \
-v SYSTEM_CONFIG:/etc/system/config \
-v SYSTEM_LOG:/etc/system/log \
-v USER_DATA:/etc/user/data \
-v USER_CONFIG:/etc/user/config \
-v USER_SECRET:/etc/user/secret \
--restart=always \
--env WEBSERVER_PORT=$WEBSERVER_PORT \
--network $FRAMEWORK_SCHEDULER_NETWORK \
--env RUN_FORCE=$RUN_FORCE \
--env DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL \
$DOCKER_START"
eval "$DOCKER_RUN"
}

upgrade() {
local NAME=$1

if [ "$NAME" == "web-installer" ]; then

debug "$service_exec service-framework.containers.webserver stop force"
$service_exec service-framework.containers.webserver stop force
debug "$service_exec service-framework.containers.webserver start info"
$service_exec service-framework.containers.webserver start info &

else

debug "$service_exec $NAME.json stop force"
$service_exec $NAME.json stop force
debug "$service_exec $NAME.json start info"
$service_exec $NAME.json start info &
fi
PID=$!
}

execute_task() {
TASK="$1"
B64_JSON="$2"
DATE=$(date +"%Y%m%d%H%M")

# Executing task
debug "TASK: $(echo $TASK | cut -d ':' -f1)"
TASK_NAME=$(echo $TASK | cut -d ':' -f1)
if [ "$TASK_NAME" != "check_vpn" ]; then
debug "TASK: $(echo $TASK_NAME | cut -d ':' -f1)"
fi

# checking sytem status
SYSTEM_STATUS=$(ls /etc/user/config/services/*.json | grep -v service-framework.json)
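upgrade_scheduler() relies on docker run's --entrypoint override so the freshly started scheduler container executes /scripts/upgrade.sh instead of its normal entrypoint. The pattern in isolation, with a placeholder image:

    # the --entrypoint override pattern in isolation; alpine is only a placeholder image
    docker run --rm --entrypoint=sh alpine:latest -c 'echo "running an alternative script instead of the default entrypoint"'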
@@ -522,6 +663,18 @@ execute_task() {
#fi;
JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "INSTALL_STATUS": "'$INSTALL_STATUS'" }' | jq -r . | base64 -w0)

elif [ "$TASK_NAME" == "request_letsencrypt" ]; then
DOMAINS=$(echo $B64_JSON | base64 -d | jq -r 'keys[]')
for DOMAIN in $(echo $DOMAINS); do
REQUEST=$(echo $B64_JSON | base64 -d | jq -r ".[\"$DOMAIN\"].status")

if [ "$REQUEST" == "requested" ]; then
echo "New certificate for $DOMAIN is requested."
touch /etc/system/data/ssl/keys/$DOMAIN/new_certificate
fi
done
JSON_TARGET=$B64_JSON

elif [ "$TASK_NAME" == "system" ]; then
#SYSTEM_LIST="core-dns.json cron.json domain-local-backend.json firewall-letsencrypt.json firewall-local-backend.json firewall-localloadbalancer-dns.json firewall-localloadbalancer-to-smarthostbackend.json firewall-smarthost-backend-dns.json firewall-smarthost-loadbalancer-dns.json firewall-smarthost-to-backend.json firewall-smarthostloadbalancer-from-publicbackend.json letsencrypt.json local-backend.json local-proxy.json service-framework.json smarthost-proxy-scheduler.json smarthost-proxy.json"
SYSTEM_LIST="core-dns.json cron.json letsencrypt.json local-proxy.json service-framework.json smarthost-proxy-scheduler.json smarthost-proxy.json"

@@ -621,6 +774,7 @@ execute_task() {
CONTAINER_NAMES=$(cat $SERVICE | jq -r .containers[].NAME)
UPDATE_CONTAINERS=""
UPTODATE_CONTAINERS=""
ERROR_CONTAINERS=""
for CONTAINER_NAME in $CONTAINER_NAMES; do
#IMAGE=$(cat $SERVICE | jq -rc '.containers[] | select(.NAME=="'$CONTAINER_NAME'") | .IMAGE');
IMAGE=$(cat $SERVICE | jq -rc --arg NAME "$CONTAINER_NAME" '.containers[] | select(.NAME==$NAME) | .IMAGE')

@@ -629,13 +783,15 @@ execute_task() {
check_update "$IMAGE"
if [ "$UPDATE" == "1" ]; then
UPDATE_CONTAINERS="$UPDATE_CONTAINERS $CONTAINER_NAME"
else
elif [ "$UPDATE" == "0" ]; then
UPTODATE_CONTAINERS="$UPTODATE_CONTAINERS $CONTAINER_NAME"
else
ERROR_CONTAINERS="$UPTODATE_CONTAINERS $CONTAINER_NAME"
fi
fi
done
#RESULT=$(echo "$CONTAINERS" | base64 -w0);
SERVICES=$SERVICES$SEP'"'$SERVICE_NAME'": {"uptodate": "'$UPTODATE_CONTAINERS'", "update": "'$UPDATE_CONTAINERS'"}'
SERVICES=$SERVICES$SEP'"'$SERVICE_NAME'": {"uptodate": "'$UPTODATE_CONTAINERS'", "update": "'$UPDATE_CONTAINERS'", "error": "'$ERROR_CONTAINERS'"}'
fi
done
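With the new error field, each entry accumulated into the SERVICES string is a small JSON object keyed by the service name, roughly the shape shown below; the service and container names are invented, and the leading spaces come from the string concatenation above.

    # roughly the shape one accumulated entry takes; names invented for illustration
    echo '{ "myservice": {"uptodate": " web", "update": " db", "error": ""} }' | jq .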
@@ -682,7 +838,7 @@ execute_task() {

elif [ "$TASK_NAME" == "deployment" ]; then
JSON="$(echo $B64_JSON | base64 -d)"
DEPLOY_NAME=$(echo "$JSON" | jq -r .NAME | awk '{print tolower($0)}')
DEPLOY_ACTION=$(echo "$JSON" | jq -r .ACTION)
TREES=$(get_repositories)
debug "$JSON"

@@ -690,7 +846,7 @@ execute_task() {
for TREE in $TREES; do
APPS=$(jq -rc '.apps[]' $TREE)
for APP in $APPS; do
APP_NAME=$(echo "$APP" | jq -r '.name' | awk '{print tolower($0)}')
APP_VERSION=$(echo "$APP" | jq -r '.version')
APP_DIR=$(dirname $TREE)"/"$APP_NAME
debug "$APP_TEMPLATE"

@@ -706,20 +862,51 @@ execute_task() {
KEY=$(echo $LINE | jq -r .key)
VALUE=$(echo $LINE | jq -r .value)
debug "$KEY: $VALUE"
TEMPLATE=$(echo "$TEMPLATE" | jq -r '.fields |= map(if .key == "SMTP_MSG_SIZE" then .value = "'$VALUE'" else . end)')
#echo $TEMPLATE;
# write ENV value from service files to template value by key name
#TEMPLATE=$(echo "$TEMPLATE" | jq -r '.fields |= map(.value = "'$VALUE'")')
TEMPLATE=$(echo "$TEMPLATE" | jq -r '.fields |= map(if .key == "'$KEY'" then .value = "'$VALUE'" else . end)')
done
# write ENV value from domain file to template value by key name
for LINE in $(cat $SERVICE_DIR/domain-$DEPLOY_NAME.json | jq -rc '.containers[].ENVS[] | to_entries[]'); do
KEY=$(echo $LINE | jq -r .key)
VALUE=$(echo $LINE | jq -r .value)
debug "$KEY: $VALUE"
TEMPLATE=$(echo "$TEMPLATE" | jq -r '.fields |= map(if .key == "'$KEY'" then .value = "'$VALUE'" else . end)')
done
# write ENV value from secret file to template value by key name
for LINE in $(cat $SECRET_DIR/$DEPLOY_NAME/$DEPLOY_NAME.json | jq -rc '.[] | to_entries[]'); do
KEY=$(echo $LINE | jq -r .key)
VALUE=$(echo $LINE | jq -r .value)
debug "$KEY: $VALUE"
TEMPLATE=$(echo "$TEMPLATE" | jq -r '.fields |= map(if .key == "'$KEY'" then .value = "'$VALUE'" else . end)')
done
#echo $TEMPLATE;

TEMPLATE=$(echo "$TEMPLATE" | base64 -w0)
JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "0", "TEMPLATE": "'$TEMPLATE'" }' | jq -r . | base64 -w0)

elif [ "$DEPLOY_ACTION" == "deploy" ]; then
JSON_TARGET=""
#JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "1" }' | jq -r . | base64 -w0) # deployment has started
#redis-cli -h $REDIS_SERVER -p $REDIS_PORT SET "$DEPLOY_ACTION-$DEPLOY_NAME" "$JSON_TARGET" # web_in

DEPLOY_PAYLOAD=$(echo "$JSON" | jq -r .PAYLOAD) # base64 list of key-value pairs in JSON
deploy_additionals "$APP_DIR" "$DEPLOY_NAME" "$DEPLOY_PAYLOAD"
sh /scripts/check_pid.sh "$PID" "$SHARED" "$DEPLOY_ACTION-$DEPLOY_NAME" "$DATE" "$DEBUG" &
elif [ "$DEPLOY_ACTION" == "redeploy" ]; then
JSON_TARGET=""
remove_additionals "$APP_DIR" "$DEPLOY_NAME"

DEPLOY_PAYLOAD=$(echo "$JSON" | jq -r .PAYLOAD) # base64 list of key-value pairs in JSON
deploy_additionals "$APP_DIR" "$DEPLOY_NAME" "$DEPLOY_PAYLOAD"
sh /scripts/check_pid.sh "$PID" "$SHARED" "deploy-$DEPLOY_NAME" "$DATE" "$DEBUG" &
elif [ "$DEPLOY_ACTION" == "uninstall" ]; then
remove_additionals "$APP_DIR" "$DEPLOY_NAME"
# uninstall has finished
JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "2" }' | jq -r . | base64 -w0)
debug "JSON_TARGET: $JSON_TARGET"
echo $JSON_TARGET | base64 -d >$SHARED/output/"uninstall-"$DEPLOY_NAME.json
JSON_TARGET=""
fi
fi
done
@@ -745,21 +932,37 @@ execute_task() {
add_repository "$NEW_REPO"
JSON_TARGET=""

elif [ "$TASK_NAME" == "check_vpn" ]; then

VPN_STATUS="0"
VPN_RESULT=""
CONTAINERS=$(docker ps -a --format '{{.Names}} {{.Status}}' | grep -w wireguardproxy)
if [ "$CONTAINERS" != "" ]; then
UP=$(echo $CONTAINERS | grep -w 'Up')
if [ "$UP" != "" ]; then
VPN_STATUS="2"
else
VPN_STATUS="1"
fi
VPN_RESULT=$(echo "$CONTAINERS" | base64 -w0)
fi
JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "STATUS": "'$VPN_STATUS'", "RESULT": "'$VPN_RESULT'" }' | jq -r . | base64 -w0)

elif [ "$TASK_NAME" == "save_vpn" ]; then

VPN_PROXY_REPO="wireguard-proxy-client";
VPN_PROXY_REPO="wireguard-proxy-client"
if [ ! -d "/tmp/$VPN_PROXY_REPO" ]; then
git clone $REPO /tmp/$VPN_PROXY_REPO >/dev/null
git clone https://git.format.hu/safebox/$VPN_PROXY_REPO.git /tmp/$VPN_PROXY_REPO >/dev/null
else
cd /tmp/$VPN_PROXY_REPO
git pull >/dev/null
fi

cp -av /tmp/$VPN_PROXY_REPO/*.json $SERVICE_DIR/

VPN_VOLUMES=$(jq -r .containers[0].VOLUMES[0].SOURCE $SERVICE_DIR/vpn-proxy.json)
VOLUME=$(dirname $VPN_VOLUMES);
mkdir -p $VOLUME;
VPN_VOLUMES=$(jq -r .containers[0].VOLUMES[0].SOURCE $SERVICE_DIR/vpn-proxy.json)
VOLUME=$(dirname $VPN_VOLUMES)
mkdir -p $VOLUME

# install vpn only
sh /scripts/install.sh "$B64_JSON" "$service_exec" "vpn" "$GLOBAL_VERSION"

@@ -771,9 +974,24 @@ execute_task() {
RESULT=$(echo "$CONTAINERS" | base64 -w0)
JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "RESULT": "'$RESULT'" }' | jq -r . | base64 -w0)

elif [ "$TASK_NAME" == "upgrade" ]; then
JSON="$(echo $B64_JSON | base64 -d)"
NAME=$(echo "$JSON" | jq -r .NAME | awk '{print tolower($0)}')
if [ "$NAME" == "framework" ]; then
upgrade "web-installer"
upgrade_scheduler
#CONTAINERS=$(docker ps -a --format '{{.Names}} {{.Status}}' | grep -E 'framework-scheduler|webserver')
else
upgrade "$NAME"
#CONTAINERS=$(docker ps -a --format '{{.Names}} {{.Status}}' | grep -w "$NAME")
fi
#RESULT=$(echo "$CONTAINERS" | base64 -w0)
sh /scripts/check_pid.sh "$PID" "$SHARED" "$TASK_NAME-$NAME" "$DATE" "$DEBUG" &
fi

debug "JSON_TARGET: $JSON_TARGET"
if [ "$TASK_NAME" != "check_vpn" ]; then
debug "JSON_TARGET: $JSON_TARGET"
fi

if [ "$JSON_TARGET" != "" ]; then
#redis-cli -h $REDIS_SERVER -p $REDIS_PORT SET $TASK "$JSON_TARGET"

@@ -869,6 +1087,7 @@ start_framework_scheduler() {
--env WEBSERVER_PORT=$WEBSERVER_PORT \
--network $FRAMEWORK_SCHEDULER_NETWORK \
--env RUN_FORCE=$RUN_FORCE \
--env DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL \
$DOCKER_START"
eval "$DOCKER_RUN"

@@ -934,7 +1153,9 @@ unset IFS
inotifywait --exclude "\.(swp|tmp)" -m -e CREATE,CLOSE_WRITE,DELETE,MOVED_TO -r $DIR |
while read dir op file; do
if [ "${op}" == "CLOSE_WRITE,CLOSE" ]; then
echo "new file created: $file"
if [ "$file" != "check_vpn.json" ]; then
echo "new file created: $file"
fi
B64_JSON=$(cat $DIR/$file | base64 -w0)
TASK=$(echo $file | cut -d '.' -f1)
execute_task "$TASK" "$B64_JSON"
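The inotifywait loop above turns every JSON file dropped into the watched directory into a task: the file name without its extension becomes TASK and the base64-encoded file body becomes B64_JSON. A sketch of how a framework upgrade would be triggered through this mechanism; the watched directory path is a placeholder for whatever $DIR resolves to at runtime.

    # sketch: dropping a task file into the watched directory; /path/to/watched/dir stands in for $DIR
    echo '{ "NAME": "framework" }' > /path/to/watched/dir/upgrade.json
    # the loop then effectively runs:
    #   execute_task "upgrade" "$(cat /path/to/watched/dir/upgrade.json | base64 -w0)"
    # which, for NAME == "framework", calls upgrade "web-installer" and upgrade_scheduler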
@@ -4,6 +4,31 @@ SERVICE_EXEC=$2
FIRST_INSTALL=$3
GLOBAL_VERSION=$4

edit_user_json() {

if [ ! -f /etc/user/config/user.json ]; then
install -m 664 -g 65534 /dev/null /etc/user/config/user.json
echo '{}' >/etc/user/config/user.json

else
if [ -z $(cat /etc/user/config/user.json) ]; then
echo '{}' >/etc/user/config/user.json
fi
fi

TMP_FILE=$(mktemp)
jq '
if . == null or . == [] then
{"letsencrypt": { "EMAIL": "'$LETSENCRYPT_MAIL'", "LETSENCRYPT_SERVER": "'$LETSENCRYPT_SERVERNAME'" }}
else
. + {"letsencrypt": { "EMAIL": "'$LETSENCRYPT_MAIL'", "LETSENCRYPT_SERVER": "'$LETSENCRYPT_SERVERNAME'" }}
end
' /etc/user/config/user.json >$TMP_FILE
cat $TMP_FILE >/etc/user/config/user.json
rm $TMP_FILE

}

get_vpn_key() {

if [ "$VPN_PASS" != "" ]; then
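edit_user_json() merges a letsencrypt block into /etc/user/config/user.json without dropping whatever is already there. A worked example of the jq expression with invented values for the two variables:

    # worked example of the merge; the email and server values are invented
    echo '{"existing": "data"}' | jq '
      if . == null or . == [] then
        {"letsencrypt": { "EMAIL": "admin@example.com", "LETSENCRYPT_SERVER": "letsencrypt" }}
      else
        . + {"letsencrypt": { "EMAIL": "admin@example.com", "LETSENCRYPT_SERVER": "letsencrypt" }}
      end
    '
    # result: the original keys plus a top-level "letsencrypt" object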
@@ -113,6 +138,10 @@ if [[ $FIRST_INSTALL == "true" ]]; then
VAR_CRON="--env CRON=$CRON"
fi

if [ "$LOCAL_BACKEND" != "" ]; then
VAR_LOCAL_BACKEND="--env LOCAL_BACKEND=$LOCAL_BACKEND"
fi

if [ "$VPN_PROXY" == "yes" ]; then
if [ "$LETSENCRYPT_SERVERNAME" = "" ]; then
LETSENCRYPT_SERVERNAME="letsencrypt"

@@ -146,6 +175,7 @@ if [[ $FIRST_INSTALL == "true" ]]; then
$VAR_VPN_PROXY \
$VAR_DOMAIN \
$VAR_CRON \
$VAR_LOCAL_BACKEND \
$VAR_DISCOVERY \
$VAR_DISCOVERY_DIR \
$VAR_DISCOVERY_DIRECTORY \

@@ -165,9 +195,21 @@ if [[ $FIRST_INSTALL == "true" ]]; then

elif [ "$FIRST_INSTALL" == "vpn" ]; then

get_vpn_key
$SERVICE_EXEC vpn-proxy start
exit;
INIT_SERVICE_PATH=/etc/user/config/services
AUTO_START_SERVICES="/etc/system/data/"

get_vpn_key

edit_user_json $LETSENCRYPT_MAIL $LETSENCRYPT_SERVERNAME

$SERVICE_EXEC vpn-proxy stop force
$SERVICE_EXEC vpn-proxy start
echo "$INIT_SERVICE_PATH/vpn-proxy.json" >>$AUTO_START_SERVICES/.init_services
echo "$INIT_SERVICE_PATH/firewall-vpn-smarthost-loadbalancer" >>$AUTO_START_SERVICES/.init_services
echo "$INIT_SERVICE_PATH/firewall-vpn-proxy-postrouting" >>$AUTO_START_SERVICES/.init_services
echo "$INIT_SERVICE_PATH/firewall-vpn-proxy-prerouting" >>$AUTO_START_SERVICES/.init_services

exit

else
$SUDO_CMD docker pull $DOCKER_REGISTRY_URL/installer-tool
scripts/scheduler/upgrade.sh (new executable file, 85 lines)

@@ -0,0 +1,85 @@
#!/bin/sh

#! /bin/sh

cd /scripts
DEBUG_MODE=${DEBUG_MODE:-false}

DATE=$(date +%F-%H-%M-%S)

#DOCKER_REGISTRY_URL=${DOCKER_REGISTRY_URL:-registry.format.hu}
DOCKER_REGISTRY_URL=${DOCKER_REGISTRY_URL:-safebox}
USER_INIT_PATH=$USER_INIT_PATH
GLOBAL_VERSION=${GLOBAL_VERSION:-latest}
SERVICE_DIR=${SERVICE_DIR:-/etc/user/config/services}
SECRET_DIR=${SECRET_DIR:-/etc/user/secret}

SHARED=${SHARED:-/var/tmp/shared}

FRAMEWORK_SCHEDULER_IMAGE=${FRAMEWORK_SCHEDULER_IMAGE:-framework-scheduler}
FRAMEWORK_SCHEDULER_NAME=${FRAMEWORK_SCHEDULER_NAME:-framework-scheduler}
FRAMEWORK_SCHEDULER_NETWORK=${FRAMEWORK_SCHEDULER_NETWORK:-framework-network}
FRAMEWORK_SCHEDULER_NETWORK_SUBNET=${FRAMEWORK_SCHEDULER_NETWORK_SUBNET:-"172.19.255.0/24"}
FRAMEWORK_SCHEDULER_VERSION=${FRAMEWORK_SCHEDULER_VERSION:-latest}
RUN_FORCE=${RUN_FORCE:-false}

WEB_SERVER=${WEB_SERVER:-webserver}
WEB_IMAGE=${WEB_IMAGE:-web-installer}
WEBSERVER_PORT=${WEBSERVER_PORT:-8080}
WEBSERVER_VERSION=${WEBSERVER_VERSION:-latest}

if [[ -n "$DOCKER_REGISTRY_URL" && "$DOCKER_REGISTRY_URL" != "null" ]]; then
SETUP="/setup"
else
SETUP="setup"
DOCKER_REGISTRY_URL=""
fi

SETUP_VERSION=${SETUP_VERSION:-$GLOBAL_VERSION}

# $DNS_PATH \
#$CA_FILE \
DNS_DIR="/etc/system/data/dns"
DNS="--env DNS_DIR=$DNS_DIR"
DNS_PATH="--volume $DNS_DIR:/etc/system/data/dns:rw"
HOST_FILE=$DNS_DIR"/hosts.local"
mkdir -p $DNS_DIR
touch $HOST_FILE

mkdir -p /etc/system/data/ssl/certs
mkdir -p /etc/system/data/ssl/keys

CA_PATH=/etc/system/data/ssl/certs
CA="--env CA_PATH=$CA_PATH"
CA_FILE="--volume $CA_PATH:$CA_PATH:ro"
mkdir -p $CA_PATH

VOLUME_MOUNTS="-v SYSTEM_DATA:/etc/system/data -v SYSTEM_CONFIG:/etc/system/config -v SYSTEM_LOG:/etc/system/log -v USER_DATA:/etc/user/data -v USER_CONFIG:/etc/user/config -v USER_SECRET:/etc/user/secret"

service_exec="/usr/bin/docker run --rm \
$DNS \
$CA \
-w /etc/user/config/services/ \
$VOLUME_MOUNTS \
-v /var/run/docker.sock:/var/run/docker.sock \
--env VOLUME_MOUNTS="$(echo $VOLUME_MOUNTS | base64 -w0)" \
--env DOCKER_REGISTRY_URL=$DOCKER_REGISTRY_URL \
--env SETUP_VERSION=$SETUP_VERSION \
--env GLOBAL_VERSION=$GLOBAL_VERSION \
--env HOST_FILE=$HOST_FILE \
$DOCKER_REGISTRY_URL$SETUP:$SETUP_VERSION"

SHARED=${SHARED:-/var/tmp/shared}
TASK="scheduler-upgrade"

JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "UPGRADE_STATUS": "0" }' | jq -r . | base64 -w0) # install has started
install -m 664 -g 65534 /dev/null $SHARED/output/$TASK.json
echo $JSON_TARGET | base64 -d >$SHARED/output/$TASK.json

/usr/bin/docker rm -f framework-scheduler
$service_exec service-framework.containers.framework-scheduler start

JSON_TARGET=$(echo '{ "DATE": "'$DATE'", "UPGRADE_STATUS": "1" }' | jq -r . | base64 -w0)
echo $JSON_TARGET | base64 -d >$SHARED/output/$TASK.json

/usr/bin/docker rm -f $HOSTNAME
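upgrade.sh reports its progress by writing a small JSON status file under $SHARED/output; the status can be read back like this (the path uses the script's default SHARED value):

    # reading the status written by upgrade.sh; /var/tmp/shared is the script's default SHARED
    jq -r .UPGRADE_STATUS /var/tmp/shared/output/scheduler-upgrade.json
    # per the script's comments: 0 = upgrade started, 1 = new framework-scheduler container started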