Add services.json & fluence.json (#8)

This commit is contained in:
folex 2021-09-02 12:48:48 +03:00 committed by GitHub
parent 0640872fdc
commit da12f542da
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
27 changed files with 1234 additions and 124 deletions

View File

@ -1,5 +1,5 @@
{
"template": "${{CHANGELOG}}\n\n${{UNCATEGORIZED}}",
"pr_template": "- #${{NUMBER}} ${{TITLE}}",
"pr_template": "- ${{TITLE}}",
"empty_template": "- no changes"
}

View File

@ -3,44 +3,21 @@ name: Publish Container
on:
workflow_dispatch:
inputs:
fluence_branch:
description: "Fluence node branch"
required: false
default: "latest"
container_tag:
description: "Container tag"
required: false
default: "latest"
services_json:
description: "services.json URL"
required: false
default: "https://github.com/fluencelabs/builtin-services/releases/latest/download/services.json"
services_version:
description: "version of the services.json"
required: false
default: ""
do_release:
description: "whether to make a release"
required: true
default: "false"
jobs:
build-publish:
name: Build and publish ${{ github.event.inputs.container_tag || 'latest' }} from Fluence ${{ github.event.inputs.fluence_branch || 'latest' }} & services ${{ github.event.inputs.services_version || '[not specified]' }}
name: Build and publish ${{ github.event.inputs.container_tag || 'latest' }}
runs-on: ubuntu-latest
env:
FLUENCE_BRANCH: ${{ github.event.inputs.fluence_branch || 'latest' }}
CONTAINER_TAG: ${{ github.event.inputs.container_tag || 'latest' }}
SERVICES_JSON: ${{ github.event.inputs.services_json || 'https://github.com/fluencelabs/builtin-services/releases/latest/download/services.json' }}
SERVICES_VERSION: ${{ github.event.inputs.services_version || '' }}
DO_RELEASE: ${{ github.event.inputs.do_release || 'false' }}
steps:
- uses: actions/checkout@v2
- name: Set RELEASE_VERSION from ${{ env.GITHUB_REF }}
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
### Prepare docker & login to Dockerhub
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
@ -55,6 +32,7 @@ jobs:
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Login to DockerHub
uses: docker/login-action@v1
with:
@ -65,23 +43,9 @@ jobs:
run: |
TAG="${{ env.CONTAINER_TAG }}"
RUN="${{ github.run_number }}"
FLUENCE_BRANCH="${{ env.FLUENCE_BRANCH }}"
if [ "$TAG" = "latest" ] || [ "$TAG" = "" ]; then
if [ "$FLUENCE_BRANCH" = "latest" ]; then
TAG="latest"
else
TAG="$FLUENCE_BRANCH"
fi
fi
TAGS=fluencelabs/node:${TAG},fluencelabs/node:${TAG}_latest,fluencelabs/node:${TAG}_v${RUN}
SERVICES_VERSION="${{ env.SERVICES_VERSION }}"
if [ "$SERVICES_VERSION" != "" ]; then
TAGS="$TAGS,fluencelabs/node:${TAG}_${SERVICES_VERSION},fluencelabs/node:${TAG}_${SERVICES_VERSION}_v${RUN}"
fi
TAGS=fluencelabs/node:${TAG},fluencelabs/node:${TAG}_v${RUN}
echo "TAGS=$TAGS" | tee -a $GITHUB_ENV
echo "FLUENCE_TAG=${{ env.FLUENCE_BRANCH }}" | tee -a $GITHUB_ENV
echo "BUILD_DATE=$(date '+%Y-%m-%dT%H:%M:%S%:z')"
### Build and push docker image
@ -94,70 +58,7 @@ jobs:
push: true
tags: "${{ env.TAGS }}"
build-args: |
FLUENCE_TAG=${{ env.FLUENCE_TAG }}
SERVICES_JSON=${{ env.SERVICES_JSON }}
SERVICES_VERSION=${{ env.SERVICES_VERSION }}
COMMIT=${{ github.sha }}
BUILD_DATE=${{ env.BUILD_DATE }}
RUN_NUMBER=${{ github.run_number }}
TAG=${{ env.CONTAINER_TAG }}
### Create GitHub release
- name: Push tag ${{ env.BASE_VERSION }}
id: tag_version
uses: mathieudutour/github-tag-action@v5.5
with:
custom_tag: ${{ env.BASE_VERSION }}
tag_prefix: ""
github_token: ${{ secrets.PERSONAL_TOKEN }}
- name: Build Changelog
id: github_release
uses: mikepenz/release-changelog-builder-action@v1
with:
configuration: ".github/workflows/changelog_config.json"
toTag: ${{ steps.tag_version.outputs.new_tag }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Parse metadata from docker publish
id: metadata
uses: actions/github-script@v4
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
result-encoding: string
script: |
try {
let metadataRaw = `${{ steps.docker_publish.outputs.metadata }}`;
let metadata = JSON.parse(metadataRaw);
let imageTags = metadata['image.name'].split(",");
let tagString = imageTags.reduce(
(acc, tag) => { return `${acc}\n- ${tag}` },
""
)
console.log(tagString);
return tagString;
} catch (e) {
console.log("Err: " + e);
throw e;
}
- name: Release
id: release
if: ${{ env.DO_RELEASE == 'true' }}
uses: softprops/action-gh-release@v1
with:
name: Fluence Node ${{ steps.tag_version.outputs.new_tag }}
tag_name: ${{ steps.tag_version.outputs.new_tag }}
body: |
${{steps.github_release.outputs.changelog}}
## Docker tags
${{steps.metadata.outputs.result}}
## services.json
[services.json @ ${{ env.SERVICES_VERSION }}](${{ env.SERVICES_JSON }})
draft: false
prerelease: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

117
.github/workflows/release.yml vendored Normal file
View File

@ -0,0 +1,117 @@
name: "release"
on:
push:
tags:
- "v*"
jobs:
release:
name: "release"
runs-on: ubuntu-latest
defaults:
run:
shell: bash
steps:
### Setup
- name: Checkout repository
uses: actions/checkout@v2
- name: Set RELEASE_VERSION
run: echo "RELEASE_VERSION=${GITHUB_REF#refs/*/}" >> $GITHUB_ENV
### Prepare docker & login to Dockerhub
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Login to DockerHub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Determine container tags
run: |
RUN="${{ github.run_number }}"
VERSION="${{ env.RELEASE_VERSION }}"
TAGS=fluencelabs/node:${VERSION},fluencelabs/node:${VERSION}_${RUN},fluencelabs/node:latest
echo "TAGS=$TAGS" | tee -a $GITHUB_ENV
echo "BUILD_DATE=$(date '+%Y-%m-%dT%H:%M:%S%:z')"
### Build and push docker image
- name: Build and push
id: docker_publish
uses: docker/build-push-action@v2
with:
context: .
file: Dockerfile
push: true
tags: "${{ env.TAGS }}"
build-args: |
COMMIT=${{ github.sha }}
BUILD_DATE=${{ env.BUILD_DATE }}
RUN_NUMBER=${{ github.run_number }}
TAG=${{ env.RELEASE_VERSION }}
### Create GitHub release
- name: Build Changelog
id: github_release
uses: mikepenz/release-changelog-builder-action@v1
with:
configuration: ".github/workflows/changelog_config.json"
commitMode: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Parse metadata from docker publish
id: metadata
uses: actions/github-script@v4
with:
github-token: ${{ secrets.GITHUB_TOKEN }}
result-encoding: string
script: |
try {
let metadataRaw = `${{ steps.docker_publish.outputs.metadata }}`;
let metadata = JSON.parse(metadataRaw);
let imageTags = metadata['image.name'].split(",");
let tagString = imageTags.reduce(
(acc, tag) => { return `${acc}\n- ${tag}` },
""
)
console.log(tagString);
return tagString;
} catch (e) {
console.log("Err: " + e);
throw e;
}
- name: Release
id: release
uses: softprops/action-gh-release@v1
with:
name: Fluence Node ${{ env.RELEASE_VERSION }}
tag_name: ${{ env.RELEASE_VERSION }}
body: |
${{steps.github_release.outputs.changelog}}
## Docker tags
${{steps.metadata.outputs.result}}
draft: false
prerelease: false
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

17
.github/workflows/tag.yml vendored Normal file
View File

@ -0,0 +1,17 @@
name: "tag"
on:
workflow_dispatch:
jobs:
tag:
name: "Tag"
runs-on: "ubuntu-latest"
steps:
- uses: actions/checkout@v2
- name: Bump version and push tag
id: tag_version
uses: mathieudutour/github-tag-action@v5.5
with:
github_token: ${{ secrets.PERSONAL_TOKEN }}

49
.github/workflows/update_fluence.yml vendored Normal file
View File

@ -0,0 +1,49 @@
name: "update_fluence"
on:
workflow_dispatch:
inputs:
version:
description: 'Fluence version'
required: true
url:
description: 'Fluence binary URL'
required: true
sha256:
description: 'Fluence binary SHA256 hash'
required: true
jobs:
update_fluence:
name: "Update Fluence"
runs-on: "ubuntu-latest"
steps:
- uses: actions/checkout@v2
- name: Download jq
run: |
curl -L https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 -o /usr/local/bin/jq
chmod +x /usr/local/bin/jq
- name: git pull
run: |
git pull --rebase
- name: Update Fluence in fluence.json to ${{ github.event.inputs.version }}
run: |
echo '
{
"version": "${{ github.event.inputs.version }}",
"url": "${{ github.event.inputs.url }}",
"sha256": "${{ github.event.inputs.sha256 }}"
}
' > fluence/fluence.json
- name: Commit updated fluence.json
uses: EndBug/add-and-commit@v7
with:
add: 'fluence/fluence.json'
message: 'Update Fluence to ${{ github.event.inputs.version }}'
push: true
pull_strategy: 'NO-PULL'

55
.github/workflows/update_service.yml vendored Normal file
View File

@ -0,0 +1,55 @@
name: "update_service"
on:
workflow_dispatch:
inputs:
name:
description: 'Name of the service'
required: true
version:
description: 'Version of the service'
required: true
url:
description: 'Url of the service package'
required: true
sha256:
description: 'SHA256 hash of the service package'
required: true
jobs:
update:
name: "Update service"
runs-on: "ubuntu-latest"
steps:
- uses: actions/checkout@v2
- name: Download jq
run: |
curl -L https://github.com/stedolan/jq/releases/download/jq-1.5/jq-linux64 -o /usr/local/bin/jq
chmod +x /usr/local/bin/jq
- run: git pull --rebase
- name: Update ${{ github.event.inputs.name }} services.json to ${{ github.event.inputs.version }}
run: |
# check the service already exists
jq -e 'has("${{ github.event.inputs.name }}")' fluence/services.json
# update service's record
UPDATED=$(jq '."${{ github.event.inputs.name }}" = {
"version": "${{ github.event.inputs.version }}",
"url": "${{ github.event.inputs.url }}",
"sha256": "${{ github.event.inputs.sha256 }}"
}' fluence/services.json)
# write updated content to disk
echo "$UPDATED" > fluence/services.json
- name: Commit updated services.json
uses: EndBug/add-and-commit@v7
with:
add: 'fluence/services.json'
message: 'Update ${{ github.event.inputs.name }} to ${{ github.event.inputs.version }}'
push: true
pull_strategy: 'NO-PULL'

4
.gitignore vendored
View File

@ -102,3 +102,7 @@ dist
# TernJS port file
.tern-port
.DS_Store
.pyc

View File

@ -1,9 +1,7 @@
### NOTE: original linuxserver.org docker-ipfs image also builds & runs migrations.
### If needed, go to https://github.com/linuxserver/docker-ipfs to see how it's done.
ARG FLUENCE_TAG=latest
ARG IPFS=v0.9.0
FROM fluencelabs/fluence:${FLUENCE_TAG} as fluence
FROM ipfs/go-ipfs:${IPFS} as ipfs
@ -23,7 +21,7 @@ ENV IPFS_PATH=/config/ipfs
ENV IPFS_LOGGING_FMT=nocolor
ENV RUST_LOG="info,aquamarine=warn,tokio_threadpool=info,tokio_reactor=info,mio=info,tokio_io=info,soketto=info,yamux=info,multistream_select=info,libp2p_secio=info,libp2p_websocket::framed=info,libp2p_ping=info,libp2p_core::upgrade::apply=info,libp2p_kad::kbucket=info,cranelift_codegen=info,wasmer_wasi=info,cranelift_codegen=info,wasmer_wasi=info"
ENV RUST_BACKTRACE="1"
## set /fluence as the CMD binary
## set /run_fluence as the CMD binary
ENV S6_CMD_ARG0="/run_fluence"
# fluence builtins default envs
@ -46,16 +44,16 @@ RUN \
/var/lib/apt/lists/* \
/var/tmp/*
# download fluence builtin services
ARG SERVICES_JSON=https://github.com/fluencelabs/builtin-services/releases/latest/download/services.json
COPY download_builtins.sh /download_builtins.sh
RUN /download_builtins.sh ${SERVICES_JSON}
# download fluence & builtin services
COPY fluence/services.json /services.json
COPY fluence/download_builtins.sh /download_builtins.sh
RUN /download_builtins.sh
# copy fluence
# TODO: copy binary to /usr/bin & state to /config/fluence
COPY --from=fluence /fluence /fluence
COPY --from=fluence /.fluence /.fluence
COPY fluence/Config.default.toml /.fluence/v1/Config.toml
COPY fluence/fluence.json /fluence.json
COPY fluence/download_fluence.sh /download_fluence.sh
RUN /download_fluence.sh
# copy sidecars
COPY --from=ipfs /usr/local/bin/ipfs /usr/bin/ipfs
@ -65,7 +63,7 @@ COPY --from=ipfs /usr/local/bin/ipfs /usr/bin/ipfs
# configs may replace default configs of installed packages
COPY s6/root/ /
COPY run_fluence /run_fluence
COPY fluence/run_fluence /run_fluence
# ports and volumes
EXPOSE 5001

View File

@ -0,0 +1,69 @@
## ed25519, rsa, secp256k1 private keys available for this node. Generation is available only for ed25519 and secp256k1.
## Either value or path should be defined. Value is base58 bytes.
## ed25519 format is set by default
# root_key_pair.format = "ed25519"
# root_key_pair.value = "..."
# root_key_pair.path = "/.fluence/v1/secret_key.ed25519"
root_key_pair.generate_on_absence = true
# builtins_key_pair.format = "ed25519"
# builtins_key_pair.value = "..."
# builtins_key_pair.path = "/.fluence/v1/secret_key.ed25519"
builtins_key_pair.generate_on_absence = true
autodeploy_particle_ttl = "60s"
## Services will store their data here
# default is /.fluence/v1/services
# services_base_dir = "./services"
## AIR Interpreter will store its data here. NOTE: 'stepper' is an old name for interpreter.
# default is /.fluence/v1/stepper
# avm_base_dir = "./stepper"
## directory for TrustGraph certificates
# default is /.fluence/v1/certificates
# certificate_dir = "./certificates"
## directory for builtins
# default is /builtins
# builtins_base_dir = "./builtins"
## Path to AIR interpreter .wasm is set to specific version by default
## air_interpreter_path = "./aquamarine_${air_interpreter_wasm::VERSION}.wasm"
tcp_port = 7777
listen_ip = "0.0.0.0"
socket_timeout = "20s"
bootstrap_nodes = [
"/dns4/net01.fluence.dev/tcp/7001",
"/dns4/net01.fluence.dev/tcp/7770",
"/dns4/net02.fluence.dev/tcp/7001",
"/dns4/net03.fluence.dev/tcp/7001",
"/dns4/net04.fluence.dev/tcp/7001",
"/dns4/net05.fluence.dev/tcp/7001",
"/dns4/net06.fluence.dev/tcp/7001",
"/dns4/net07.fluence.dev/tcp/7001",
"/dns4/net08.fluence.dev/tcp/7001",
"/dns4/net09.fluence.dev/tcp/7001",
"/dns4/net10.fluence.dev/tcp/7001"
]
websocket_port = 9999
#external_address = "85.85.35.35"
prometheus_port = 18080
aquavm_pool_size = 16
## environment variables that will be passed to each service
## TODO: separate by service or move to service config
services_envs = { name = "value" }
[bootstrap_config]
reconnect_delay = "5s 500ms"
bootstrap_delay = "30s 45ms"
bootstrap_max_delay = "60s"
[root_weights]
12D3KooWB9P1xmV3c7ZPpBemovbwCiRRTKd3Kq2jsVPQN4ZukDfy = 1
12D3KooWBUJifCTgaxAUrcM9JysqCcS4CS8tiYH5hExbdWCAoNwb = 20
[protocol_config]
upgrade_timeout = "10s"
keep_alive_timeout = "10s"
outbound_substream_timeout = "10s"

3
fluence/deploy/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
*.pyc
prometheus/data

View File

@ -0,0 +1,42 @@
## Default node config shipped by the deploy scripts.
## Node identity key: read from a fixed path mounted into the container.
[root_key_pair]
format = "ed25519"
path = "/node.key"

## Key pair for builtin services; created on first start if missing.
[builtins_key_pair]
format = "ed25519"
generate_on_absence = true

autodeploy_particle_ttl = "60s"

## Per-peer trust weights, keyed by peer id.
## NOTE(review): semantics of the weight values are defined by the node
## implementation, not visible here — confirm against fluence node docs.
[root_weights]
12D3KooWDMWjFq1QZG5szbQRbyQ1A8wyrEJSQaFXwMyqkFeLoHTt = 3
12D3KooWLH6rijQ7eTAdA2pcg31ohZCTkWZywYvwTB3sUEjrn5PU = 4
12D3KooWBUJifCTgaxAUrcM9JysqCcS4CS8tiYH5hExbdWCAoNwb = 6
12D3KooWCKCeqLPSgMnDjyFsJuWqREDtKNHx1JEBiwaMXhCLNTRb = 7
12D3KooWLuLi6jvv53LPUtmx9TeV87L7kwiwRiEDBb4BkWGcG7BQ = 4
12D3KooWQT3rhVmP4qrhCX9ki6oaPAF6w1WbSXZVe8MwG9GJcBHZ = 4
12D3KooWBSdm6TkqnEFrgBuSkpVE3dR1kr6952DsWQRNwJZjFZBv = 2
12D3KooWB9P1xmV3c7ZPpBemovbwCiRRTKd3Kq2jsVPQN4ZukDfy = 10
12D3KooWBeLmDMSUfX6wKcPGQKScRedwKhazdoVr6DGsP7zQi88u = 4
12D3KooWJbJFaZ3k5sNd8DjQgg3aERoKtBAnirEvPV8yp76kEXHB = 7
12D3KooWHKcazBBDS64k6vaYWoN4JowG2PsYWf9UkrBaYFAVG19T = 4
12D3KooWKbExFt4VnVtsoDoNkYUqqNTEu74AncC1xkXTRKpgbddj = 4
12D3KooWMhVpgfQxBLkQkJed8VFNvgN4iE6MD7xCybb1ZYWW2Gtz = 1
12D3KooWMxYgPHEmeaU56xjyYY5CxfzxfeRK7ZiqpohSQEy1XkUd = 10
12D3KooWBAvmzZ4dqAFGSHUc5EX1jfMwmzSqhuTHLdafUz96FALs = 3
12D3KooWEXNUbCXooUwHrHBbrmjsrpHXoEphPwbjQXEGyzbqKnE9 = 20
12D3KooWBNuzQgzdM8Uzi2Lbsk8VX42SwdZYQDVfn69BLJWKSCVi = 3
12D3KooWBcV7Tz5jn8zbQKtWp9nrAmojm63GKC7EJ8aJ1f9WdyAD = 4
12D3KooWFnAdKdxExJioeGiS6zZG8n98LAaWLdtHFPfdXqXrNgf6 = 3
12D3KooWMtzMU25aDx4sL2LBvxZNSSZirFm7YPkkEwRYdJg4CJiJ = 4
12D3KooWF7gjXhQ4LaKj6j7ntxsPpGk34psdQicN2KNfBi9bFKXg = 10
12D3KooWHk9BjDQBUqnavciRPhAYFvqKBe4ZiPPvde7vDaqgn5er = 5
12D3KooWG9hh8R1y2VesEnMRjmoxZBikSLgoazYEhpUWZHCDjEia = 40
12D3KooWGR7gsjvAR4XkZfbo5TUxzepr35ocL7U7oo6qZVuBrxud = 40
12D3KooWGzNvhSDsgFoHwpWHAyPf1kcTYCGeRBPfznL8J6qdyu2H = 2
12D3KooWKHRipkS48MNZANFwSLTkTzfgzHcLs8wZ1nUU9DzUPb4G = 3
12D3KooWBaeyjd7FgKXxMcqjt4wLmpGZvxjUjngduRRqfyYSiSxR = 40
12D3KooWRd6dzpnAyUhC42S7JzHAk38egyRSJadTgwPzkxCrY8Mg = 4
12D3KooWDcohxBq6W45MnSrUQsnL63caM3BVEZW7dB9tgP5ky7KL = 40
12D3KooWKnRcsTpYx9axkJ6d69LPfpPXrkVLe96skuPTAo76LLVH = 10
12D3KooWQbFexscJvgiTW3empzEP9o48uYU4EA9Z77bPc9mPQZTe = 4

11
fluence/deploy/Dockerfile Normal file
View File

@ -0,0 +1,11 @@
# Build stage: install Python deps with --user so they land in /root/.local;
# the toolchain packages are needed to compile native wheels (cryptography etc.)
# NOTE(review): python:2 is end-of-life — consider migrating to fabric2/python3
FROM python:2-alpine AS build
RUN apk add gcc musl-dev libffi-dev rust cargo make openssl-dev
COPY ./requirements.txt ./requirements.txt
RUN pip install --user -r requirements.txt

# Runtime stage: copy only the installed packages, keeping the image slim
FROM python:2-alpine
COPY --from=build /root/.local /root/.local
COPY . /deploy
ENV PATH=/root/.local/bin:$PATH
WORKDIR /deploy
# run fabric tasks directly: `docker run <image> <task>`
ENTRYPOINT ["fab"]

18
fluence/deploy/README.md Normal file
View File

@ -0,0 +1,18 @@
# How to deploy Fluence
1. Edit deployment_config.json to your needs (explanations: TBD)
2. Install docker: `fab install_docker`
3. Edit `fluence.yml` and `fluence_bootstrap.yml` to your needs
4. Deploy fluence: `fab deploy_fluence`
5. If you need https, deploy caddy: `fab deploy_caddy`
6. If you need slack notifications about containers state, deploy watchdog: `fab deploy_watchdog`
# Fluence deployment scripts and configs
`deployment_config.json` contains a list of IP addresses used for deployment
`fab deploy_fluence` deploys fluence, mediated by `fluence.yml` and `fluence_bootstrap.yml`
`fab install_docker` installs docker and docker-compose (+ haveged)
`fab deploy_watchdog` deploys a watchdog to monitor containers (change `SECRET` to desired webhook URL)
`fab deploy_caddy` deploys Caddy 2.0, configured in code
# Prometheus
`/prometheus` contains a basic configuration file; HTML consoles are TBD

136
fluence/deploy/compose.py Normal file
View File

@ -0,0 +1,136 @@
# Copyright 2021 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from fabric.contrib.files import append
from fabric.api import *
from fabric.utils import *
import json
import time
import yaml
import copy
# Template docker-compose document. The single 'fluence' service entry acts as
# a prototype: gen_compose_file() deep-copies it once per container and fills
# in the {placeholders}. Volume names are likewise templated per container.
CONFIG = {
    'services': {
        'fluence': {
            'environment': {
                'RUST_BACKTRACE': 'full',
                'WASM_LOG': 'info',
                'RUST_LOG': 'info,network=trace,aquamarine=info,aquamarine::actor=info,tokio_threadpool=info,tokio_reactor=info,mio=info,tokio_io=info,soketto=info,yamux=info,multistream_select=info,libp2p_secio=info,libp2p_websocket::framed=info,libp2p_ping=info,libp2p_core::upgrade::apply=info,libp2p_kad::kbucket=info,cranelift_codegen=info,wasmer_wasi=info,async_io=info,polling=info,wasmer_interface_types_fl=info ,cranelift_codegen=info,wasmer_wasi=info,async_io=info,polling=info,wasmer_interface_types_fl=info,particle_server::behaviour::identify=info,libp2p_mplex=info,libp2p_identify=info,walrus=info,particle_protocol::libp2p_protocol::upgrade=info',
                'FLUENCE_ENV_IPFS_ADAPTER_EXTERNAL_API_MULTIADDR': '/ip4/{host}/tcp/{ipfs_port}',
                'FLUENCE_ENV_IPFS_ADAPTER_EXTERNAL_SWARM_MULTIADDR': '/ip4/{host}/tcp/{ipfs_swarm_port}',
            },
            # node CLI: config path, key format/keypair, external host, ports, management key
            'command': '-c /Config.toml -f ed25519 -k {keypair} -x {host} -t {tcp_port} -w {ws_port} -m {management_key}',
            'volumes': [
                '{container_name}:/.fluence',
                '{container_name}_config:/config',
            ],
            'container_name': '{container_name}',
            'image': 'fluencelabs/node:{container_tag}',
            'ports': [
                '{tcp_port}:{tcp_port}',
                '{ws_port}:{ws_port}',
                '{ipfs_port}:5001',
                '{ipfs_swarm_port}:4001',
                '{ipfs_gateway_port}:8080',
            ],
            'restart': 'always'
        }
    },
    'version': '3.5',
    'volumes': {
        '{container_name}': None,
        '{container_name}_config': None,
    }
}
def gen_compose_file(out, container_tag, scale, is_bootstrap, bootstraps, host, management_key, keypairs):
    """Render a docker-compose file for `scale` node containers and write it
    to `out` on the remote host (via fabric's run/append).

    out            -- remote path of the compose file to (re)write
    container_tag  -- tag for the fluencelabs/node image
    scale          -- number of containers to generate
    is_bootstrap   -- bootstrap nodes get a distinct name prefix and port range
    bootstraps     -- multiaddrs appended to each node command via --bootstraps
    host           -- external IP substituted into env vars and the command line
    management_key -- management peer id passed via -m
    keypairs       -- per-container keypairs; must hold at least `scale` items
    """
    assert len(container_tag) > 0, "container tag must not be empty, was: '{}'".format(container_tag)

    # Bootstrap nodes use a separate port range so both kinds can share a host
    if is_bootstrap == True:
        container = 'fluence_bootstrap'
        tcp_port = 7770
        ws_port = 9990
        ipfs_port = 5550
        ipfs_swarm_port = 4440
        ipfs_gateway_port = 8880
    else:
        container = 'fluence'
        tcp_port = 7001
        ws_port = 9001
        ipfs_port = 5001
        ipfs_swarm_port = 4001
        ipfs_gateway_port = 8001

    # Use the prototype 'fluence' entry as a template, then emit one concrete
    # service (and its volumes) per container
    config = copy.deepcopy(CONFIG)
    service = config['services']['fluence']
    del config['services']['fluence']
    config['volumes'] = {}

    for i in range(0, scale):
        container_name = container + '-{}'.format(i)
        config['services'][container_name] = copy.deepcopy(service)
        container_config = config['services'][container_name]
        container_config['container_name'] = container_name
        container_config['image'] = container_config['image'].format(
            container_tag=container_tag
        )
        container_config['volumes'] = map(
            lambda v: v.format(container_name=container_name),
            container_config['volumes']
        )
        container_config['command'] = container_config['command'].format(
            keypair=keypairs[i],
            management_key=management_key,
            host=host,
            tcp_port=tcp_port,
            ws_port=ws_port
        )
        if len(bootstraps) > 0:
            container_config['command'] += ' --bootstraps {}'.format(' '.join(bootstraps))
        container_config['ports'] = map(lambda p: p.format(
            tcp_port=tcp_port,
            ws_port=ws_port,
            ipfs_port=ipfs_port,
            ipfs_swarm_port=ipfs_swarm_port,
            ipfs_gateway_port=ipfs_gateway_port,
        ), container_config['ports'])
        for key in container_config['environment']:
            container_config['environment'][key] = container_config['environment'][key].format(
                host=host,
                ipfs_port=ipfs_port,
                ipfs_swarm_port=ipfs_swarm_port,
                ipfs_gateway_port=ipfs_gateway_port,
            )
        # register this container's named volumes in the top-level 'volumes' map
        for key in CONFIG['volumes']:
            key = key.format(container_name=container_name)
            config['volumes'][key] = None

        # consecutive containers get consecutive ports
        tcp_port += 1
        ws_port += 1
        ipfs_port += 1
        ipfs_swarm_port += 1
        ipfs_gateway_port += 1

    puts("Writing config to {}".format(out))
    with hide('running'):
        run('rm {} || true'.format(out))
        append(out, yaml.dump(config))

View File

@ -0,0 +1,183 @@
{
"target": "krasnodar",
"environments": [
{
"name": "krasnodar",
"container_tag": "latest_v233",
"containers_per_host": 1,
"external_bootstraps": [
"/ip4/165.227.164.206/tcp/7001",
"/ip4/139.59.148.53/tcp/7001",
"/ip4/157.230.98.75/tcp/7001"
],
"hosts": [
"164.90.171.139",
"178.128.194.190",
"46.101.159.139",
"161.35.222.178",
"164.90.164.229",
"164.90.168.55",
"161.35.212.85",
"164.90.165.150",
"164.90.172.126",
"164.90.171.156"
],
"user": "root",
"bootstrap": "164.90.171.139",
"management_key": "12D3KooWFRgVmb1uWcmCbmJqLr8tBQghL6ysSpK2VyE2VZbaQ6wy",
"caddy": [
{ "host": "kras-00.fluence.dev", "ports": [9001, 9990, 5001, 5550, 8880, 8001], "addr": "164.90.171.139" },
{ "host": "kras-01.fluence.dev", "ports": [9001, 5001, 8001], "addr": "178.128.194.190" },
{ "host": "kras-02.fluence.dev", "ports": [9001, 5001, 8001], "addr": "46.101.159.139" },
{ "host": "kras-03.fluence.dev", "ports": [9001, 5001, 8001], "addr": "161.35.222.178" },
{ "host": "kras-04.fluence.dev", "ports": [9001, 5001, 8001], "addr": "164.90.164.229" },
{ "host": "kras-05.fluence.dev", "ports": [9001, 5001, 8001], "addr": "164.90.168.55" },
{ "host": "kras-06.fluence.dev", "ports": [9001, 5001, 8001], "addr": "161.35.212.85" },
{ "host": "kras-07.fluence.dev", "ports": [9001, 5001, 8001], "addr": "164.90.165.150" },
{ "host": "kras-08.fluence.dev", "ports": [9001, 5001, 8001], "addr": "164.90.172.126" },
{ "host": "kras-09.fluence.dev", "ports": [9001, 5001, 8001], "addr": "164.90.171.156" }
],
"keypairs": {
"fluence_bootstrap.yml": [
"4EMUFTvXM3D6M8NHwNCMwjkjdSUSHJrnzdHh7L3XcY9E3iUNncUohQ7EcjJaovVwzzqdTcbki5iAnUUHEb59j4pQ"
],
"fluence.yml": [
"29JvD17kkvFXnTNDRhmVehLTinFzaq8trNNADzDEpFTm8cTVJuLVxzf4AysDg9daerBRa2xhgj9QYGCKXkbbUXbd",
"2zzZEHJFsxnKSihpC2GkcQtgaUYBwW742ne5ZB2bjRGwnbgMEmn1MqoHxm5MbnMwEB4DLP2cxKYN4Ag7MGWus8PQ",
"ir4of5Kcqu4Zjkg4GwWujTKUvggqHzHPKS9vpN9G96dMGi9pKqnDRrXpgjKTogzY7i1AnrTfoD464GgjFcK8QUf",
"58kzJ5Xxvj7Mmrq2NTVTa9JRdmnpWQAsnWEUauuqbgwbqqHKZoYC498Yfa6wLJiEBQiuLGCqrBTqmktngYN32Xnz",
"aD93EK3cEFCM4aHFqR3SNs8xUvoFTdhLcm1pn2wsJ8kwxVGC4mCgRC6znxBAfp9ja83wCBsDqrBW3NysqkbG5Gk",
"47L1SWMPu3VfMQRqE7a5bgE6H7K56v9wU5SoFUrJVdDs3obVCM4afMfajBApj1q8Q4itKoD5zb9ydVefZirHLBNa",
"3pSZ7S4zQGSYvt7GDqrxYVJitUCv1YC9oVcoeziDvfwrS5u3knKkFXEQ164ZBZCaTGaKZk9RR3awvhW34gtYDhMN",
"G1LbuJiYq3PHWPzfBkp8j22Wr6Lb8xvyW6vcuLBWLwupz2sy2Ku2SRTw5x28zk91uS8X3AaLEupuHC8Z31FC72W",
"2Y78rG3QrrGzzvVJhvbEdvnya11Z2a3qR3bjP5Yr5PF46BomTF4MXRJb4qiUQFBZfqUuzYBDs7SP1odfixAr3U7Y",
"2RLVwr5F2yxU6ZLEkdCA7GP2X3DTBCJsZ3PmNtjg8Bbbj9VPcfE928cEci1AoqbstNbT28cNm5ji8d6cLknRxJNb",
"2SBto5Nx8gY1TYJCmnhBQMDzkTghAFbQZFu1eLN9piWUz8vdySBQYeXQXrYXb2Yoh1wzQmST5uReRNYWVTHQEdzf",
"36mqbXaCw4p8cJJCNMUPF9Eoj5stbL5BL8digfkrtudR4AFcAS8p2Ja1FHGVVqtqaGWY9haQQwPmw8RT4Gkfdd5T",
"2JQXbwU8FHbTimtwz75SyYP25MZ8RmKKPYE3aXf2eCrpjUfkoZ85aXQkxJPjZiJe5JbES5N6LZLvevtAPZLznSTa",
"2tFtYR2S8kboncrv4xLkTxHuAVXN8KRxFwC2xPGYiHsXLLvYUHYsFQKHgMwn17tG4ZzMXTwYZcDC5BPTDRKiJP1f",
"3vXDEMu7ya5dRQRzpAvazvL5GbmXF6T8nXeESNUxUMax6qHgdfVDywA3zddrZmgAwfcHaxz7sPYtyjaPU9Z7jCj5",
"4ZUye9KKXhCSNs1Cdu2L2kioBUrhNKfYtFLUbujC7QbnweYS3VEWvNbi2Lq5VaEd6iHKMEfz8tzWPoaKut8JpmoS",
"5VxTSWtAwzfqv9cXMZJTFU7BqEv85d59KN1mkreJXeUAVFMnfmW8fwtDV8njnAJErSUokmYTgpYXEkbmcL3nm5WX",
"3Bvaa3HbwBazLxKsHUQkw6swqxV5msqcKBtANDLzqQysDn5XRkp1HBHViKiy7b71zD93oQKHgmQao4jzYBffrVMT",
"4UDVbiMtz6RjfBM1oM6kX781D8qRaZh6d8H51YdLWvQZVPjeMug6pqYooBMg4sjVXnoFkcZ2AHyTkQLn6ZVuWPGP",
"5Cnpvd3VH3SRk2qLjnL7CjD8hyMVNY9StnqebZ4sNndyxMfa5PKeFo34S1EVXwUdWdUeog9q59bTPaWyx51RweQf",
"2JxUxVCYVhFzw8J1jz83Sb73XpBiHoNXuxwR73JGnJa7xWxSKbkNmSKZXZ4r2whR8Qc44F8oq1F1SdnF75tZEepE"
]
}
},
{
"name": "testnet",
"container_tag": "latest_v233",
"containers_per_host": 1,
"external_bootstraps": [],
"hosts": [
"165.227.164.206",
"138.197.189.50",
"157.230.23.49",
"159.65.126.102",
"142.93.169.49",
"139.59.148.53",
"206.81.30.129",
"157.230.98.51",
"159.89.2.70",
"157.230.98.75"
],
"user": "root",
"bootstrap": "165.227.164.206",
"management_key": "12D3KooWFRgVmb1uWcmCbmJqLr8tBQghL6ysSpK2VyE2VZbaQ6wy",
"caddy": [
{ "host": "net01.fluence.dev", "ports": [9001, 9990, 5550, 5001, 8880, 8001], "addr":"165.227.164.206" },
{ "host": "net02.fluence.dev", "ports": [9001, 5001, 8001], "addr":"138.197.189.50" },
{ "host": "net03.fluence.dev", "ports": [9001, 5001, 8001], "addr":"157.230.23.49" },
{ "host": "net04.fluence.dev", "ports": [9001, 5001, 8001], "addr":"159.65.126.102" },
{ "host": "net05.fluence.dev", "ports": [9001, 5001, 8001], "addr":"142.93.169.49" },
{ "host": "net06.fluence.dev", "ports": [9001, 5001, 8001], "addr":"139.59.148.53" },
{ "host": "net07.fluence.dev", "ports": [9001, 5001, 8001], "addr":"206.81.30.129" },
{ "host": "net08.fluence.dev", "ports": [9001, 5001, 8001], "addr":"157.230.98.51" },
{ "host": "net09.fluence.dev", "ports": [9001, 5001, 8001], "addr":"159.89.2.70" },
{ "host": "net10.fluence.dev", "ports": [9001, 5001, 8001], "addr":"157.230.98.75" }
],
"keypairs": {
"fluence_bootstrap.yml": [
"wCUPkGaBypwbeuUnmgVyN37j9iavRoqzAkddDUzx3Yir7q1yuTp3H8cdUZERYxeQ8PEiMYcDuiy1DDkfueNh1Y6"
],
"fluence.yml": [
"4pJqYfv3wXUpodE6Bi4wE8bJkpHuFbGcXrdFnT9L29j782ge7jdov7FPrbwnvwjUm4UhK5BvJvAYikCcmvCPVx9s",
"2zgzUew3bMSgWcZ34FFS36LiJVkn3YphW2H8TDvL8JF8T4apTDxnm7GRsLppkCNGS5ytAQioxEktYq8Wr8SWAHLv",
"52WaZJDHFFZbwL177g497ctE7zqbMYMwWpVMewjc1U63tWjFUCNPuzB472UkdZWBykjiNWA8qtLYNAQEqQCcWfoP",
"23BFr8LKiiAtULuYJTmLGxqDVHnjFCzNFTZcKq6g82H9kcTNwGq8Axkdow4fh4u4w763jF6uYVK6FuGESAQBMEPB",
"3wR6FT1ZGnEwPqYBNz5YVpA6qJ4uUTLcK1SpWrwJennH5Bk4JgCjKKjUiRcjDk3Cwjbm2LAdrLWTYXHjxbogttQ9",
"3KoMfcGUox46Brcnojs8yuNZN2YTH7kvmxW8g5PiRrDE2dCiQeZzhDkaJvmDDnUaHFRp6UvdmBsDrYWywYoNDqHD",
"5yqQfXyjMGKZaykByyrEjCYqxjHaCxQKxLQ3vfzU4M8U51auCiCeKT5UvnZGgMFbwwrjePUMYPvThyXiimGvq16x",
"g59HxPYa1gxDZbMEtt2sry9tncQ1XwJMqoYh47JVsXnXeTf7svtzdag2pbXr6uN35L43nmKN2W4XW9MX7g5AUPB",
"6mFRjb32PY4eMimBJq6sS6L2yW69ShHk46KTycP6v4unfBF7JYQc2Z8m8i4RPr6snWeH7Mq4ae7wCQLqV2xuCrv",
"5EwruNmAyo9MjXSkUzKJLeUnNaxjjbHovTcfkUozzAcE9NgcoYnBs5bsoNnQpmZssk6KEYcM4dTa8nioYKtMHrKd",
"4iUyBDs1w51QzMtT7j4ydeCGnXw7HYzGANHFVVYVio5H9TKmefQodzW3Y8ir9tAbq31mwBbEW79XhUzCGvi16xyj",
"3hL8p1YoBJ4aY8gpxSzq7JTbobLd4iY2RXgLn9oXPJBVPgihfZU4RYxEYRCM2ccEYbQrpHSKqJF6SHoR1khXoNTp",
"CaexYkVKpcniuywCvx3xb3TBEaCsoEh7jLwoW8SBn68RapqsV6gNaETmyi7jTTvsy6k2tjkq4zd9LU2UkD9LSGc",
"3UNUeYDfxzUEkNvzV5Y3ysUQzZSb5bvpBVJKRqtkpavsigL9oxnnFsbPQsbAXYorZdT8NWx8TuGj2mVVWPJhCeNg",
"3UgMLGKzxoc33G3uqjFNRcESRnZvxVUBXuMd9cXdAUBjUp2DqEzqTWmA6skg4i8YsfkYy4nHqiDbU7oMUzi1RZ2j",
"4GjM64v9dZRu5weHMJuvi9JTa9xnD2yQyehyVDz2LMmukvNuRW4niPanxxFT3i9L8arZjn967fCAQoAug1qcpeJY",
"3ZtwTXfgUQ4SspkgjGH5F28feyrXB69e1m3o1MJzTv5SSZWpaT3X1FhRjKmfohDKgiXA6ddKZiqSFfHjewTLMnaJ",
"4nzpwEHwfQZXgxJjsSSvz8TiY7A88rYm2JHGz8BF5U8AzgKXNBECF6d8B2ZByrdL1TWXaKuXjNpNp8kAzCXm1JnM",
"4yBQSVmxZ9Ft9up8gv9A4G3BfvAT52rGvHLjteDuL18KvwbZE3vB1u84VxVMjEzAJSB1YuvNoqB8aoRireV7zcLn",
"w1LS56wM6aW1YSCb22buCm2hVnFdWRjLdacahL2a1iM2vu4DNc8tR68yRi31dVtVbR6ZSsmKU2zzYKeEuDNoFNw",
"55ssYzrcKRc68xMgXA8KC3QjuBNffxzAtUtvWvRSo4vdDBuoZTe8KCwHJR7vEUhKErWqWFmjMQtH9GCBKhdokxeJ"
]
}
},
{
"name": "stage",
"container_tag": "latest_v233",
"containers_per_host": 5,
"hosts": [
"134.209.186.43"
],
"user": "root",
"bootstrap": "134.209.186.43",
"external_bootstraps": [
"/ip4/164.90.171.139/tcp/7770"
],
"management_key": "12D3KooWFRgVmb1uWcmCbmJqLr8tBQghL6ysSpK2VyE2VZbaQ6wy",
"caddy": [
{
"host": "stage.fluence.dev",
"ports": [
9990, 9001, 9002, 9003, 9004, 9005,
5550, 5001, 5002, 5003, 5004, 5005,
8880, 8001, 8002, 8003, 8004, 8005
],
"addr": "134.209.186.43"
}
],
"keypairs": {
"fluence_bootstrap.yml": [
"1eye6JeqbLhXM4gF6C1n9UKMvHdp8niwH9XXh2159eJcYG3Zk4nCNSrG5qfwvkrFka3L6xVssJ2fpHTvpPPhz5p"
],
"fluence.yml": [
"3suwMJBLUSYb2LHdAdCzstEUbMzxmKj144GUJKMGp4syRF45pW249F1je2BBxwAF3pZyFSdzn88sfQNpWByHVKDC",
"e8AFF1TqoF9QTcMG12968KRn1MHemLgzTTW18ef9ji7cRYKc8GdkVcecCvDUj1GyMVvNQHU7dozE94tKz8aj6DS",
"4BMesaouUWJo6ps12sSntZMLJ2EpNivehx1o89NbnxyDbMD5JmUpeeiXSmW4ZL2cYjyJWxSKwwQGaKbHK3oqcuJo",
"4NsvdS4S3SHrh1FYNFc4881t2Pxf6WriREhbLp6vuVoXeXSi8AAHb81rPr9ju21ZYvJ8dyhYS9AHn41jgddCUiPG",
"3iX1GxpUHGJsX4srDAx4KrJSYmb3xgYqm4ZuYN6snuS9upYPF9sqEYAdPXd8cSPNTmN9QKmKnZGn7URzKUr9f9Q2",
"wBTuv6muKu1BrBrnAtwEGZykHJxnMcxYT8RxdcMwGnDNpPNktS4FoFG36tKoVfofMXHepXuW67PoB9A6T5p7tXu",
"5j7RxESUMQSGPFfegAH7MuWi1oddp7qMeA52iWMvrYdEmcoDYzKrU5vbj2vesu7AjqvsLoPhUdJssqAFs8uWAkEJ",
"4ttKiUoQ9XuSJPeRdtj6QuPBqRJZZD5XhcRM9XG8tLGRwESkPcWbjNArWnPW92kijcvYFQevhok8hrGx8DsaU7Jw",
"2kvaPrtJYHTvq52hCTNCqBo4GxbdBk62QpREQmK6QVZPUxJTo9jcJNDXDLERzgaFJV6izhTxB6ieczMpAg8uW1kb",
"3uAKK4VtrTRiEWZX6JRbGajtz59FcJpe4mMztFUYSkepegqeLKejBVgrsTPxsPpq1ueuEKsTy3jchyebzXqbPRJW",
"3FprLUcTG8HaYVqGgsoDj7GuxujJCHa3Ucu8WyqubmrspQ9ND327JT4phfmxy1nB1C93Qvw5U6qtWZ5PNxwGvZaj",
"4VeFHU452ba1cTh1WnThRQt19wHmCPnto4r3TufQWr2DJzxAbTnQJE3HpEbhc4LE4Yj7biFyuNahZznM4DnvVHQj",
"WgQqFRTGCgZS7YMhWrffdfa8sBSJbzp6gKXKPTPstUSxQGsSNJogwLGeBbk9wyRaqJMVGw3gd5MTELd5X7R96mZ",
"3fugBhX8C9LJzgRiKkSuy4vbhVjtoEQvJgjQZKFLGEAfwYo4cWGAnKwqN8QeSvLdqkPbJrbgjSAZ6pEVbEnEfuSq",
"4QSt7vayxy3ZFGkZaomvtBW7Q5zpyFXHGJk91rjvwQBH1Dj9wamvAkTSwh1V26uopGwwU7ZBY26GWvm2A3kMk3hy",
"49jZ96qanxWryTDvSXkKNzTcKW5yfKWzPnG2h4JrLzjBo5zKKL9dYUcJDyPE68kz8XjUnfPbq5CTX4uN8TDLWbac",
"2yKn9UpGS8cSWEqYKtrbCbA1Sy1fbcPwHaNsUtY6YKGZ6udDibQbbmgn2BkYXWro2D3TZs1rbuE6VzdW6msMQc7v",
"5ZHShramB9ST4M2u6rmEdoJcX6BQr9Y889sZZpjCWzBSyWaEqvxxr2uUdxNBWn7m7cHnH9rtuvvxaRJPLnB5RLX9",
"gKdiCSUr1TFGFEgu2t8Ch1XEUsrN5A2UfBLjSZvfci9SPR3NvZpACfcpPGC3eY4zma1pk7UvYv5zb1VjvPHwCjj"
]
}
}
]
}

122
fluence/deploy/docker.py Normal file
View File

@ -0,0 +1,122 @@
# Copyright 2020 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from fabric.api import *
from utils import *
from docker import *
import json
@task
@runs_once
def install_docker():
    """Entry point: load deployment_config.json, then run do_install_docker
    on every configured host (fabric fans out via execute)."""
    load_config()
    execute(do_install_docker)
@task
@parallel
def do_install_docker():
    """Install docker-ce, containerd and docker-compose on an Ubuntu (focal) host.

    Runs under sudo; steps mirror docker's official apt installation guide.
    """
    puts("TODO: WRITE LOGGING DRIVER SETUP TO daemon.json https://docs.docker.com/config/containers/logging/json-file/")
    with hide('running'):
        # remove any pre-existing docker packages; `|| true` ignores "not installed"
        sudo("apt-get remove --yes docker docker-engine docker.io containerd runc || true")
        sudo("apt-get update")
        puts("preparing to install docker")
        sudo("apt-get install --yes haveged apt-transport-https ca-certificates curl gnupg-agent software-properties-common")
        # trust docker's apt repository signing key and verify its fingerprint
        sudo("curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -")
        sudo("apt-key fingerprint 0EBFCD88")
        # NOTE(review): release is hard-coded to `focal`; verify against target hosts
        sudo("""add-apt-repository -y "deb [arch=amd64] https://download.docker.com/linux/ubuntu focal stable" """)
        sudo("apt-get update")
        puts("installing docker")
        sudo("apt-get install --yes docker-ce docker-ce-cli containerd.io")
        puts("installing docker-compose")
        # docker-compose is installed as a single pinned binary (1.26.0)
        sudo("""curl -L "https://github.com/docker/compose/releases/download/1.26.0/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose """)
        sudo("chmod +x /usr/local/bin/docker-compose")
@task
@runs_once
def deploy_watchdog():
    """Entry point: deploy the docker_watchdog container to every configured host."""
    load_config()
    execute(do_deploy_watchdog)
@task
@parallel
def do_deploy_watchdog():
    """(Re)start the docker_watchdog monitoring container on the current host."""
    # 'running', 'output'
    with hide('running', 'output'):
        # remove a previous instance, if any
        run("docker rm -f docker_watchdog || true")
        run(
            "docker run --name docker_watchdog --detach --restart=unless-stopped " +
            "-e HOST={} ".format(env.host_string) +
            "-e SLACK_CHANNEL='#endurance' " +
            # NOTE(review): SECRET is a placeholder — the real Slack webhook URL
            # must be injected somehow; verify before relying on alerts
            "-e SLACK_URL=SECRET " +
            "-v /var/run/docker.sock:/var/run/docker.sock " +
            "leonardofalk/docker-watchdog"
        )
@task
@parallel
def deploy_caddy():
    """Deploy a caddy reverse proxy on each node listed under the target
    environment's 'caddy' key (each entry supplies addr, ports and host)."""
    load_config()
    target = target_environment()
    for node in target['caddy']:
        # point fabric at this single node, then run the per-node task on it
        env.hosts = [node['addr']]
        puts("node: {}".format(node))
        execute(do_deploy_caddy, node['ports'], node['host'])
@task
def do_deploy_caddy(ports, host):
    """Write a Caddyfile on the remote host and (re)start a caddy container
    that terminates wss://host:1PORT and proxies it to this node's PORT.

    ports -- list of backend ports to expose through caddy
    host  -- public hostname used in the generated site addresses
    """
    ip = env.host_string
    fname = 'Caddyfile'
    # public-facing ports get a '1' prefix, e.g. 9001 => 19001
    prefix = '1'
    container = 'caddy'
    # start from a clean Caddyfile on the remote host
    run('rm {} || true'.format(fname))

    # append a chunk of text to the remote Caddyfile
    def append(line):
        run('echo "{}" >> {}'.format(line, fname))

    # Generated config will be as follows:
    #
    # {
    #   email alexey@fluence.one
    # }
    #
    # host:prefixport { # add 'prefix', e.g.: 9001 => 19001
    #   log {
    #       format console
    #   }
    #   reverse_proxy ip:port
    # }
    append('''
{
    email alexey@fluence.one
}
''')
    for port in ports:
        append('''
wss://{}:{}{} {{
    log {{
        format console
    }}
    reverse_proxy wss://{}:{}
}}'''.format(host, prefix, port, ip, port))

    # -p prefixport:prefixport
    open_ports = " ".join("-p {}{}:{}{}".format(prefix, p, prefix, p) for p in ports)
    run('docker rm -f {} || true'.format(container))
    run('docker pull caddy:latest')
    run('docker run --name {} -d -p 80:80 {} -v $PWD/Caddyfile:/etc/caddy/Caddyfile -v caddy_data:/data caddy:latest'.format(container, open_ports))

23
fluence/deploy/fabfile.py vendored Normal file
View File

@ -0,0 +1,23 @@
# Copyright 2018 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from fabric.api import *
from fabric.contrib.files import append
from utils import *
from docker import *
from fluence import *

187
fluence/deploy/fluence.py Normal file
View File

@ -0,0 +1,187 @@
from __future__ import with_statement
from collections import namedtuple
from fabric.api import *
from fabric.contrib.files import append
from utils import *
from compose import *
from collections import namedtuple
from time import sleep
# DTOs
# Node: one deployed fluence container; tcp/ws are Service tuples.
Node = namedtuple("Node", "peer_id tcp ws")
# Service: a single endpoint; multiaddr starts as None and is filled by fill_multiaddr.
Service = namedtuple("Service", "port multiaddr")

# Default fluence ports (not referenced in this module — presumably consumed
# by the compose-file generation; verify against gen_compose_file).
FLUENCE_NODE_PORT = "7777"
FLUENCE_CLIENT_PORT = "9999"
# Log-line marker used by parse_peer_ids to locate the node's peer id.
PEER_ID_MARKER = "server peer id"
@task
@runs_once
def deploy_fluence():
    """Deploy a fluence network: bootstrap hosts first, then the rest.

    Stores the bootstrap multiaddrs in env.bootstraps (consumed by
    deploy_nodes) and prints every node's tcp/ws multiaddr and peer id.
    """
    # NOTE(review): hide() without arguments hides no output groups; the
    # commented-out 'running', 'output' suggests they were toggled on purpose.
    with hide():
        load_config()
        target = target_environment()

        env.hosts = target["bootstrap"]
        puts("Fluence: deploying bootstrap")
        results = execute(deploy_bootstrap)
        bootstraps = fill_addresses(results.items())
        # Materialize a real list instead of a lazy `map` object: the value is
        # later concatenated with a list in deploy_nodes and may be consumed
        # more than once, both of which break with an iterator under Python 3.
        env.bootstraps = [b.tcp.multiaddr for b in bootstraps]

        env.hosts = target["hosts"]
        puts("Fluence: deploying rest of the nodes")
        results = execute(deploy_nodes)
        nodes = fill_addresses(results.items())

        puts("Fluence: deployed.\nAddresses:\n%s" % "\n".join(
            "{} {} {}".format(n.tcp.multiaddr, n.ws.multiaddr, n.peer_id) for n in nodes))
        puts("Bootstrap:\n%s" % "\n".join(
            "{} {} {}".format(n.tcp.multiaddr, n.ws.multiaddr, n.peer_id) for n in bootstraps))
@task
@parallel
def deploy_bootstrap():
    """Generate fluence_bootstrap.yml for this host (a single container)
    and deploy it; returns the resulting [Node] for this host."""
    target = target_environment()
    yml = "fluence_bootstrap.yml"
    # one container per bootstrap host => exactly one keypair for this host
    keypair = get_keypairs(yml, get_host_idx(containers=1), count=1)
    gen_compose_file(
        out=yml,
        container_tag=target['container_tag'],
        scale=1,
        is_bootstrap=True,
        bootstraps=target['external_bootstraps'],
        host=env.host_string,
        management_key=target['management_key'],
        keypairs=keypair,
    )
    return do_deploy_fluence(yml)
@task
@parallel
def deploy_nodes():
    """Generate fluence.yml for this host (containers_per_host replicas)
    and deploy it; returns the resulting [Node] for this host."""
    target = target_environment()
    yml = "fluence.yml"
    scale = target["containers_per_host"]
    # each host consumes a disjoint slice of `scale` keypairs
    keypairs = get_keypairs(yml, get_host_idx(scale), count=scale)
    gen_compose_file(
        out=yml,
        container_tag=target['container_tag'],
        scale=scale,
        is_bootstrap=False,
        # env.bootstraps is set by deploy_fluence before this task runs
        bootstraps=env.bootstraps + target['external_bootstraps'],
        host=env.host_string,
        management_key=target['management_key'],
        keypairs=keypairs,
    )
    return do_deploy_fluence(yml)
@task
@parallel
# returns {ip: Node}
def do_deploy_fluence(yml="fluence.yml"):
    """Pull images, recreate the containers defined in `yml`, inject
    Config.toml into each container, restart, and collect node addresses."""
    with hide():
        compose("pull -q", yml)
        # remove old containers, then create (without starting) fresh ones
        compose('rm -fs', yml)
        compose('up --no-start', yml) # was: 'create'
        copy_configs(yml)
        compose("restart", yml)
        # give the nodes a moment to boot and print their peer ids to the logs
        sleep(5)
        addrs = get_fluence_addresses(yml)
        return addrs
def get_host_idx(containers):
    # Index of the current host in env.hosts, scaled by containers-per-host,
    # so every host addresses a disjoint slice of the keypair list.
    return env.hosts.index(env.host_string) * containers
def copy_configs(yml):
    """Upload the local Config.toml and copy it into every container of `yml`."""
    # there's no `cp` in `docker-compose`: https://github.com/docker/compose/issues/5523
    put("Config.toml", "./")
    containers = compose('ps -q', yml).splitlines()
    for id in containers:
        run('docker cp ./Config.toml %s:/Config.toml' % id)
# returns [Node]
def get_fluence_addresses(yml="fluence.yml"):
    """Collect peer id and tcp/ws host ports of every container in `yml`.

    Multiaddrs are left as None here; fill_addresses adds them later once
    the host IP is known.
    """
    containers = compose('ps -q', yml).splitlines()
    nodes = []
    for id in containers:
        (tcp_port, ws_port) = get_ports(id)
        peer_id = get_fluence_peer_ids(id)
        node = Node(peer_id=peer_id, tcp=Service(tcp_port, None), ws=Service(ws_port, None))
        nodes.append(node)
    return nodes
# Assuming Fluence's tcp port starts with 7
# and websocket port starts with 9
def is_fluence_port(host_port):
    """True when `host_port` contains a fluence binding on all interfaces:
    a tcp port beginning with 7 or a websocket port beginning with 9."""
    return any(marker in host_port for marker in ('0.0.0.0:7', '0.0.0.0:9'))
# returns (tcp port, ws port)
def get_ports(container):
    """Query `docker port` for `container` and return its (tcp, ws) host ports."""
    from itertools import chain
    lines = run('docker port %s' % container).splitlines()
    # each line looks like '7777/tcp -> 0.0.0.0:7777'; keep both halves
    ports = chain.from_iterable(l.split('/tcp -> ') for l in lines)
    # filter by host port and remove 0.0.0.0 part
    ports = list(port.replace('0.0.0.0:', '') for port in ports if is_fluence_port(port))
    # exactly two fluence ports are expected per container (tcp + ws);
    # the tuple unpack raises ValueError otherwise
    (a, b) = ports
    # tcp port starts with 7
    if a.startswith('7'):
        return (a, b)
    else:
        return (b, a)
def get_fluence_peer_ids(container, yml="fluence.yml"):
    """Extract the peer id of `container` from its recent docker logs.

    NOTE(review): the `yml` parameter is unused; kept for call-site compatibility.
    """
    logs = run('docker logs --tail 10000 %s' % container).splitlines()
    return parse_peer_ids(logs)
# returns (node_peer_id, peer_peer_id)
def parse_peer_ids(logs):
    """Scan log lines and return the value after '=' on the last line that
    contains PEER_ID_MARKER, or None when the marker never appears."""
    found = None
    for entry in logs:
        if PEER_ID_MARKER in entry:
            found = entry.split("=")[-1].strip()
    return found
def compose(cmd, yml="fluence.yml"):
    # Run a docker-compose subcommand against the given compose file on the
    # remote host and return its captured output.
    return run('docker-compose -f %s %s' % (yml, cmd))
def service(yml):
    """Derive the compose service name from a compose file name by stripping
    every '.yml' occurrence (e.g. 'fluence.yml' -> 'fluence')."""
    name = yml.replace(".yml", "")
    return name
# takes: dict {ip: Node}
# returns: [Node]
def fill_addresses(nodes_dict):
    """Attach /ip4 multiaddrs to every node returned per-host by fabric.

    `nodes_dict` is iterated as (ip, nodes) pairs; each node gets its tcp
    multiaddr and a ws multiaddr with a '/ws' suffix.
    """
    filled = []
    for ip, host_nodes in nodes_dict:
        for node in host_nodes:
            # node service multiaddr
            with_tcp = node._replace(tcp=fill_multiaddr(ip, node.tcp))
            # peer service multiaddr
            complete = with_tcp._replace(ws=fill_multiaddr(ip, with_tcp.ws, suffix="/ws"))
            filled.append(complete)
    return filled
def fill_multiaddr(ip, service, suffix=""):
    """Return `service` with its multiaddr set to /ip4/<ip>/tcp/<port><suffix>."""
    addr = "/ip4/%s/tcp/%s%s" % (ip, service.port, suffix)
    return service._replace(multiaddr=addr)

3
fluence/deploy/prometheus/.gitignore vendored Normal file
View File

@ -0,0 +1,3 @@
console_libraries/
consoles/

View File

@ -0,0 +1,18 @@
global:
scrape_interval: 15s
evaluation_interval: 15s
rule_files:
# - "first.rules"
# - "second.rules"
scrape_configs:
- job_name: node
static_configs:
- targets: ['localhost:19999']
- targets: ['localhost:29999']
- targets: ['localhost:39999']
# histogram_quantile(0.95, sum(irate(kademlia_exporter_random_node_lookup_duration_bucket[10s])) by (le))
# histogram_quantile(0.95, sum(rate(kademlia_exporter_ping_duration_bucket[5m])) by (le))

View File

@ -0,0 +1,13 @@
# On Alpine, you'd need to install the following before doing pip install
# apk add gcc musl-dev libffi-dev rust cargo make openssl-dev
bcrypt==3.1.7
cffi==1.14.5
cryptography==2.2.2
enum34==1.1.10
Fabric==1.14.1
ipaddress==1.0.23
paramiko==2.7.2
pycparser==2.20
PyNaCl==1.4.0
six==1.15.0

108
fluence/deploy/utils.py Normal file
View File

@ -0,0 +1,108 @@
# Copyright 2018 Fluence Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import with_statement
from fabric.api import *
from fabric.utils import *
import json
import time
def ensure_docker_group(user):
    """Create the `docker` group if missing and add `user` to it (best-effort)."""
    from fabric.api import run
    run("groupadd docker &>/dev/null || true")
    run("usermod -aG docker %s || true" % user)
def chown_docker_sock(user):
    """Make /var/run/docker.sock readable and owned by `user`:docker."""
    from fabric.api import run
    run("chmod a+r /var/run/docker.sock")
    run("chown %s:docker /var/run/docker.sock" % user)
def get_docker_pgid():
    """Return the gid of the remote `docker` group (as a string)."""
    from fabric.api import run
    output = run("grep docker /etc/group | cut -d ':' -f 3")
    output = output.stdout.splitlines()
    # exactly one `docker` entry is expected in /etc/group
    assert len(output) == 1
    return output[0]
def wait_node_started(api_port):
    """Poll localhost:`api_port` on the remote host until curl succeeds,
    printing a dot every 0.5s while waiting."""
    echo("waiting for node to start ", prefix=True)
    while run('curl localhost:%s/' % api_port, quiet=True).failed:
        time.sleep(0.5)
        echo(".")
    echo('\n')
def echo(s, prefix=False):
    # Thin wrapper over fabric's puts: no trailing newline, flushed
    # immediately, host prefix only when requested.
    puts(s, end='', flush=True, show_prefix=prefix)
def get_config(environment):
    """Load deployment_config.json from the working directory and return the
    section named `environment`.

    Raises IOError/OSError when the file is missing, ValueError on malformed
    JSON, and KeyError when the environment is not present.
    """
    # `with` guarantees the file handle is closed even when json parsing or
    # the key lookup raises (the original leaked the handle on exceptions).
    with open("deployment_config.json", "r") as file:
        info_json = file.read().rstrip()
    return json.loads(info_json)[environment]
def get_image_tag(env):
    """Return env.image_tag when set, otherwise the pinned default "v0.3.0"."""
    return getattr(env, 'image_tag', "v0.3.0")
# copies all necessary files for deploying
def copy_resources():
    """Recreate the remote `scripts/` directory and upload deploy scripts."""
    puts("Copying deployment files to node")
    # cleans up old scripts
    run('rm -rf scripts')
    run('mkdir scripts -p')
    run('mkdir scripts/functions -p')
    # copy local directory `script` to remote machine
    put('scripts/deploy.sh', 'scripts/')
    put('scripts/node.yml', 'scripts/')
    put('scripts/functions/asserts.sh', 'scripts/functions/')
    put('scripts/functions/docker.sh', 'scripts/functions/')
def home_dir():
    """Return $HOME of the remote user (output hidden from the console)."""
    with hide('output'):
        return run('echo $HOME').stdout
def load_config():
    """Read deployment_config.json into env.config and configure fabric:
    sets env.user from the target environment and, unless hosts were given
    on the command line, env.hosts from the config."""
    # Set to False to disable `[ip.ad.dre.ss] out:` prefix
    env.output_prefix = True
    # `with` closes the file even when JSON parsing raises (the original
    # leaked the handle on exceptions).
    with open("deployment_config.json", "r") as cfg_file:
        env.config = json.loads(cfg_file.read().rstrip())
    target = target_environment()
    # Set the username
    env.user = target["user"]
    if not env.hosts:
        # use addresses from config as fabric hosts
        env.hosts = target['hosts']
    else:
        puts("will use hosts: %s" % env.hosts)
def target_environment():
    """Return the environment dict from env.config whose "name" equals
    env.config["target"].

    Fixed for Python 3: the original indexed `filter(...)` directly, but
    `filter` returns a non-subscriptable lazy iterator on Python 3, so
    `environment[0]` raised TypeError there. A list comprehension works on
    both interpreter lines.

    Raises IndexError when no environment matches the target name.
    """
    target = env.config["target"]
    matches = [e for e in env.config["environments"] if e["name"] == target]
    return matches[0]
def docker_tag():
    """Return the 'docker_tag' of the current target environment.

    Bug fix: the original subscripted the function object itself
    (`target_environment['docker_tag']`), which raises
    `TypeError: 'function' object is not subscriptable` on every call —
    the function must be called first.
    """
    return target_environment()['docker_tag']
def get_keypairs(yml, idx, count):
    """Return `count` keypairs for compose file `yml`, starting at `idx`,
    from the target environment's per-file keypair lists."""
    available = target_environment()['keypairs'][yml]
    return available[idx:idx + count]

View File

@ -1,20 +1,12 @@
#!/usr/bin/env bash
set -o pipefail -o errexit -o nounset
if [ "$#" -ne 1 ]; then
echo "$0 expects a single argument: URL of services.json" >&1
exit 1
fi
BUILTINS_DIR=./builtins/
BUILTINS_DIR=/.fluence/v1/builtins/
TMP_BUILTINS=./tmp/builtins
mkdir -p $BUILTINS_DIR
mkdir -p $TMP_BUILTINS
echo "*** download services.json ***"
curl -sL "$1" -o services.json
jq -r '
to_entries | .[] | .key, .value.url, .value.sha256, .value.version
' services.json |

22
fluence/download_fluence.sh Executable file
View File

@ -0,0 +1,22 @@
#!/usr/bin/env bash
# Download the fluence binary described by fluence.json (url, sha256, version),
# verify its checksum and install it as an executable at /fluence.
set -o pipefail -o errexit -o nounset

# Emit the three fields on separate lines and read them back in one pass.
jq -r '.url, .sha256, .version' fluence.json |
while
    IFS=''
    read -r url
    read -r sha256
    read -r version
do
    echo "*** download $version ***"
    # TODO: use --fail-with-body
    curl -sL --fail $url -o /fluence || (
        echo "failed to download $url" >&2
        exit 1
    )
    # NOTE(review): `exit 1` here runs in a subshell, so it fails the ||
    # group rather than exiting directly; the script stops via errexit/
    # pipefail — verify this is the intended behavior.
    echo "$sha256 /fluence" | sha256sum --check --status || (
        echo "incorrect SHA256" >&2
        exit 1
    )
    chmod +x /fluence
done

7
fluence/fluence.json Normal file
View File

@ -0,0 +1,7 @@
{
"version": "v1.6.0",
"url": "https://github.com/fluencelabs/fluence/releases/download/v1.6.0/particle-node",
"sha256": "b35617f36e0b5bb5f95d47def18aec5389185edd8ab2c3bab24a32a59e8cc9c4"
}

12
fluence/services.json Normal file
View File

@ -0,0 +1,12 @@
{
"aqua-ipfs": {
"version": "v0.4.0",
"url": "https://github.com/fluencelabs/aqua-ipfs/releases/download/v0.4.0/aqua-ipfs.tar.gz",
"sha256": "efc07354511d450819a2210debb0cc7996c1482e15213674bed025bec4d2bc94"
},
"aqua-dht": {
"version": "v0.1.37",
"url": "https://github.com/fluencelabs/aqua-dht/releases/download/v0.1.37/aqua-dht.tar.gz",
"sha256": "2d3e57c34e6140e51dc41de08806573cbc109d190643b0966f5d5b552078c7ee"
}
}