22 Commits
v0.2a ... v0.4a

Author SHA1 Message Date
Sven Holz
b41527a2e6 workflow added 2022-06-21 11:34:28 +02:00
Sven Holz
8b173ff9ee workflow added 2022-06-21 11:21:43 +02:00
Sven Holz
8407bf1250 workflow added 2022-06-21 11:19:15 +02:00
Sven Holz
b51db3e64d moved from self build docker image to gockerhub 2022-06-21 11:13:49 +02:00
Sven Holz
cb6b8e1aae add default values to env variables description 2022-06-20 17:50:43 +02:00
Sven Holz
1772f7d929 add default values to env variables description 2022-06-20 17:50:08 +02:00
Sven Holz
1850598038 add default values to env variables description 2022-06-20 17:46:30 +02:00
Sven Holz
c6e1cc3448 add default values to env variables description 2022-06-20 17:45:18 +02:00
Sven Holz
06cfd448da add default values to env variables description 2022-06-20 17:38:50 +02:00
Sven Holz
21e5627d58 ping timeout added 2022-06-20 17:06:18 +02:00
Sven Holz
7ed91947ab ping timeout added 2022-06-20 17:03:07 +02:00
Sven Holz
5c07ade336 ping timeout added 2022-06-20 15:24:17 +02:00
Sven Holz
6980539a02 ping timeout added 2022-06-20 15:21:48 +02:00
Sven Holz
99d934d041 config.ini to template 2022-06-17 00:46:26 +02:00
Sven Holz
4c5e9ceb90 fix 2022-06-17 00:46:06 +02:00
Sven Holz
5f5aea0332 renamed config.ini to config-template.ini 2022-06-17 00:36:33 +02:00
Sven Holz
b400e34c9a * v0.3
* setup-script fixed and backup added
  * fixed latency value problem (was sometimes string instead of float)
  * cleanup
2022-06-17 00:13:53 +02:00
Sven Holz
dd52e9ef41 fixes of setup script, creating better template files, ensure coorect latency output to influx 2022-06-16 23:19:53 +02:00
Sven Holz
682c943b2e fixes of setup script, creating better template files, ensure coorect latency output to influx 2022-06-16 23:19:41 +02:00
Sven Holz
c3d367c4d9 v0.2b 2022-06-15 11:59:24 +02:00
Sven Holz
fed102361d documentation cleanup 2022-06-15 11:52:46 +02:00
Sven Holz
8d139554f5 documentation cleanup 2022-06-15 11:51:45 +02:00
11 changed files with 188 additions and 42 deletions

65
.github/workflows/git2docker.yml vendored Normal file
View File

@@ -0,0 +1,65 @@
# This is a basic workflow to help you get started with Actions
name: CI to Docker Hub
# Controls when the workflow will run
##on:
# Triggers the workflow on push or pull request events but only for the "master" branch
## push:
## branches: [ "master" ]
# Only update with new tag pushed
on:
push:
tags:
- "v*.*"
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# A workflow run is made up of one or more jobs that can run sequentially or in parallel
jobs:
# This workflow contains a single job called "build"
build:
# The type of runner that the job will run on
runs-on: ubuntu-latest
steps:
- name: Cache Docker layers
uses: actions/cache@v2
with:
path: /tmp/.buildx-cache
key: ${{ runner.os }}-buildx-${{ github.sha }}
restore-keys: |
${{ runner.os }}-buildx-
- name: Check Out Repo
uses: actions/checkout@v3
- name: Login to Docker Hub
uses: docker/login-action@v1
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Set up Docker Buildx
id: buildx
uses: docker/setup-buildx-action@v1
- name: Build and push
id: docker_build
uses: docker/build-push-action@v2
with:
context: ./Docker_Build/
file: ./Docker_Build/Dockerfile
push: true
tags: ${{ secrets.DOCKER_HUB_USERNAME }}/pylatemon:latest
cache-from: type=local,src=/tmp/.buildx-cache
cache-to: type=local,dest=/tmp/.buildx-cache
- name: Image digest
run: echo ${{ steps.docker_build.outputs.digest }}

4
.gitignore vendored
View File

@@ -1,3 +1,7 @@
.DS_Store .DS_Store
.vscode .vscode
.env .env
*.backup
grafana-datasource.yml
docker-compose.yml
config.ini

View File

@@ -23,6 +23,11 @@
# host2 = 8.8.4.4 # host2 = 8.8.4.4
# host3 = 1.1.1.1 # host3 = 1.1.1.1
[hosts_timeout] # OPTIONAL; ping timeout in seconds (float); default is '1'
# host1 = 1
# host2 = 0.5
# host3 = 0.2
[hosts_timer] # OPTIONAL; ping interval in seconds; default is '5' [hosts_timer] # OPTIONAL; ping interval in seconds; default is '5'
# host1 = 1 # host1 = 1
# host2 = 3 # host2 = 3

View File

@@ -29,7 +29,7 @@ class MyInfluxDB():
writes received data to Influxdb: writes received data to Influxdb:
host: string which includes IP or FQDN host: string which includes IP or FQDN
host_location: string which includes location of the host host_location: string which includes location of the host
ping_response: string which includes the ping reply in ms ping_response: float which includes the ping reply in ms
""" """
def __init__(self): def __init__(self):
@@ -82,7 +82,7 @@ class MyInfluxDB():
def write(self, host, host_location, ping_response): def write(self, host, host_location, ping_response):
self.host = host self.host = host
self.host_location = host_location self.host_location = host_location
self.ping_response = int(ping_response) self.ping_response = float(ping_response)
self.influx_timestamp = int(time_ns()) self.influx_timestamp = int(time_ns())
self.data_point = Point("latency_monitor").tag("location", self.host_location).tag("host", self.host).field("latency", self.ping_response).time(self.influx_timestamp) self.data_point = Point("latency_monitor").tag("location", self.host_location).tag("host", self.host).field("latency", self.ping_response).time(self.influx_timestamp)
self.write_api.write(bucket=self.INFX_BUCKET, self.write_api.write(bucket=self.INFX_BUCKET,
@@ -100,21 +100,24 @@ class ThreadPing(Thread):
---------- ----------
db: InfluxDB Object db: InfluxDB Object
host: string which includes IP or FQDN host: string which includes IP or FQDN
host_timeout: float which defines how long we wait for a reply
host_timer: integer which defines how often pings are send in seconds (min. 1) host_timer: integer which defines how often pings are send in seconds (min. 1)
host_location: string which includes location of the host host_location: string which includes location of the host
""" """
def __init__(self, db, host, host_timer, host_location):
def __init__(self, db, host, host_timeout, host_timer, host_location):
Thread.__init__(self) Thread.__init__(self)
self.MyDB = db self.MyDB = db
self.host = host self.host = host
self.host_timeout = host_timeout
self.host_timer = host_timer self.host_timer = host_timer
self.host_location = host_location self.host_location = host_location
def run(self): def run(self):
self.starttime = time() self.starttime = time()
while True: while True:
self.ping_response_list = ping(self.host, count=1) self.ping_response_list = ping(self.host, timeout=self.host_timeout, count=1)
self.ping_response = self.ping_response_list.rtt_avg_ms self.ping_response = "{:.2f}".format(self.ping_response_list.rtt_avg_ms)
self.MyDB.write(self.host, self.host_location, self.ping_response) self.MyDB.write(self.host, self.host_location, self.ping_response)
sleep(self.host_timer - ((time() - self.starttime) % 1)) sleep(self.host_timer - ((time() - self.starttime) % 1))
@@ -129,12 +132,14 @@ def main():
## IF ENVIRONMENT VARIABLES ARE PASSED IGNORE CONFIG FILE ## IF ENVIRONMENT VARIABLES ARE PASSED IGNORE CONFIG FILE
if 'TARGET_HOST' in os.environ: if 'TARGET_HOST' in os.environ:
host = os.environ['TARGET_HOST'] host = os.environ['TARGET_HOST']
host_timeout = float(os.getenv('TARGET_TIMEOUT', 1))
host_timer = int(os.getenv('TARGET_TIMER', 5)) host_timer = int(os.getenv('TARGET_TIMER', 5))
host_location = os.getenv('TARGET_LOCATION', 'unknown') host_location = os.getenv('TARGET_LOCATION', 'unknown')
# Create Thread # Create Thread
print("Creating thread for: %s, with interval: %s and location: %s" %(host, host_timer, host_location)) print("Creating thread for: %s, with timeout: %s, with interval: %s and location: %s" % (
thread = ThreadPing(MyDB, host, host_timer, host_location) host, host_timeout, host_timer, host_location))
thread = ThreadPing(MyDB, host, host_timeout, host_timer, host_location)
my_threads.append(thread) my_threads.append(thread)
thread.start() thread.start()
@@ -149,6 +154,9 @@ def main():
# Create thread for each configured host # Create thread for each configured host
for key, host in host_items: for key, host in host_items:
# Check if hosts timeout is set otherwise use "1" (means 1 seconds)
host_timeout = float(config.get('hosts_timeout', key, fallback=1))
# Check if hosts timer is set otherwise use "5" (means 5 seconds) # Check if hosts timer is set otherwise use "5" (means 5 seconds)
host_timer = int(config.get('hosts_timer', key, fallback=5)) host_timer = int(config.get('hosts_timer', key, fallback=5))
@@ -156,8 +164,8 @@ def main():
host_location = config.get('hosts_location', key, fallback="unknown") host_location = config.get('hosts_location', key, fallback="unknown")
# Create Thread # Create Thread
print("Creating thread for: %s, with interval: %s and location: %s" %(host, host_timer, host_location)) print("Creating thread for: %s, with timeout: %s, with interval: %s and location: %s" % (host, host_timeout, host_timer, host_location))
thread = ThreadPing(MyDB, host, host_timer, host_location) thread = ThreadPing(MyDB, host, host_timeout, host_timer, host_location)
my_threads.append(thread) my_threads.append(thread)
thread.start() thread.start()

View File

@@ -1,6 +1,6 @@
# Docker Based Latency Monitor # Docker Based Latency Monitor
Docker container which tracks latency of one or many hosts and reports to InfluxDBv2. Docker container(s) which tracks latency of one or many hosts and reports to InfluxDBv2.
## Description ## Description
@@ -31,7 +31,7 @@ You can use it in *standalone* or *full stack* mode.
## Requirements ## Requirements
- Docker - Docker (CE)
- Docker-Compose - Docker-Compose
- InfluxDB Version >= 2 - InfluxDB Version >= 2
- pythonping needs root privileges so same for the container - pythonping needs root privileges so same for the container
@@ -47,9 +47,9 @@ Also some influx connection options are just configurable via config file but no
### Behaviour ### Behaviour
Per default the python influx connector will cache all replies and sends them bundled every 30 seconds to the Influx DB. Per default the used python influxdb connector will cache all replies and sends them bundled every 30 seconds to the Influx DB.
The container will be build at 1st start. Actually the latency-monitor container is built on demand, a dockerhub image is on the roadmap...
You can find everything under *./Docker_Build/* and in the python program itself [latency_monitor.py](./Docker_Build/latency_monitor.py) You can find everything under *./Docker_Build/* and in the python program itself [latency_monitor.py](./Docker_Build/latency_monitor.py)
@@ -57,15 +57,16 @@ You can find everything under *./Docker_Build/* and in the python program itself
### ENV Variables ### ENV Variables
Name | Example | Usage | Option/Must Name | Example | Usage | Option/Must | Type | Default
:------: | :-----: | :-----: | :-----: :------: | :-----: | :-----: | :-----: | :-----: | :-----:
INFLUX_URL | http://10.0.0.1:8086 | InfluxDB Host | must INFLUX_URL | http://10.0.0.1:8086 | InfluxDB Host | must | URL | ---
INFLUX_TOKEN | eWOcp-MCv2Y3IJPlER7wc...ICKirhw0lwEczRNnrIoTqZAg== | InfluxDB API Token | must INFLUX_TOKEN | eWOcp-MCv2YPlER7wc...0zRNnrIoTqZAg== | InfluxDB API Token | must | String | ---
INFLUX_BUCKET | latency | InfluxDB Bucket | must INFLUX_BUCKET | latency | InfluxDB Bucket | must | String | ---
INFLUX_ORG | MyOrg | InfluxDB Organization | must INFLUX_ORG | MyOrg | InfluxDB Organization | must | String | ---
TARGET_HOST | 8.8.8.8 | Monitored Host (IP/FQDN) | must TARGET_HOST | 8.8.8.8 | Monitored Host (IP/FQDN) | must | FQDN or IP | ---
TARGET_TIMER | 3 | ping frequency in sec. | option TARGET_TIMEOUT | 0.5 | ping timeout in sec. | optional | Float >0 | **1**
TARGET_LOCATION | Google | decript. location | option TARGET_TIMER | 3 | ping frequency in sec. | optional | Int >1 | **5**
TARGET_LOCATION | Google | descriptive location | optional | String | **unknown**
----- -----
@@ -75,7 +76,9 @@ TARGET_LOCATION | Google | decript. location | option
**Keep in mind it´s a OR decision not a AND** **Keep in mind it´s a OR decision not a AND**
See [./latency-monitor/config.ini](./latency-monitor/config.ini) See [./latency-monitor/config-template.ini](./latency-monitor/config-template.ini)
**ENV wins over file**
#### Docker-Compose Style #### Docker-Compose Style
@@ -97,7 +100,7 @@ docker latency-monitor -v ./latency-monitor/config.ini:/app/config.ini:ro
## Configuration (Standalone) ## Configuration (Standalone)
1st thing to do is creating the *docker-compose.yml from [docker-compose-standalone.yml](./docker-compose-standalone.yml): 1st thing to do is creating the *docker-compose.yml* from [docker-compose-standalone.yml](./docker-compose-standalone.yml):
``` ```
cp docker-compose-standalone.yml docker-compose.yml cp docker-compose-standalone.yml docker-compose.yml
@@ -119,6 +122,7 @@ in the **.env** file *(env needs to be renamed to .env)* configure following var
- YOUR_BUCKET_NAME - YOUR_BUCKET_NAME
- YOUR_ADMIN_TOKEN - YOUR_ADMIN_TOKEN
- YOUR_MONITORED_TARGET - YOUR_MONITORED_TARGET
- YOUR_MONITORED_TARGET_TIMEOUT
- YOUR_MONITORED_TARGET_TIMER - YOUR_MONITORED_TARGET_TIMER
- YOUR_MONITORED_TARGET_LOCATION - YOUR_MONITORED_TARGET_LOCATION
@@ -147,7 +151,7 @@ Just create a valid *.env* File by:
cp env .env cp env .env
``` ```
and editing it to your needs. and edit it to your needs.
After everything within *.env* is in order just do: After everything within *.env* is in order just do:
@@ -159,10 +163,22 @@ Everything should be right in place now.
Just the certificates are missing look [here](#certificate) Just the certificates are missing look [here](#certificate)
Now run it and maybe pick an example dashboard for grafana from [here](#grafana-dashboard-examples)
#### BACKUP FILES ???
The script will backup following files if found:
- *./docker-compose.yml*
- *./grafana/provisioning/datasources/grafana-datasource.yml*
----- -----
----- -----
### WTF maual mode ### WTF manual mode
REALLY???
You need to set all on your own: You need to set all on your own:
@@ -177,7 +193,7 @@ You need to configure Variables in following files to make the compose work:
----- -----
- **docker-compose.yml** *(was docker-compose-full_stack.yml before)* - **docker-compose.yml** *(generated from docker-compose-full_stack.yml)*
- PLACE_YOUR_FQDN_HERE (3 times) - PLACE_YOUR_FQDN_HERE (3 times)
----- -----
@@ -190,12 +206,13 @@ You need to configure Variables in following files to make the compose work:
- YOUR_BUCKET_NAME - YOUR_BUCKET_NAME
- YOUR_ADMIN_TOKEN - YOUR_ADMIN_TOKEN
- YOUR_MONITORED_TARGET - YOUR_MONITORED_TARGET
- YOUR_MONITORED_TARGET_TIMEOUT
- YOUR_MONITORED_TARGET_TIMER - YOUR_MONITORED_TARGET_TIMER
- YOUR_MONITORED_TARGET_LOCATION - YOUR_MONITORED_TARGET_LOCATION
----- -----
- **grafana/provisioning/datasources/grafana-datasource.yml** - **grafana/provisioning/datasources/grafana-datasource.yml** *(generated from grafana/grafana-datasource-template.yml)*
- YOUR_ADMIN_TOKEN - YOUR_ADMIN_TOKEN
- YOUR_ORGANIZATION - YOUR_ORGANIZATION
- YOUR_BUCKET_NAME - YOUR_BUCKET_NAME
@@ -228,6 +245,8 @@ Everything should be right in place now.
Just the certificates are missing look [here](#certificate) Just the certificates are missing look [here](#certificate)
Now just start over and maybe pick an example dashboard for grafana from [here](#grafana-dashboard-examples)
----- -----
----- -----
@@ -253,7 +272,7 @@ Thats it
## Grafana Dashboard Examples ## Grafana Dashboard Examples
Within the local path *./examples/grafana/* you can find example *.json* files which can be imported to grafana as dashboards to give you a first point to start with. Within the local path [./examples/grafana/](./examples/grafana/) you can find example *.json* files which can be imported to grafana as dashboards to give you a first point to start with.
----- -----
@@ -274,6 +293,27 @@ Contributors names and contact info
## Version History ## Version History
- v0.4
- moved from self-built image to Docker Hub
- v0.3a
- ping timeout added
- cleanup
* v0.3
* setup-script fixed and backup added
* fixed latency value problem (was sometimes string instead of float)
* cleanup
* v0.2b
* cleanup
* v0.2a
* fixed some missing variables
* fixed a missing integer declaration in latency-monitor
* added automatic config creation for full-stack
* cleanups
* v0.1 * v0.1
* Initial Release * Initial Release

View File

@@ -120,7 +120,7 @@ services:
latency-monitor: latency-monitor:
container_name: latency-monitor container_name: latency-monitor
hostname: latency-monitor hostname: latency-monitor
build: ./Docker_Build image: planetespresso/pylatemon
depends_on: depends_on:
- influxdb - influxdb
restart: always restart: always
@@ -134,6 +134,7 @@ services:
- INFLUX_BUCKET - INFLUX_BUCKET
- INFLUX_ORG - INFLUX_ORG
- TARGET_HOST - TARGET_HOST
- TARGET_TIMEOUT
- TARGET_TIMER - TARGET_TIMER
- TARGET_LOCATION - TARGET_LOCATION
networks: networks:

View File

@@ -6,7 +6,7 @@ services:
latency-monitor: latency-monitor:
container_name: latency-monitor container_name: latency-monitor
hostname: latency-monitor hostname: latency-monitor
build: ./Docker_Build image: planetespresso/pylatemon
restart: always restart: always
volumes: volumes:
# - ./latency-monitor/config.ini:/app/config.ini:ro # UNCOMMENT IF NEEDED # - ./latency-monitor/config.ini:/app/config.ini:ro # UNCOMMENT IF NEEDED

1
env
View File

@@ -36,5 +36,6 @@ INFLUX_TOKEN=YOUR_ADMIN_TOKEN
INFLUX_BUCKET=YOUR_BUCKET_NAME INFLUX_BUCKET=YOUR_BUCKET_NAME
INFLUX_ORG=YOUR_ORGANIZATION INFLUX_ORG=YOUR_ORGANIZATION
TARGET_HOST=YOUR_MONITORED_TARGET TARGET_HOST=YOUR_MONITORED_TARGET
TARGET_TIMEOUT=YOUR_MONITORED_TARGET_TIMEOUT
TARGET_TIMER=YOUR_MONITORED_TARGET_TIMER TARGET_TIMER=YOUR_MONITORED_TARGET_TIMER
TARGET_LOCATION=YOUR_MONITORED_TARGET_LOCATION TARGET_LOCATION=YOUR_MONITORED_TARGET_LOCATION

View File

@@ -23,6 +23,11 @@
# host2 = 8.8.4.4 # host2 = 8.8.4.4
# host3 = 1.1.1.1 # host3 = 1.1.1.1
[hosts_timeout] # OPTIONAL; ping timeout in seconds (float); default is '1'
# host1 = 1
# host2 = 0.5
# host3 = 0.2
[hosts_timer] # OPTIONAL; ping interval in seconds; default is '5' [hosts_timer] # OPTIONAL; ping interval in seconds; default is '5'
# host1 = 1 # host1 = 1
# host2 = 3 # host2 = 3

View File

@@ -1,24 +1,37 @@
#!/bin/bash #!/bin/bash
# Check if .env allready exists Date=`date +%Y%m%d_%H%M%s`
if ! test -e .env; then
echo "FAIL: You need to copy file env to .env and edit it!!!"
exit 1
fi
# copy compose template to final compose file (OVERWRITTEN!!!)
cp -f ./docker-compose-full_stack.yml ./docker-compose.yml
# locate my path # locate my path
MyScriptPath=`dirname $0` MyScriptPath=`dirname $0`
MyScriptPathContainer="$MyScriptPath/CONTAINER/" MyScriptPathContainer="$MyScriptPath/CONTAINER/"
# Check if .env allready exists
if ! test -e $MyScriptPath/.env; then
echo "FAIL: You need to copy file env to .env and edit it!!!"
exit 1
fi
# backup old compose files
cp -f $MyScriptPath/docker-compose.yml $MyScriptPath/docker-compose-$Date.backup
# copy compose template to final compose file (OVERWRITTEN!!!)
cp -f $MyScriptPath/docker-compose-full_stack.yml $MyScriptPath/docker-compose.yml
# Make relevant grafana templating directories
echo "MKDIR: creating $MyScriptPath/grafana/provisioning/datasources"
mkdir -p $MyScriptPath/grafana/provisioning/datasources
# backup old grafana datasource file
cp -f $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml $MyScriptPath/grafana/grafana-datasource-$Date.backup
# copy grafana datasource file template to grafana datasource file
cp -f $MyScriptPath/grafana/grafana-datasource-template.yml $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml
# Replace .env MyPath Path with local path if NOT changed # Replace .env MyPath Path with local path if NOT changed
sed -i -e "s#/YOUR_PATH_TO_CONTAINER_STATIC_DATA#$MyScriptPathContainer#g" .env sed -i -e "s#/YOUR_PATH_TO_CONTAINER_STATIC_DATA#$MyScriptPathContainer#g" $MyScriptPath/.env
# Read variables from .env file # Read variables from .env file
source .env source $MyScriptPath/.env
echo "INFO: MyPath is $MyPath" echo "INFO: MyPath is $MyPath"
@@ -35,6 +48,8 @@ echo "CHANGE: replace PLACE_YOUR_FQDN_HERE with $MyFQDN in $MyScriptPath/docker-
sed -i -e "s/PLACE_YOUR_FQDN_HERE/$MyFQDN/g" $MyScriptPath/docker-compose.yml sed -i -e "s/PLACE_YOUR_FQDN_HERE/$MyFQDN/g" $MyScriptPath/docker-compose.yml
# Changes in grafana/provisioning/datasources/grafana-datasource.yml # Changes in grafana/provisioning/datasources/grafana-datasource.yml
echo "CHANGE: replace YOUR_INFLUXDB_URL with $INFLUX_URL in $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml"
sed -i -e "s#YOUR_INFLUXDB_URL#$INFLUX_URL#g" $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml
echo "CHANGE: replace YOUR_ADMIN_TOKEN with $INFLUX_TOKEN in $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml" echo "CHANGE: replace YOUR_ADMIN_TOKEN with $INFLUX_TOKEN in $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml"
sed -i -e "s/YOUR_ADMIN_TOKEN/$INFLUX_TOKEN/g" $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml sed -i -e "s/YOUR_ADMIN_TOKEN/$INFLUX_TOKEN/g" $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml
echo "CHANGE: replace YOUR_ORGANIZATION with $INFLUX_ORG in $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml" echo "CHANGE: replace YOUR_ORGANIZATION with $INFLUX_ORG in $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml"
@@ -45,6 +60,8 @@ sed -i -e "s/YOUR_BUCKET_NAME/$INFLUX_BUCKET/g" $MyScriptPath/grafana/provisioni
# Correct owner and permissions to satisfy the containers # Correct owner and permissions to satisfy the containers
echo "CHMOD: chmod -R 755 $MyPath" echo "CHMOD: chmod -R 755 $MyPath"
chmod -R 755 $MyPath chmod -R 755 $MyPath
echo "CHMOD: chmod -R 755 $MyScriptPath/grafana/provisioning"
chmod -R 755 $MyScriptPath/grafana/provisioning
echo "CHMOD: chmod 644 $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml" echo "CHMOD: chmod 644 $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml"
chmod 644 $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml chmod 644 $MyScriptPath/grafana/provisioning/datasources/grafana-datasource.yml
echo "CHOWN: chown -R 472.472 $MyPath/grafana/var_lib" echo "CHOWN: chown -R 472.472 $MyPath/grafana/var_lib"