adding one-hit monitoring with prometheus, influxdb, telegraf and automatic dashboards
commit 14630e7592
parent 9487fb10aa
deploy_all.sh | 13 | new Executable file
@@ -0,0 +1,13 @@
+## INSTALL docker-compose
+## uncomment if you don't have docker-compose installed #sudo curl -L https://github.com/docker/compose/releases/download/1.21.2/docker-compose-$(uname -s)-$(uname -m) -o /usr/local/bin/docker-compose
+## uncomment if you don't have docker-compose installed #sudo chmod +x /usr/local/bin/docker-compose
+
+# START docker-compose
+docker-compose up -d
+
+# ADD DATASOURCES AND DASHBOARDS
+echo "adding dashboards..."
+docker exec -it -u 0 grafana /var/lib/grafana/ds/add_dashboards.sh
+
+echo "adding datasources..."
+docker exec -it -u 0 grafana /var/lib/grafana/ds/add_datasources.sh
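A quick smoke test after running deploy_all.sh can simply probe each published port. This is only a sketch: the health endpoints assume Grafana 5.x, Prometheus 2.x, node_exporter and InfluxDB 1.x defaults, and the sleep is an arbitrary grace period; none of it is part of this commit. The host ports come from the docker-compose.yml changes below.

./deploy_all.sh
sleep 10                                              # arbitrary wait for the containers to come up
curl -sf http://localhost:3001/api/health             # Grafana, published on 3001 after this commit
curl -sf http://localhost:9090/-/healthy              # Prometheus
curl -sf http://localhost:9100/metrics | head -n 3    # node_exporter
curl -s -o /dev/null -w "influxdb %{http_code}\n" http://localhost:8086/ping   # InfluxDB, expects 204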
docker-compose.yml
@@ -2,6 +2,7 @@ version: '3'
 
 networks:
   public: {}
+  private: {}
 
 volumes:
   grafana_lib: {}
@@ -15,6 +16,7 @@ services:
       - "8086:8086"
     networks:
       - public
+      - private
     volumes:
       - ./data/influxdb:/var/lib/influxdb
     environment:
@@ -27,13 +29,13 @@ services:
     image: grafana/grafana:5.1.3
     container_name: grafana
     ports:
-      - "3000:3000"
+      - "3001:3000"
     networks:
       - public
+      - private
     volumes:
       - grafana_lib:/var/lib/grafana
-      - grafana_ds:/var/lib/grafana/ds:rw
-      - ${PWD}/grafana/add_datasources.sh:/var/lib/grafana/ds/add_datasources.sh
+      - ${PWD}/grafana/:/var/lib/grafana/ds/
     environment:
       GF_AUTH_ANONYMOUS_ENABLED: "true"
       GF_AUTH_ANONYMOUS_ORG_ROLE: "Admin"
@@ -52,3 +54,38 @@ services:
     environment:
       # real influx host
       INFLUXDB_URI: "http://localhost:8086"
+
+  prometheus:
+    image: quay.io/prometheus/prometheus:v2.0.0
+    container_name: prometheus
+    volumes:
+      - ./prometheus/prometheus.yml:/etc/prometheus/prometheus.yml
+    networks:
+      - private
+    command: "--config.file=/etc/prometheus/prometheus.yml --storage.tsdb.path=/prometheus"
+    ports:
+      - 9090:9090
+    depends_on:
+      - node_exporter
+
+  node_exporter:
+    image: prom/node-exporter:v0.13.0
+    container_name: node_exporter
+    volumes:
+      - ${PWD}/node_exporter/justrun.py:/justrun.py:rw
+      - ${PWD}/node_exporter/hmon:/hmon:rw
+      - ${PWD}/node_exporter/smoothlogging:/smoothlogging:rw
+      - ${PWD}/node_exporter/textfile_collector:/var/lib/node_exporter/textfile_collector/
+    networks:
+      - public
+      - private
+    environment:
+      DOGSNAME: "Gula&Bodka"
+    command:
+      - "--collector.textfile"
+      - "--collector.textfile.directory=/var/lib/node_exporter/textfile_collector/"
+    expose:
+      - 9100
+    ports:
+      - 9100:9100
+
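The new node_exporter service mounts ${PWD}/node_exporter/textfile_collector into the container and points the textfile collector at it, so any *.prom file dropped there by the mounted helpers (justrun.py, hmon, smoothlogging, whose contents are outside this diff) is exposed as host metrics. As a hedged illustration of the exposition format the textfile collector expects, with a made-up metric that borrows the DOGSNAME value above:

# hypothetical example only; the real collectors in this commit are justrun.py, hmon and smoothlogging
cat > /var/lib/node_exporter/textfile_collector/dog_walks.prom.tmp <<'EOF'
# HELP dog_walks_total Walks taken today (illustrative metric).
# TYPE dog_walks_total counter
dog_walks_total{dog="Gula"} 2
dog_walks_total{dog="Bodka"} 3
EOF
# write-then-rename so node_exporter never scrapes a half-written file
mv /var/lib/node_exporter/textfile_collector/dog_walks.prom.tmp \
   /var/lib/node_exporter/textfile_collector/dog_walks.prom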
grafana/add_dashboards.sh | 13 | Normal file → Executable file
@@ -50,7 +50,8 @@ wait_for_api() {
 replace_datasource() {
   local dashboard_file=$1
   local datasource_name=$2
-  cmd="sed -i.bak_remove \"s/\\\${DS_INFLUXDB}/${datasource_name}/g\" ${dashboard_file}"
+  local old_datasource_name=$3
+  cmd="sed -i.bak_remove \"s/${old_datasource_name}/${datasource_name}/g\" ${dashboard_file}"
   eval ${cmd} || return 1
   return 0
 }
@@ -58,22 +59,22 @@ replace_datasource() {
 install_dashboards() {
   local dashboard
 
-  for dashboard in dashboards/*.json
+  for dashboard in /var/lib/grafana/ds/dashboards/*.json
 
   do
     if [[ $(grep "\"name\": \"DS_INFLUXDB\"," ${dashboard}) ]]; then
       echo -e "${PURPLE}Dashboard ${dashboard} seems to be for InfluxDB datasource${NC}"
+      old_datasource_name="\\\${DS_INFLUXDB}"
       datasource_name="influxdb"
     fi
     if [[ $(grep "\"name\": \"DS_PROMETHEUS\"," ${dashboard}) ]]; then
       echo -e "${PURPLE}Dashboard ${dashboard} seems to be for Prometheus datasource${NC}"
+      old_datasource_name="\\\${DS_PROMETHEUS}"
       datasource_name="prometheus"
     fi
     if [[ -f "${dashboard}" ]]; then
       echo -e "${LCYAN}Installing dashboard ${dashboard}${NC}"
-      replace_datasource ${dashboard} ${datasource_name}
-      # backup will be created before wrapping dashboard ^
-      #echo -e "{\"dashboard\": `cat $dashboard`}" > "${dashboard}.wrapped"
+      replace_datasource ${dashboard} ${datasource_name} ${old_datasource_name}
       cp ${dashboard} ${dashboard}.wrapped
       sed -i '1s/^/{"dashboard":\n/' ${dashboard}.wrapped
       echo "}" >> ${dashboard}.wrapped
@@ -84,7 +85,6 @@ install_dashboards() {
         echo -e "\n** ${RED}installation of: ${PURPLE}\"${dashboard}\"${RED} failed **${NC}"
       fi
     fi
-    #rm ${dashboard}.wrapped
   done
 }
@@ -94,4 +94,5 @@ configure_grafana() {
 }
 
 configure_grafana
+rm -vf /var/lib/grafana/ds/dashboards/*.{wrapped,bak_remove}
 
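Taken together, one iteration of install_dashboards now boils down to roughly the following. This is a sketch, not the script verbatim: the final POST and the /api/dashboards/db endpoint are assumptions based on Grafana's standard dashboard import API, which these hunks do not show.

dashboard=/var/lib/grafana/ds/dashboards/prometheus-2-stats.json
# swap the template placeholder for the provisioned datasource name, keeping a .bak_remove backup
sed -i.bak_remove 's/${DS_PROMETHEUS}/prometheus/g' ${dashboard}
# wrap the raw dashboard JSON in {"dashboard": ...} for the import API
cp ${dashboard} ${dashboard}.wrapped
sed -i '1s/^/{"dashboard":\n/' ${dashboard}.wrapped
echo "}" >> ${dashboard}.wrapped
curl -s -H "Content-Type: application/json" \
  -XPOST http://admin:admin@localhost:3000/api/dashboards/db \
  -d @${dashboard}.wrapped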
grafana/add_datasources.sh | 4 | Normal file → Executable file
@@ -2,7 +2,7 @@
 #set -e
 
 # ADD INFLUXDB DATASOURCE
-curl -s -v -H "Content-Type: application/json" \
+curl -s -H "Content-Type: application/json" \
   -XPOST http://admin:admin@localhost:3000/api/datasources \
   -d @- <<EOF
 {
@@ -18,7 +18,7 @@ curl -s -v -H "Content-Type: application/json" \
 EOF
 
 ## ADD PROMETHEUS DATASOURCE
-curl -s -v -H "Content-Type: application/json" \
+curl -s -H "Content-Type: application/json" \
   -XPOST http://admin:admin@localhost:3000/api/datasources \
   -d @- <<EOF
 {
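The heredoc bodies themselves sit outside these hunks; only the opening brace is visible here. For orientation, a Grafana 5.x datasource payload for the Prometheus case would typically look like the sketch below; the url and the other field values are assumptions (grafana and prometheus share the private network added above), not the repository's actual body.

curl -s -H "Content-Type: application/json" \
  -XPOST http://admin:admin@localhost:3000/api/datasources \
  -d @- <<EOF
{
  "name": "prometheus",
  "type": "prometheus",
  "url": "http://prometheus:9090",
  "access": "proxy",
  "basicAuth": false
}
EOF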
grafana/dashboards/prometheus-2-stats.json | 1243 | new Normal file
File diff suppressed because it is too large.
prometheus/prometheus.yml | 34 | new Normal file
@@ -0,0 +1,34 @@
+# my global config
+global:
+  scrape_interval: 15s # By default, scrape targets every 15 seconds.
+  evaluation_interval: 15s # By default, scrape targets every 15 seconds.
+  # scrape_timeout is set to the global default (10s).
+
+  # Attach these labels to any time series or alerts when communicating with
+  # external systems (federation, remote storage, Alertmanager).
+  external_labels:
+    monitor: 'codelab-monitor'
+
+# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
+rule_files:
+  # - "first.rules"
+  # - "second.rules"
+
+# A scrape configuration containing exactly one endpoint to scrape:
+# Here it's Prometheus itself.
+scrape_configs:
+  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
+  - job_name: 'prometheus'
+
+    # Override the global default and scrape targets from this job every 5 seconds.
+    scrape_interval: 5s
+
+    # metrics_path defaults to '/metrics'
+    # scheme defaults to 'http'.
+
+    static_configs:
+      - targets: ['localhost:9090']
+  - job_name: "node"
+    scrape_interval: "15s"
+    static_configs:
+      - targets: ['node_exporter:9100']
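Once the stack is running, both scrape jobs defined above can be checked from the host (a sketch assuming the Prometheus 2.x HTTP API and the 9090 port mapping from docker-compose.yml):

# list scrape targets and their health
curl -s http://localhost:9090/api/v1/targets
# or query the node job's up metric directly
curl -s 'http://localhost:9090/api/v1/query?query=up{job="node"}'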