Skip to content
Snippets Groups Projects
Commit 87940c3f authored by Nikolay Stanchev's avatar Nikolay Stanchev
Browse files

Merging integration into clmcservice

parents a050d439 77a5cae1
No related branches found
No related tags found
No related merge requests found
Showing
with 2790 additions and 313 deletions
......@@ -54,10 +54,10 @@ test:all:
- sudo cp build/clmcservice-SNAPSHOT.tar.gz /var/lib/lxc/test-runner/rootfs/vagrant/build
- sudo lxc-attach -n test-runner -- pip3 install /vagrant/build/clmctest-SNAPSHOT.tar.gz
- sudo lxc-attach -n test-runner -- pip3 install /vagrant/build/clmcservice-SNAPSHOT.tar.gz
- sudo lxc-attach -n test-runner -- pytest -s --pyargs clmctest.scripts
- sudo lxc-attach -n test-runner -- pytest -s --pyargs clmcservice.tests
- sudo lxc-attach -n test-runner -- pytest -s --pyargs clmctest.inputs
- sudo lxc-attach -n test-runner -- pytest -s --pyargs clmctest.monitoring
- sudo lxc-attach -n test-runner -- pytest -s --tb=short -rfp --pyargs clmctest.scripts
- sudo lxc-attach -n test-runner -- pytest -s --tb=short -rfp --pyargs clmcservice.tests
- sudo lxc-attach -n test-runner -- pytest -s --tb=short -rfp --pyargs clmctest.inputs
- sudo lxc-attach -n test-runner -- pytest -s --tb=short -rfp --pyargs clmctest.monitoring
when: on_success
clean:
......
......@@ -17,6 +17,21 @@ sed -i s/10.0.3/172.40.231/g /etc/default/lxc-net
sed -i s/#LXC_DHCP_CONFILE/LXC_DHCP_CONFILE/g /etc/default/lxc-net
service lxc-net restart
# enable NTP
# use network time to make sure we are synchronised
echo "Disabling timesyncd..."
timedatectl set-ntp no
until timedatectl | grep -m 1 "Network time on: no";
do
echo "Waiting for timesyncd to turn off.."
sleep 1
done
apt-get install ntp
echo "timesync set to ntpd"
# set timezone to London
timedatectl set-timezone Europe/London
SCRIPT
Vagrant.configure("2") do |config|
......
......@@ -17,6 +17,21 @@ sed -i s/10.0.3/172.40.231/g /etc/default/lxc-net
sed -i s/#LXC_DHCP_CONFILE/LXC_DHCP_CONFILE/g /etc/default/lxc-net
service lxc-net restart
# enable NTP
# use network time to make sure we are synchronised
echo "Disabling timesyncd..."
timedatectl set-ntp no
until timedatectl | grep -m 1 "Network time on: no";
do
echo "Waiting for timesyncd to turn off.."
sleep 1
done
apt-get install ntp
echo "timesync set to ntpd"
# set timezone to London
timedatectl set-timezone Europe/London
SCRIPT
Vagrant.configure("2") do |config|
......
......@@ -47,23 +47,23 @@ INFLUXDB_URL=$8
DATABASE_NAME=$9
TELEGRAF_CONF_DIR="/etc/telegraf"
TELEGRAF_CONF_FILE=$TELEGRAF_CONF_DIR"/telegraf.conf"
TELEGRAF_INCLUDE_CONF_DIR=$TELEGRAF_CONF_DIR"/telegraf.d"
TELEGRAF_OUTPUT_CONF_FILE=$TELEGRAF_INCLUDE_CONF_DIR"/telegraf_output.conf"
TELEGRAF_CONF_FILE=${TELEGRAF_CONF_DIR}"/telegraf.conf"
TELEGRAF_INCLUDE_CONF_DIR=${TELEGRAF_CONF_DIR}"/telegraf.d"
TELEGRAF_OUTPUT_CONF_FILE=${TELEGRAF_INCLUDE_CONF_DIR}"/telegraf_output.conf"
#cat ${TELEGRAF_OUTPUT_CONF_FILE}
# Replace template parameters on general configuration
sed -i 's/$LOCATION/'$LOCATION'/g' $TELEGRAF_CONF_FILE
sed -i 's/$SFC_ID/'$SFC_ID'/g' $TELEGRAF_CONF_FILE
sed -i 's/$SFC_ID_INSTANCE/'$SFC_ID_INSTANCE'/g' $TELEGRAF_CONF_FILE
sed -i 's/$SF_ID/'$SF_ID'/g' $TELEGRAF_CONF_FILE
sed -i 's/$SF_ID_INSTANCE}}/'$SF_ID_INSTANCE'/g' $TELEGRAF_CONF_FILE
sed -i 's/$IP_ENDPOINT_ID/'$IP_ENDPOINT_ID'/g' $TELEGRAF_CONF_FILE
sed -i 's/$SR_ID/'$SR_ID'/g' $TELEGRAF_CONF_FILE
sed -i 's/${LOCATION}/'${LOCATION}'/g' ${TELEGRAF_CONF_FILE}
sed -i 's/${SFC_ID}/'${SFC_ID}'/g' ${TELEGRAF_CONF_FILE}
sed -i 's/${SFC_ID_INSTANCE}/'${SFC_ID_INSTANCE}'/g' ${TELEGRAF_CONF_FILE}
sed -i 's/${SF_ID}/'${SF_ID}'/g' ${TELEGRAF_CONF_FILE}
sed -i 's/${SF_ID_INSTANCE}/'${SF_ID_INSTANCE}'/g' ${TELEGRAF_CONF_FILE}
sed -i 's/${IP_ENDPOINT_ID}/'${IP_ENDPOINT_ID}'/g' ${TELEGRAF_CONF_FILE}
sed -i 's/${SR_ID}/'${SR_ID}'/g' ${TELEGRAF_CONF_FILE}
echo "Telegraf Output Configuration File: ${TELEGRAF_OUTPUT_CONF_FILE}"
# Replace parameters in output configuration file
sed -i 's|$INFLUXDB_URL|'$INFLUXDB_URL'|g' $TELEGRAF_OUTPUT_CONF_FILE
sed -i 's/$DATABASE_NAME/'$DATABASE_NAME'/g' $TELEGRAF_OUTPUT_CONF_FILE
\ No newline at end of file
sed -i 's|${INFLUXDB_URL}|'${INFLUXDB_URL}'|g' ${TELEGRAF_OUTPUT_CONF_FILE}
sed -i 's/${DATABASE_NAME}/'${DATABASE_NAME}'/g' ${TELEGRAF_OUTPUT_CONF_FILE}
\ No newline at end of file
#!/bin/bash
#/////////////////////////////////////////////////////////////////////////
#//
#// (c) University of Southampton IT Innovation Centre, 2017
#//
#// Copyright in this software belongs to University of Southampton
#// IT Innovation Centre of Gamma House, Enterprise Road,
#// Chilworth Science Park, Southampton, SO16 7NS, UK.
#//
#// This software may not be used, sold, licensed, transferred, copied
#// or reproduced in whole or in part in any manner or form or in or
#// on any media by any person other than in accordance with the terms
#// of the Licence Agreement supplied with the software, or otherwise
#// without the prior written consent of the copyright owners.
#//
#// This software is distributed WITHOUT ANY WARRANTY, without even the
#// implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
#// PURPOSE, except where stated in the Licence Agreement supplied with
#// the software.
#//
#// Created By : Michael Boniface
#// Created Date : 18/12/2017
#// Created for Project : FLAME
#//
#/////////////////////////////////////////////////////////////////////////

# Purpose: verify that Telegraf is installed on this machine, then write the
# CLMC agent configuration templates (the general config and the InfluxDB
# output config) into Telegraf's configuration directories.
#
# The here-doc delimiters are quoted ('EOF') on purpose: the templates must
# contain literal $PLACEHOLDER tokens, which a companion configure script
# later replaces with sed.
# NOTE(review): these templates use the unbraced "$LOCATION" placeholder
# style; the newer configure.sh substitutes the braced "${LOCATION}" form —
# confirm the template and the configure script stay in sync.

TELEGRAF_CONF_DIR="/etc/telegraf"
TELEGRAF_CONF_FILE="${TELEGRAF_CONF_DIR}/telegraf.conf"
TELEGRAF_INCLUDE_CONF_DIR="${TELEGRAF_CONF_DIR}/telegraf.d"
TELEGRAF_OUTPUT_CONF_FILE="${TELEGRAF_INCLUDE_CONF_DIR}/telegraf_output.conf"

echo "Checking Telegraf installation"

# The main conf directory must exist, otherwise telegraf is not installed.
if [ ! -d "${TELEGRAF_CONF_DIR}" ]; then
  echo "Error: Telegraf conf directory does not exist on target machine. Check that telegraf is installed: ${TELEGRAF_CONF_DIR}" >&2
  exit 1
fi

# The include directory (telegraf.d) must also exist for the output config.
if [ ! -d "${TELEGRAF_INCLUDE_CONF_DIR}" ]; then
  echo "Error: Telegraf conf include directory does not exist on target machine. Check that telegraf is installed: ${TELEGRAF_INCLUDE_CONF_DIR}" >&2
  exit 1
fi

# Write the general configuration template (placeholders left unexpanded).
echo "Telegraf general config file: ${TELEGRAF_CONF_FILE}"
(cat <<'EOF'
[global_tags]
location="$LOCATION"
sfc="$SFC_ID"
sfc_i="$SFC_ID_INSTANCE"
sf="$SF_ID"
sf_i="$SF_ID_INSTANCE"
ipendpoint="$IP_ENDPOINT_ID"
sr="$SR_ID"
[agent]
interval = "10s"
round_interval = true
metric_buffer_limit = 1000
flush_buffer_when_full = true
collection_jitter = "0s"
flush_interval = "10s"
flush_jitter = "0s"
debug = false
quiet = false
logfile = "/var/log/telegraf/telegraf.log"
hostname = ""
EOF
) > "${TELEGRAF_CONF_FILE}"

# Write the InfluxDB output configuration template.
echo "Telegraf output config file: ${TELEGRAF_OUTPUT_CONF_FILE}"
(cat <<'EOF'
[[outputs.influxdb]]
urls = ["$INFLUXDB_URL"]
database = "$DATABASE_NAME"
precision = "s"
timeout = "5s"
EOF
) > "${TELEGRAF_OUTPUT_CONF_FILE}"
......@@ -32,7 +32,9 @@ apt-get install wget -y
echo "Installing Telegraf agent"
TELEGRAF_VERSION=1.7.0~5618bb0-0
TELEGRAF_VERSION=1.8.0~2736fa0-0
#TELEGRAF_VERSION=1.7.0~5618bb0-0
TELEGRAF_CHECKSUM=dc24932fa1aef9392582880c077dd2493b9f2c66babd7733a0654540bbb5003b
# Install telegraf
......@@ -40,7 +42,7 @@ TELEGRAF_CHECKSUM=dc24932fa1aef9392582880c077dd2493b9f2c66babd7733a0654540bbb500
# load the runtime configuration for the artefact repository
if [ ! -f ${REPO_ROOT}/reporc ]; then
echo "Cannot download FLIPS binaries as reporc file containing artefact repository credentials does not exist within user's home folder"
echo "Cannot download telegraf binary: reporc file containing artefact repository credentials does not exist in ${REPO_ROOT}/reporc"
exit 1
fi
source ${REPO_ROOT}/reporc
......@@ -48,10 +50,11 @@ source ${REPO_ROOT}/reporc
wget --user ${REPO_USER} --password ${REPO_PASS} https://flame-nexus.it-innovation.soton.ac.uk/repository/flame-general/it-innovation/telegraf/${TELEGRAF_VERSION}/telegraf-${TELEGRAF_VERSION}.deb -O telegraf-${TELEGRAF_VERSION}.deb
#sha256sum telegraf_${TELEGRAF_VERSION}_amd64.deb | grep $TELEGRAF_CHECKSUM > /dev/null
#if [ $? == 1 ]; then
# echo "Telegraf download failed checksum"
# exit 1
#fi
dpkg -i telegraf-${TELEGRAF_VERSION}.deb
# add telegraf to run as root for systemctl input plugin
# this should not be required, as the "systemctl status" command is available to non-privileged users
# issue raised upstream
sed -i s/User=telegraf/User=root/g /lib/systemd/system/telegraf.service
systemctl daemon-reload
systemctl restart telegraf.service
\ No newline at end of file
......@@ -33,19 +33,19 @@
# Global tags can be specified here in key="value" format.
[global_tags]
# location of the data centre
location="$LOCATION"
location="${LOCATION}"
# media service template id
sfc="$SFC_ID"
sfc="${SFC_ID}"
# media service instance
sfc_i="$SFC_ID_INSTANCE"
sfc_i="${SFC_ID_INSTANCE}"
# service function type
sf="$SF_ID"
sf="${SF_ID}"
# service function instance id
sf_i="$SF_ID_INSTANCE"
sf_i="${SF_ID_INSTANCE}"
# ipendpoint id aka surrogate instance
ipendpoint="$IP_ENDPOINT_ID"
ipendpoint="${IP_ENDPOINT_ID}"
# the service router providing access to the network
sr="$SR_ID"
sr="${SR_ID}"
# Configuration for telegraf agent
[agent]
......
......@@ -29,9 +29,9 @@
# Multiple urls can be specified but it is assumed that they are part of the same
# cluster, this means that only ONE of the urls will be written to each interval.
# urls = ["udp://127.0.0.1:8089"] # UDP endpoint example
urls = ["$INFLUXDB_URL"] # required
urls = ["${INFLUXDB_URL}"] # required
# The target database for metrics (telegraf will create it if not exists)
database = "$DATABASE_NAME" # required
database = "${DATABASE_NAME}" # required
# Precision of writes, valid values are "ns", "us" (or "µs"), "ms", "s", "m", "h".
# note: using second precision greatly helps InfluxDB compression
precision = "s"
......
#!/bin/bash
# Install the CLMC web service into a dedicated Python virtual environment,
# run its unit tests with tox, register it as a systemd service and push the
# initial aggregator configuration to it over HTTP.
#
# Arguments:
#   $1 - INFLUX_URL      URL of the InfluxDB instance the aggregator writes to
#   $2 - DATABASE_NAME   InfluxDB database name for aggregated metrics
#   $3 - REPORT_PERIOD   aggregator reporting period (seconds)
#
# NOTE(review): this script reads ${REPO_ROOT} (used below for the service
# sources) but never sets it — it is presumably exported by the calling
# provisioning script; confirm against the caller.
# Get command line parameters
if [ "$#" -ne 3 ]; then
echo "Error: illegal number of arguments: "$#
echo "Usage: install.sh INFLUX_URL DATABASE_NAME REPORT_PERIOD"
exit 1
fi
INFLUX_URL=$1
DATABASE_NAME=$2
REPORT_PERIOD=$3
## CLMC-SERVICE
## ----------------------------------------------------------------------------------
echo "----> Configuring virtualenvwrapper"
export WORKON_HOME=$HOME/.virtualenvs
source /usr/local/bin/virtualenvwrapper.sh
# sanity-check virtualenvwrapper: 'mkvirtualenv --version' is expected to
# return a non-zero (1) status here when the tool is present — TODO confirm
# this convention still holds for the installed virtualenvwrapper version
mkvirtualenv --version
if [ $? -ne 1 ] ; then
echo "Failed: installing virtualenvwrapper"
exit 1
fi
# create the dedicated CLMC virtual environment - and check it succeeded
echo "----> Making CLMC Python environment"
mkvirtualenv CLMC
if [ $? -ne 0 ] ; then
echo "Failed: creating CLMC python environment"
exit 1
fi
# activate the CLMC environment - and check; everything below installs into it
echo "----> Switching to use CLMC python environment"
workon CLMC
if [ $? -ne 0 ] ; then
echo "Failed: switching to CLMC python environment"
exit 1
fi
# install tox (test runner) - and verify it is callable
echo "----> Installing TOX"
pip3 install tox
tox --version
if [ $? -ne 0 ] ; then
echo "Failed: installing tox"
exit 1
fi
# navigate to the clmc-webservice sources - and check the directory exists
echo "----> Moving to CLMC webservice"
cd ${REPO_ROOT}/src/service
if [ $? -ne 0 ] ; then
echo "Failed: could not find clmc-webservice"
exit 1
fi
# run the unit tests using tox before installing the service
echo "----> Running tox"
TOX_OUTPUT="$(tox)"
# tox exit status is not used; instead the captured output is scanned for the
# 'congratulations :)' marker tox prints when all test environments pass
if [[ $TOX_OUTPUT != *"congratulations :)"* ]]; then
echo $TOX_OUTPUT
echo "CLMC service unit tests failed."
exit 1
fi
echo "----> Tox execution of unit tests passed successfully"
# install the service package (from the current directory) into the CLMC env
echo "----> Installing CLMC web service"
pip3 install .
if [ $? -ne 0 ] ; then
echo "Failed: installing clmc-webservice"
exit 1
fi
# create directory for CLMC service logs
echo "----> Creating CLMC web service log directory"
mkdir -p /var/log/flame/clmc
# Install minioclmc as systemctl service
# -----------------------------------------------------------------------
# Generate a start script that re-activates the CLMC virtualenv and launches
# pserve in the background (matches the 'forking' systemd service type below).
mkdir -p /opt/flame/clmc
start_script_file="/opt/flame/clmc/start.sh"
echo "#!/bin/bash" > $start_script_file
echo "export WORKON_HOME=${HOME}/.virtualenvs" >> $start_script_file
echo "source /usr/local/bin/virtualenvwrapper.sh" >> $start_script_file
echo "workon CLMC" >> $start_script_file
echo "pserve ${REPO_ROOT}/src/service/production.ini &" >> $start_script_file
chmod 755 $start_script_file
# Generate the systemd unit that runs the start script on boot.
file="/lib/systemd/system/flameclmc.service"
echo "[Unit]" > $file
echo "Description=flameclmc" >> $file
echo "After=network.target" >> $file
echo "" >> $file
echo "[Service]" >> $file
echo "Type=forking" >> $file
echo "ExecStart=${start_script_file}" >> $file
echo "" >> $file
echo "[Install]" >> $file
echo "WantedBy=multi-user.target" >> $file
systemctl daemon-reload
systemctl enable flameclmc.service
systemctl start flameclmc.service
# block until the service accepts connections on its port (no timeout:
# this loop waits indefinitely if the service never comes up)
while ! nc -z localhost 9080
do
echo "Waiting for clmc service port 9080 to be ready on localhost..."
sleep 5
done
# push the aggregator configuration (report period, database name/URL) to the
# now-running service via its REST API
JSON="{\"aggregator_report_period\": ${REPORT_PERIOD}, \"aggregator_database_name\": \"${DATABASE_NAME}\", \"aggregator_database_url\": \"${INFLUX_URL}\"}"
echo "CONFIG JSON=${JSON}"
curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/config
# finally, instruct the service to start the aggregator
JSON="{\"action\": \"start\"}"
echo "START ACTION JSON=${JSON}"
curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/control
\ No newline at end of file
#!/bin/bash
# Install and start the neo4j graph database from the neo4j apt repository,
# then install the Python client libraries used by the CLMC tooling.
# status of neo4j can be found using: journalctl -e -u neo4j
# Directories:
#/etc/neo4j/neo4j.conf
#/var/lib/neo4j/data
#/var/log/neo4j
#/var/lib/neo4j/metrics
#/var/lib/neo4j/import
#/usr/bin
#/usr/share/neo4j/lib
#/var/lib/neo4j/plugins
#
#admin tool is neo4j-admin
#
# Default ports:
# bolt 7687
# http 7474 including the browser on localhost:7474
# https 7473
sudo apt update
sudo apt install wget openjdk-8-jdk -y
# register the neo4j apt repository and its signing key
wget -O - https://debian.neo4j.org/neotechnology.gpg.key | sudo apt-key add -
echo 'deb https://debian.neo4j.org/repo stable/' | sudo tee -a /etc/apt/sources.list.d/neo4j.list
apt-get update
# add a neo4j user with passwordless sudo permissions
# NOTE(review): appending to /etc/sudoers directly; a drop-in under
# /etc/sudoers.d would be safer — confirm before changing
useradd --comment 'neo4j' --create-home neo4j --shell /bin/bash
echo "neo4j ALL=(ALL) NOPASSWD:ALL" >> /etc/sudoers
# install neo4j as the neo4j user
# when installed as root the systemctl service fails to start, as it expects to run as the neo4j user
# (some directories end up owned by root that neo4j needs to write to but cannot)
# the following commands therefore run everything as neo4j, with some escalated via sudo;
# installation was expected to work with only the apt-get step run as neo4j, but in practice
# all of these must run as neo4j — hence the unusual 'su neo4j -c "sudo ..."' pattern
su neo4j -c "sudo apt-get install neo4j=1:3.4.0 -y"
# listen on all interfaces for both the default connectors and bolt
su neo4j -c "sed -i s/\#dbms.connectors.default_listen_address=0.0.0.0/dbms.connectors.default_listen_address=0.0.0.0/g /etc/neo4j/neo4j.conf"
su neo4j -c "sed -i s/\#dbms.connector.bolt.listen_address=:7687/dbms.connector.bolt.listen_address=0.0.0.0:7687/g /etc/neo4j/neo4j.conf"
# set initial password before starting
su neo4j -c "neo4j-admin set-initial-password admin"
# enable and start the systemd service
su neo4j -c "sudo systemctl enable neo4j"
su neo4j -c "sudo systemctl start neo4j"
### wait up to 60 seconds for the bolt port to accept connections, else fail
end="$((SECONDS+60))"
while true; do
nc -w 2 localhost 7687 && break
[[ "${SECONDS}" -ge "${end}" ]] && exit 1
sleep 1
done
# install Python 3 plus the influxdb and py2neo client libraries
apt-get -y install python3 python3-pip
update-alternatives --install /usr/bin/python python /usr/bin/python3 10
apt-get update
pip3 install influxdb py2neo
#!/bin/bash
# Install the TICK-stack components used by CLMC — InfluxDB, Kapacitor and
# Chronograf — at pinned versions, verifying each downloaded .deb against a
# known checksum before installing, then start all three systemd services.
# Also installs Python 3 / pip and the virtualenv tooling used by later
# provisioning steps.
echo "----> Installing Tick Stack"

# Pinned component versions and their published package checksums.
INFLUX_VERSION=1.5.2
INFLUX_CHECKSUM=42fede7b497bdf30d4eb5138db218d1add986fca4fce4a8bcd9c7d6dabaf572a
KAPACITOR_VERSION=1.4.1
KAPACITOR_CHECKSUM=eea9b215f241906570eafe3857e1d4c5   # md5 (upstream publishes md5 for this release)
CHRONOGRAF_VERSION=1.4.4.2
CHRONOGRAF_CHECKSUM=eea6915aa6db8f134fcd3b095e863b773bfb3a16a26e346dd65904a07df97963

# install virtualenvwrapper to manage python environments - and check
apt-get update
echo "----> Installing Python3 and Pip3"
apt-get install -y python3 python3-pip wget curl
update-alternatives --install /usr/bin/python python /usr/bin/python3 10
echo "----> Installing virtualenv and wrapper"
apt-get install -y python3-virtualenv virtualenvwrapper
pip3 install virtualenv
pip3 install virtualenvwrapper

# install InfluxDB (stderr suppressed to hide wget progress noise)
echo "----> Installing InfluxDB"
wget "https://dl.influxdata.com/influxdb/releases/influxdb_${INFLUX_VERSION}_amd64.deb" 2> /dev/null
# fail on any non-match, including grep errors (the old '[ $? == 1 ]' form
# only caught exit status 1)
if ! sha256sum "influxdb_${INFLUX_VERSION}_amd64.deb" | grep -q -- "${INFLUX_CHECKSUM}"; then
  echo "influx download failed checksum" >&2
  exit 1
fi
dpkg -i "influxdb_${INFLUX_VERSION}_amd64.deb"

# install Kapacitor
echo "----> Installing Kapacitor"
wget "https://dl.influxdata.com/kapacitor/releases/kapacitor_${KAPACITOR_VERSION}_amd64.deb" 2> /dev/null
if ! md5sum "kapacitor_${KAPACITOR_VERSION}_amd64.deb" | grep -q -- "${KAPACITOR_CHECKSUM}"; then
  echo "Kapacitor download failed checksum" >&2
  exit 1
fi
dpkg -i "kapacitor_${KAPACITOR_VERSION}_amd64.deb"

# install Chronograf
echo "----> Installing Chronograph"
wget "https://dl.influxdata.com/chronograf/releases/chronograf_${CHRONOGRAF_VERSION}_amd64.deb" 2> /dev/null
if ! sha256sum "chronograf_${CHRONOGRAF_VERSION}_amd64.deb" | grep -q -- "${CHRONOGRAF_CHECKSUM}"; then
  echo "Chronograf download failed checksum" >&2
  exit 1
fi
dpkg -i "chronograf_${CHRONOGRAF_VERSION}_amd64.deb"

# start all three services
systemctl start influxdb
systemctl start kapacitor
systemctl start chronograf
......@@ -27,187 +27,11 @@
# Force fail on command fail (off for now as virtualenvwrapper install fails)
# set -euo pipefail
# Ensure everything runs in directory of the parent script
cd `dirname $0`
echo "Configuring CLMC service"
# Get command line parameters
if [ "$#" -ne 3 ]; then
echo "Error: illegal number of arguments: "$#
echo "Usage: install.sh INFLUX_URL DATABASE_NAME REPORT_PERIOD"
exit 1
fi
INFLUX_URL=$1
DATABASE_NAME=$2
REPORT_PERIOD=$3
# Define tickstack software versions
INFLUX_VERSION=1.5.2
INFLUX_CHECKSUM=42fede7b497bdf30d4eb5138db218d1add986fca4fce4a8bcd9c7d6dabaf572a
KAPACITOR_VERSION=1.4.1
KAPACITOR_CHECKSUM=eea9b215f241906570eafe3857e1d4c5
CHRONOGRAF_VERSION=1.4.4.2
CHRONOGRAF_CHECKSUM=eea6915aa6db8f134fcd3b095e863b773bfb3a16a26e346dd65904a07df97963
# install virtualenvwrapper to manage python environments - and check
apt-get update
echo "----> Installing Python3 and Pip3"
apt-get install -y python3 python3-pip wget curl
update-alternatives --install /usr/bin/python python /usr/bin/python3 10
echo "----> Installing virtualenv and wrapper"
apt-get install -y python3-virtualenv virtualenvwrapper
pip3 install virtualenv
pip3 install virtualenvwrapper
# install influx
wget https://dl.influxdata.com/influxdb/releases/influxdb_${INFLUX_VERSION}_amd64.deb 2> /dev/null
sha256sum influxdb_${INFLUX_VERSION}_amd64.deb | grep $INFLUX_CHECKSUM > /dev/null
if [ $? == 1 ]; then
echo "influx download failed checksum"
exit 1
fi
dpkg -i influxdb_${INFLUX_VERSION}_amd64.deb
# install kapacitor
wget https://dl.influxdata.com/kapacitor/releases/kapacitor_${KAPACITOR_VERSION}_amd64.deb 2> /dev/null
md5sum kapacitor_${KAPACITOR_VERSION}_amd64.deb | grep $KAPACITOR_CHECKSUM > /dev/null
if [ $? == 1 ]; then
echo "Kapacitor download failed checksum"
exit 1
fi
dpkg -i kapacitor_${KAPACITOR_VERSION}_amd64.deb
# install Chronograf
wget https://dl.influxdata.com/chronograf/releases/chronograf_${CHRONOGRAF_VERSION}_amd64.deb 2> /dev/null
sha256sum chronograf_${CHRONOGRAF_VERSION}_amd64.deb | grep $CHRONOGRAF_CHECKSUM > /dev/null
if [ $? == 1 ]; then
echo "Chronograf download failed checksum"
exit 1
fi
dpkg -i chronograf_${CHRONOGRAF_VERSION}_amd64.deb
systemctl start influxdb
systemctl start kapacitor
systemctl start chronograf
## CLMC-SERVICE
## ----------------------------------------------------------------------------------
echo "----> Configuring virtualenvwrapper"
export WORKON_HOME=$HOME/.virtualenvs
source /usr/local/bin/virtualenvwrapper.sh
# check the mkvirtualenv with a return value of 1 if version comes back correctly
mkvirtualenv --version
if [ $? -ne 1 ] ; then
echo "Failed: installing virtualenvwrapper"
exit 1
fi
# create CLMC virtual environment - and check
echo "----> Making CLMC Python environment"
mkvirtualenv CLMC
if [ $? -ne 0 ] ; then
echo "Failed: creating CLMC python environment"
exit 1
fi
# switch the CLMC environment - and check
echo "----> Switching to use CLMC python environment"
workon CLMC
if [ $? -ne 0 ] ; then
echo "Failed: switching to CLMC python environment"
exit 1
fi
# install tox - and check
echo "----> Installing TOX"
pip3 install tox
tox --version
if [ $? -ne 0 ] ; then
echo "Failed: installing tox"
exit 1
fi
# navigate to the clmc-webservice - and check
echo "----> Moving to CLMC webservice"
cd ${REPO_ROOT}/src/service
if [ $? -ne 0 ] ; then
echo "Failed: could not find clmc-webservice"
exit 1
fi
# running tests using tox
echo "----> Running tox"
TOX_OUTPUT="$(tox)"
# check if tox output contains the 'congratulations :)' bit for tests passed
if [[ $TOX_OUTPUT != *"congratulations :)"* ]]; then
echo $TOX_OUTPUT
echo "CLMC service unit tests failed."
exit 1
fi
echo "----> Tox execution of unit tests passed successfully"
# install the service
echo "----> Installing CLMC web service"
pip3 install .
if [ $? -ne 0 ] ; then
echo "Failed: installing clmc-webservice"
exit 1
fi
# create directory for CLMC service logs
echo "----> Creating CLMC web service log directory"
mkdir -p /var/log/flame/clmc
# create directory for CLMC service config
echo "----> Creating CLMC web service config directory"
mkdir -p /etc/flame/clmc
# Install minioclmc as systemctl service
# -----------------------------------------------------------------------
mkdir -p /opt/flame/clmc
start_script_file="/opt/flame/clmc/start.sh"
echo "#!/bin/bash" > $start_script_file
echo "export WORKON_HOME=${HOME}/.virtualenvs" >> $start_script_file
echo "source /usr/local/bin/virtualenvwrapper.sh" >> $start_script_file
echo "workon CLMC" >> $start_script_file
echo "pserve ${REPO_ROOT}/src/service/production.ini &" >> $start_script_file
chmod 755 $start_script_file
file="/lib/systemd/system/flameclmc.service"
echo "[Unit]" > $file
echo "Description=flameclmc" >> $file
echo "After=network.target" >> $file
echo "" >> $file
echo "[Service]" >> $file
echo "Type=forking" >> $file
echo "ExecStart=${start_script_file}" >> $file
echo "" >> $file
echo "[Install]" >> $file
echo "WantedBy=multi-user.target" >> $file
systemctl daemon-reload
systemctl enable flameclmc.service
systemctl start flameclmc.service
# wait for the clmc service to start
while ! nc -z localhost 9080
do
echo "Waiting for clmc service port 9080 to be ready on localhost..."
sleep 5
done
# configure the CLMC service
JSON="{\"aggregator_report_period\": ${REPORT_PERIOD}, \"aggregator_database_name\": \"${DATABASE_NAME}\", \"aggregator_database_url\": \"${INFLUX_URL}\"}"
echo "CONFIG JSON=${JSON}"
curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/config
# start the aggregator
JSON="{\"action\": \"start\"}"
echo "START ACTION JSON=${JSON}"
curl -H 'Content-Type: application/json' -X PUT -d "${JSON}" http://localhost:9080/aggregator/control
echo "Provisioning CLMC service"
./install-tick-stack.sh $@
./install-clmc-service.sh $@
./install-neo4j.sh $@
#!/bin/bash
usage() {
usage() {
echo "Usage: $0 create|start|stop|destroy [-f config_file] [-r repo_root] [-c service_name]" 1>&2
exit 1
exit 1
}
create() {
......@@ -30,11 +30,11 @@ create() {
cp -rf ${repo_root}/src ${container_vagrant_dir}
# start the container
echo "Starting: ${service_name}"
echo "Starting: ${service_name}"
lxc-start -n ${service_name}
echo "Waiting for container to start: ${service_name}"
STARTED="0"
while [ "$STARTED" == "0" ]; do STARTED=$(lxc-info -n ${service_name} -i | wc -l); done;
STARTED="0"
while [ "$STARTED" == "0" ]; do STARTED=$(lxc-info -n ${service_name} -i | wc -l); done;
# provision software into each container
echo "Provisioning: ${service_name}"
......@@ -69,20 +69,23 @@ create() {
cmd=/vagrant/scripts/clmc-agent/install.sh
lxc-attach -n ${service_name} -v REPO_ROOT="/vagrant" -- ${cmd}
# stop telegraf before changing the configs
lxc-attach -n ${service_name} -- service telegraf stop
# copy telegraf configuration templates
cp -f ${repo_root}/scripts/clmc-agent/telegraf.conf ${container_dir}/etc/telegraf/
cp -f ${repo_root}/scripts/clmc-agent/telegraf_output.conf ${container_dir}/etc/telegraf/telegraf.d/
cp ${repo_root}/src/test/clmctest/services/${sf_id}/telegraf_${sf_id}.conf ${container_dir}/etc/telegraf/telegraf.d/
# copy the 'host' config into all service containers
cp ${repo_root}/src/test/clmctest/services/host/telegraf*.conf ${container_dir}/etc/telegraf/telegraf.d/
# copy the service-specific config
cp ${repo_root}/src/test/clmctest/services/${sf_id}/telegraf*.conf ${container_dir}/etc/telegraf/telegraf.d/
# replace telegraf template with container parameters
# @todo do we really need both scripts to do this?
cmd=/vagrant/scripts/clmc-agent/configure_template.sh
lxc-attach -n ${service_name} -- ${cmd}
cmd="/vagrant/scripts/clmc-agent/configure.sh ${location} ${sfc_id} ${sfc_id_instance} ${sf_id} ${sf_id_instance} ${ipendpoint_id} ${sr_id} ${influxdb_url} ${database_name}"
lxc-attach -n ${service_name} -- ${cmd}
# start telegraf
lxc-attach -n ${service_name} -- service telegraf restart
lxc-attach -n ${service_name} -- service telegraf start
fi
# set forward ports
......@@ -96,12 +99,12 @@ create() {
host_port=$(_jq '.host')
iptables -t nat -A PREROUTING -p tcp -i enp0s3 --dport ${host_port} -j DNAT --to-destination ${ip}:${guest_port}
done
fi
fi
fi
}
start() {
service_name=$1
service_name=$1
if lxc-info -n ${service_name}; then
echo "Starting container: ${service_name}"
lxc-start -n ${service_name}
......@@ -109,7 +112,7 @@ start() {
}
stop() {
service_name=$1
service_name=$1
if lxc-info -n ${service_name}; then
echo "Stopping container: ${service_name}"
lxc-stop -n ${service_name}
......@@ -142,8 +145,8 @@ destroy() {
host_port=$(_jq '.host')
iptables -t nat -D PREROUTING -p tcp -i enp0s3 --dport ${host_port} -j DNAT --to-destination ${ip}:${guest_port}
done
fi
fi
fi
fi
}
# inc option index by 1 as first argument is the command and not parsed by getopts
......@@ -159,7 +162,7 @@ while getopts "hf:r:c:" opt; do
case $opt in
h) usage; exit ;;
f) config_file=${OPTARG} ;;
r) repo_root=${OPTARG} ;;
r) repo_root=${OPTARG} ;;
c) container=${OPTARG} ;;
\?) usage ;;
esac
......@@ -179,7 +182,7 @@ if [ ! -d ${repo_root} ]; then
exit 1
fi
# iterate over the list of services in the configuration file
# iterate over the list of services in the configuration file
command=$1
service_names=$(jq -r '.[].name' ${config_file})
for service_name in $service_names; do
......@@ -193,10 +196,10 @@ for service_name in $service_names; do
start ${service_name} ${config_file} ${repo_root}
;;
stop)
stop ${service_name} ${config_file} ${repo_root}
stop ${service_name} ${config_file} ${repo_root}
;;
destroy)
destroy ${service_name} ${config_file} ${repo_root}
destroy ${service_name} ${config_file} ${repo_root}
;;
*)
usage
......
......@@ -349,3 +349,41 @@ class RoundTripTimeQuery(object):
reverse_data_delay = (8/10**6) * (response_size / bandwidth) * (packet_size / (packet_size - packet_header_size))
return forward_latency + forward_data_delay + service_delay + reverse_latency + reverse_data_delay
@view_defaults(route_name='sfemc_config', renderer='json')
class SFEMCMockConfig(object):
"""
A class-based view for accessing and mutating the configuration of the aggregator.
"""
def __init__(self, request):
"""
Initialises the instance of the view with the request argument.
:param request: client's call request
"""
self.request = request
def get(self):
"""
A GET API call for endpoint configuration.
:return: A JSON response with the configuration of the aggregator.
"""
log.debug("\Getting endpoint configuration\n")
config = {"key": "hello"}
return config
def put(self):
"""
A PUT API call for the status of the aggregator.
:return: A JSON response to the PUT call - essentially with the new configured data and comment of the state of the aggregator
:raises HTTPBadRequest: if request body is not a valid JSON for the configurator
"""
log.debug("\Putting endpoint configuration\n")
import pytest
from pyramid import testing
from pyramid.httpexceptions import HTTPBadRequest
from time import sleep
from clmcservice.utilities import CONFIG_ATTRIBUTES, PROCESS_ATTRIBUTE, RUNNING_FLAG, MALFORMED_FLAG, URL_REGEX
import os
import signal
class TestSFEMCMockAPI(object):
@pytest.fixture(autouse=True)
def app_config(self):
print("app_config")
self.config = testing.setUp()
# endpoint
# sr
# sfc_i
# sf_i
self.config.add_settings({
'my_endpoint_1': {'sfc_i': 'my_sfc_i_1', 'sf_i': 'my_sf_i_1', 'sr': 'my_sr_1'},
'my_endpoint_2': {'sfc_i': 'my_sfc_i_2', 'sf_i': 'my_sf_i_2', 'sr': 'my_sr_2'}})
yield
testing.tearDown()
def test_GET_config(self):
print("Test get")
# nested import so that importing the class view is part of the test itself
from clmcservice.views import SFEMCMockConfig
request = testing.DummyRequest()
response = SFEMCMockConfig(request).get()
print("response={0}".format(response))
    @pytest.mark.parametrize("input_body, output_value", [
        ('{"aggregator_report_period": 10, "aggregator_database_name": "CLMCMetrics", "aggregator_database_url": "http://171.40.231.51:8086"}',
         {'aggregator_report_period': 10, 'aggregator_database_name': "CLMCMetrics", 'aggregator_database_url': "http://171.40.231.51:8086"}),
    ])
    def test_PUT_config(self, input_body, output_value):
        # TODO(review): test body is unimplemented - the parametrized
        # input_body/output_value pair is never sent to the view nor asserted.
        # Presumably this should call SFEMCMockConfig(request).put() with
        # input_body as the request body and compare the result to output_value.
        print("Test put")
\ No newline at end of file
# Sample Chronograf Dashboards
This folder contains several sample dashboards for use in Chronograf.
## Loading a dashboard
To load a dashboard into an existing Chronograf service, use the following `curl` command:
```shell
curl -i -XPOST -H "Content-Type: application/json" http://<service IP>:8888/chronograf/v1/dashboards -d @someFile.json
```
## Saving a dashboard
To save a dashboard use:
```shell
wget http://<service IP>:8888/chronograf/v1/dashboards/<dashboard ID> -O - | jq '.' > someFile.json
```
The pipe into the `jq` command is not strictly necessary, it is there to pretty-print the JSON.
## Overview
### dc_dash.json
Displays the average CPU usage over time for an entire data centre. It has a dashboard variable for the `location` field.
### sf_dash.json
The service function dashboard has two dashboard variables to choose two different service functions to display side by side (left and right column).
Each column displays the total network traffic sent and received in MB over time (1 minute intervals) in the top chart and the average network traffic rate in MB/s for sent and received traffic in the bottom chart (1 minute intervals).
To get the top chart, a nested select statement is used:
```sql
select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last("bytes_recv") / 1048576 AS "RX_MB", last("bytes_sent") / 1048576 AS "TX_MB" FROM "MSDemo"."autogen"."net" WHERE time > :dashboardTime: AND "sf"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)
```
(The constant 1048576 is 1024*1024)
The inner select groups by ipendpoint and time, taking the maximum value in each time period for each ipendpoint and the outer select queries over the result of the inner select but then groups only by time.
The derivative of the first chart requires a further nested select:
```sql
select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last("bytes_recv") / 1048576 AS "RX_MB", last("bytes_sent") / 1048576 AS "TX_MB" FROM "MSDemo"."autogen"."net" WHERE time > :dashboardTime: AND "sf"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m))
```
The outer-most select takes the derivative of the first chart for each data set separately. The derivative function is parameterised to understand it is over a 1m period, but the result is then divided by 60 to give an average MB/s within each 1 minute period.
### minio_dash.json
The minio dashboard has two dashboard variables to choose two different ipendpoints to display side by side (left and right column). Minio endpoints must be chosen for all features to work.
The top chart shows the percentage of requests being served within each of a fixed set of time thresholds. This performance metric would highlight a service endpoint which was struggling to service demand.
The other charts show network traffic using a similar formulation to the `sf_dash` described above.
### nginx_dash.json
There are no dashboard variables on this dashboard: it is hard-coded to show the `nginx_1_ep1` and `nginx_1_ep2` endpoints. Various charts (network, CPU, responses per second) are displayed.
{
"id": 4,
"cells": [
{
"i": "5df7003f-6367-40e1-9eb4-c3ac9d92b05c",
"x": 0,
"y": 0,
"w": 6,
"h": 4,
"name": "Mean %CPU",
"queries": [
{
"query": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"location\"= :location: GROUP BY time(:interval:) FILL(null)",
"queryConfig": {
"database": "",
"measurement": "",
"retentionPolicy": "",
"fields": [],
"tags": {},
"groupBy": {
"time": "",
"tags": []
},
"areTagsAccepted": false,
"rawText": "SELECT mean(\"cpu_usage\") AS \"mean_cpu_usage\" FROM \"MSDemo\".\"autogen\".\"procstat\" WHERE time > :dashboardTime: AND \"location\"= :location: GROUP BY time(:interval:) FILL(null)",
"range": null,
"shifts": null
},
"source": "/chronograf/v1/sources/1"
}
],
"axes": {
"x": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y": {
"bounds": [
"",
""
],
"label": "Mean %CPU",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y2": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
}
},
"type": "line",
"colors": [
{
"id": "2f5981b6-6f85-4efc-989c-9da90fe54189",
"type": "scale",
"hex": "#31C0F6",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "696e12a0-4b7d-4a8f-9d95-a5be5ef3e8b8",
"type": "scale",
"hex": "#A500A5",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "f3788c7b-bdbe-4d14-8439-c6f3b4db75e9",
"type": "scale",
"hex": "#FF7E27",
"name": "Nineteen Eighty Four",
"value": "0"
}
],
"legend": {},
"tableOptions": {
"timeFormat": "MM/DD/YYYY HH:mm:ss",
"verticalTimeAxis": true,
"sortBy": {
"internalName": "time",
"displayName": "",
"visible": true
},
"wrapping": "truncate",
"fieldNames": [
{
"internalName": "time",
"displayName": "",
"visible": true
}
],
"fixFirstColumn": true
},
"links": {
"self": "/chronograf/v1/dashboards/4/cells/5df7003f-6367-40e1-9eb4-c3ac9d92b05c"
}
}
],
"templates": [
{
"tempVar": ":location:",
"values": [
{
"value": "DC3",
"type": "tagValue",
"selected": true
}
],
"id": "51e972f9-c4c3-4bcf-a264-9f95ffa86017",
"type": "tagValues",
"label": "",
"query": {
"influxql": "SHOW TAG VALUES ON :database: FROM :measurement: WITH KEY=:tagKey:",
"db": "MSDemo",
"measurement": "cpu",
"tagKey": "location",
"fieldKey": ""
},
"links": {
"self": "/chronograf/v1/dashboards/4/templates/51e972f9-c4c3-4bcf-a264-9f95ffa86017"
}
}
],
"name": "Data Centre",
"organization": "default",
"links": {
"self": "/chronograf/v1/dashboards/4",
"cells": "/chronograf/v1/dashboards/4/cells",
"templates": "/chronograf/v1/dashboards/4/templates"
}
}
This diff is collapsed.
This diff is collapsed.
{
"id": 2,
"cells": [
{
"i": "d4dad017-395e-4192-9a89-cfde2b5d133a",
"x": 5,
"y": 4,
"w": 5,
"h": 4,
"name": "Average MB/s sent/recv for service-function 2",
"queries": [
{
"query": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf2: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)) ",
"queryConfig": {
"database": "",
"measurement": "",
"retentionPolicy": "",
"fields": [],
"tags": {},
"groupBy": {
"time": "",
"tags": []
},
"areTagsAccepted": false,
"rawText": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf2: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)) ",
"range": null,
"shifts": null
},
"source": "/chronograf/v1/sources/2"
}
],
"axes": {
"x": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y": {
"bounds": [
"",
""
],
"label": "MB / sec",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y2": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
}
},
"type": "line-stepplot",
"colors": [
{
"id": "2f5981b6-6f85-4efc-989c-9da90fe54189",
"type": "scale",
"hex": "#31C0F6",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "696e12a0-4b7d-4a8f-9d95-a5be5ef3e8b8",
"type": "scale",
"hex": "#A500A5",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "f3788c7b-bdbe-4d14-8439-c6f3b4db75e9",
"type": "scale",
"hex": "#FF7E27",
"name": "Nineteen Eighty Four",
"value": "0"
}
],
"legend": {},
"tableOptions": {
"timeFormat": "MM/DD/YYYY HH:mm:ss",
"verticalTimeAxis": true,
"sortBy": {
"internalName": "time",
"displayName": "",
"visible": true
},
"wrapping": "truncate",
"fieldNames": [
{
"internalName": "time",
"displayName": "",
"visible": true
}
],
"fixFirstColumn": true
},
"links": {
"self": "/chronograf/v1/dashboards/2/cells/d4dad017-395e-4192-9a89-cfde2b5d133a"
}
},
{
"i": "8522ec33-51a1-4cd4-a02b-02525800c25e",
"x": 5,
"y": 0,
"w": 5,
"h": 4,
"name": "Total MB sent/recv for service-function 2",
"queries": [
{
"query": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf2: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)",
"queryConfig": {
"database": "",
"measurement": "",
"retentionPolicy": "",
"fields": [],
"tags": {},
"groupBy": {
"time": "",
"tags": []
},
"areTagsAccepted": false,
"rawText": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf2: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)",
"range": null,
"shifts": null
},
"source": "/chronograf/v1/sources/2"
}
],
"axes": {
"x": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y": {
"bounds": [
"",
""
],
"label": "MB",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y2": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
}
},
"type": "line-stepplot",
"colors": [
{
"id": "2f5981b6-6f85-4efc-989c-9da90fe54189",
"type": "scale",
"hex": "#31C0F6",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "696e12a0-4b7d-4a8f-9d95-a5be5ef3e8b8",
"type": "scale",
"hex": "#A500A5",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "f3788c7b-bdbe-4d14-8439-c6f3b4db75e9",
"type": "scale",
"hex": "#FF7E27",
"name": "Nineteen Eighty Four",
"value": "0"
}
],
"legend": {},
"tableOptions": {
"timeFormat": "MM/DD/YYYY HH:mm:ss",
"verticalTimeAxis": true,
"sortBy": {
"internalName": "time",
"displayName": "",
"visible": true
},
"wrapping": "truncate",
"fieldNames": [
{
"internalName": "time",
"displayName": "",
"visible": true
}
],
"fixFirstColumn": true
},
"links": {
"self": "/chronograf/v1/dashboards/2/cells/8522ec33-51a1-4cd4-a02b-02525800c25e"
}
},
{
"i": "dd7693ca-0622-41f0-9f2c-d9a468434097",
"x": 0,
"y": 4,
"w": 5,
"h": 4,
"name": "Average MB/s sent/recv for service-function 1",
"queries": [
{
"query": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)) ",
"queryConfig": {
"database": "",
"measurement": "",
"retentionPolicy": "",
"fields": [],
"tags": {},
"groupBy": {
"time": "",
"tags": []
},
"areTagsAccepted": false,
"rawText": "select derivative(total_RX_MB, 1m) / 60 as RX_MB_per_s, derivative(total_TX_MB, 1m) / 60 as TX_MB_per_s from (select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)) ",
"range": null,
"shifts": null
},
"source": "/chronograf/v1/sources/2"
}
],
"axes": {
"x": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y": {
"bounds": [
"",
""
],
"label": "MB / sec",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y2": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
}
},
"type": "line-stepplot",
"colors": [
{
"id": "2f5981b6-6f85-4efc-989c-9da90fe54189",
"type": "scale",
"hex": "#31C0F6",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "696e12a0-4b7d-4a8f-9d95-a5be5ef3e8b8",
"type": "scale",
"hex": "#A500A5",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "f3788c7b-bdbe-4d14-8439-c6f3b4db75e9",
"type": "scale",
"hex": "#FF7E27",
"name": "Nineteen Eighty Four",
"value": "0"
}
],
"legend": {},
"tableOptions": {
"timeFormat": "MM/DD/YYYY HH:mm:ss",
"verticalTimeAxis": true,
"sortBy": {
"internalName": "time",
"displayName": "",
"visible": true
},
"wrapping": "truncate",
"fieldNames": [
{
"internalName": "time",
"displayName": "",
"visible": true
}
],
"fixFirstColumn": true
},
"links": {
"self": "/chronograf/v1/dashboards/2/cells/dd7693ca-0622-41f0-9f2c-d9a468434097"
}
},
{
"i": "419b4e3c-1ca3-49ac-bc91-410bfbeb879a",
"x": 0,
"y": 0,
"w": 5,
"h": 4,
"name": "Total MB sent/recv for service-function 1",
"queries": [
{
"query": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)",
"queryConfig": {
"database": "",
"measurement": "",
"retentionPolicy": "",
"fields": [],
"tags": {},
"groupBy": {
"time": "",
"tags": []
},
"areTagsAccepted": false,
"rawText": "select sum(RX_MB) as total_RX_MB, sum(TX_MB) as total_TX_MB from (SELECT last(\"bytes_recv\") / 1048576 AS \"RX_MB\", last(\"bytes_sent\") / 1048576 AS \"TX_MB\" FROM \"MSDemo\".\"autogen\".\"net\" WHERE time > :dashboardTime: AND \"sf\"=:sf1: GROUP BY time(1m), ipendpoint FILL(null)) group by time(1m)",
"range": null,
"shifts": null
},
"source": "/chronograf/v1/sources/2"
}
],
"axes": {
"x": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y": {
"bounds": [
"",
""
],
"label": "MB",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
},
"y2": {
"bounds": [
"",
""
],
"label": "",
"prefix": "",
"suffix": "",
"base": "10",
"scale": "linear"
}
},
"type": "line-stepplot",
"colors": [
{
"id": "2f5981b6-6f85-4efc-989c-9da90fe54189",
"type": "scale",
"hex": "#31C0F6",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "696e12a0-4b7d-4a8f-9d95-a5be5ef3e8b8",
"type": "scale",
"hex": "#A500A5",
"name": "Nineteen Eighty Four",
"value": "0"
},
{
"id": "f3788c7b-bdbe-4d14-8439-c6f3b4db75e9",
"type": "scale",
"hex": "#FF7E27",
"name": "Nineteen Eighty Four",
"value": "0"
}
],
"legend": {},
"tableOptions": {
"timeFormat": "MM/DD/YYYY HH:mm:ss",
"verticalTimeAxis": true,
"sortBy": {
"internalName": "time",
"displayName": "",
"visible": true
},
"wrapping": "truncate",
"fieldNames": [
{
"internalName": "time",
"displayName": "",
"visible": true
}
],
"fixFirstColumn": true
},
"links": {
"self": "/chronograf/v1/dashboards/2/cells/419b4e3c-1ca3-49ac-bc91-410bfbeb879a"
}
}
],
"templates": [
{
"tempVar": ":sf1:",
"values": [
{
"value": "nginx",
"type": "tagValue",
"selected": true
}
],
"id": "2160b8b2-a885-4518-90dc-f2363eb3fc83",
"type": "tagValues",
"label": "",
"query": {
"influxql": "SHOW TAG VALUES ON :database: FROM :measurement: WITH KEY=:tagKey:",
"db": "MSDemo",
"measurement": "cpu",
"tagKey": "sf",
"fieldKey": ""
},
"links": {
"self": "/chronograf/v1/dashboards/2/templates/2160b8b2-a885-4518-90dc-f2363eb3fc83"
}
},
{
"tempVar": ":sf2:",
"values": [
{
"value": "minio",
"type": "tagValue",
"selected": true
}
],
"id": "beb094ee-5bed-4956-a551-83fe8e905c19",
"type": "tagValues",
"label": "",
"query": {
"influxql": "SHOW TAG VALUES ON :database: FROM :measurement: WITH KEY=:tagKey:",
"db": "MSDemo",
"measurement": "cpu",
"tagKey": "sf",
"fieldKey": ""
},
"links": {
"self": "/chronograf/v1/dashboards/2/templates/beb094ee-5bed-4956-a551-83fe8e905c19"
}
}
],
"name": "Service Functions",
"organization": "default",
"links": {
"self": "/chronograf/v1/dashboards/2",
"cells": "/chronograf/v1/dashboards/2/cells",
"templates": "/chronograf/v1/dashboards/2/templates"
}
}
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment