Mirror of https://github.com/NixOS/nixpkgs.git (synced 2025-06-12 04:35:41 +03:00)

nixos/tests/prometheus: migrate to runTest

parent 9f489ef1a6
commit 008a4657dd

8 changed files with 597 additions and 676 deletions
nixos/tests/all-tests.nix
@@ -1115,7 +1115,7 @@ in
   private-gpt = handleTest ./private-gpt.nix { };
   privatebin = runTest ./privatebin.nix;
   privoxy = handleTest ./privoxy.nix { };
-  prometheus = handleTest ./prometheus { };
+  prometheus = import ./prometheus { inherit runTest; };
   prometheus-exporters = handleTest ./prometheus-exporters.nix { };
   prosody = handleTest ./xmpp/prosody.nix { };
   prosody-mysql = handleTest ./xmpp/prosody-mysql.nix { };
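A usage sketch, not part of this commit: assuming all-tests.nix is wired up as the nixosTests attribute set of the package set (the usual nixpkgs arrangement), the migrated tests stay reachable under nixosTests.prometheus.*, for example:

  # Sketch only: evaluate one of the migrated tests from a nixpkgs checkout.
  # Assumes <nixpkgs> resolves to this tree; building the resulting
  # derivation (e.g. with nix-build on this file) runs the alertmanager VM test.
  let
    pkgs = import <nixpkgs> { };
  in
  pkgs.nixosTests.prometheus.alertmanager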
The remaining hunks rewrite the files under nixos/tests/prometheus/: each test module drops its import ../make-test-python.nix ({ pkgs, ... }: ... ) wrapper and becomes a plain { lib, pkgs, ... }: module evaluated by runTest, and default.nix takes { runTest } instead of importing the test files itself. Except for default.nix (shown as a diff), each hunk is reproduced below as the resulting file.

nixos/tests/prometheus/alertmanager.nix
@@ -1,160 +1,146 @@

{ lib, pkgs, ... }:

{
  name = "prometheus-alertmanager";

  nodes = {
    prometheus =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";

          alertmanagers = [
            {
              scheme = "http";
              static_configs = [
                { targets = [ "alertmanager:${toString config.services.prometheus.alertmanager.port}" ]; }
              ];
            }
          ];

          rules = [
            ''
              groups:
                - name: test
                  rules:
                    - alert: InstanceDown
                      expr: up == 0
                      for: 5s
                      labels:
                        severity: page
                      annotations:
                        summary: "Instance {{ $labels.instance }} down"
            ''
          ];

          scrapeConfigs = [
            {
              job_name = "alertmanager";
              static_configs = [
                { targets = [ "alertmanager:${toString config.services.prometheus.alertmanager.port}" ]; }
              ];
            }
            {
              job_name = "node";
              static_configs = [
                { targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ]; }
              ];
            }
          ];
        };
      };

    alertmanager =
      { config, pkgs, ... }:
      {
        services.prometheus.alertmanager = {
          enable = true;
          openFirewall = true;

          configuration = {
            global = {
              resolve_timeout = "1m";
            };

            route = {
              # Root route node
              receiver = "test";
              group_by = [ "..." ];
              continue = false;
              group_wait = "1s";
              group_interval = "15s";
              repeat_interval = "24h";
            };

            receivers = [
              {
                name = "test";
                webhook_configs = [
                  {
                    url = "http://logger:6725";
                    send_resolved = true;
                    max_alerts = 0;
                  }
                ];
              }
            ];
          };
        };
      };

    logger =
      { config, pkgs, ... }:
      {
        networking.firewall.allowedTCPPorts = [ 6725 ];

        services.prometheus.alertmanagerWebhookLogger.enable = true;
      };
  };

  testScript = ''
    alertmanager.wait_for_unit("alertmanager")
    alertmanager.wait_for_open_port(9093)
    alertmanager.wait_until_succeeds("curl -s http://127.0.0.1:9093/-/ready")
    #alertmanager.wait_until_succeeds("journalctl -o cat -u alertmanager.service | grep 'version=${pkgs.prometheus-alertmanager.version}'")

    logger.wait_for_unit("alertmanager-webhook-logger")
    logger.wait_for_open_port(6725)

    prometheus.wait_for_unit("prometheus")
    prometheus.wait_for_open_port(9090)

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"alertmanager\"\}==1)' | "
        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(alertmanager_build_info)%20by%20(version)' | "
        + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-alertmanager.version}\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\}!=1)' | "
        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=alertmanager_notifications_total\{integration=\"webhook\"\}' | "
        + "jq '.data.result[0].value[1]' | grep -v '\"0\"'"
    )

    logger.wait_until_succeeds(
        "journalctl -o cat -u alertmanager-webhook-logger.service | grep '\"alertname\":\"InstanceDown\"'"
    )

    logger.log(logger.succeed("systemd-analyze security alertmanager-webhook-logger.service | grep -v '✓'"))

    alertmanager.log(alertmanager.succeed("systemd-analyze security alertmanager.service | grep -v '✓'"))
  '';
}
nixos/tests/prometheus/config-reload.nix
@@ -1,120 +1,108 @@

{ lib, pkgs, ... }:

{
  name = "prometheus-config-reload";

  nodes = {
    prometheus =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          enableReload = true;
          globalConfig.scrape_interval = "2s";
          scrapeConfigs = [
            {
              job_name = "prometheus";
              static_configs = [ { targets = [ "prometheus:${toString config.services.prometheus.port}" ]; } ];
            }
          ];
        };

        specialisation = {
          "prometheus-config-change" = {
            configuration = {
              environment.systemPackages = [ pkgs.yq ];

              # This configuration just adds a new prometheus job
              # to scrape the node_exporter metrics of the s3 machine.
              services.prometheus = {
                scrapeConfigs = [
                  {
                    job_name = "node";
                    static_configs = [
                      { targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ]; }
                    ];
                  }
                ];
              };
            };
          };
        };
      };
  };

  testScript = ''
    prometheus.wait_for_unit("prometheus")
    prometheus.wait_for_open_port(9090)

    # Check if switching to a NixOS configuration that changes the prometheus
    # configuration reloads (instead of restarts) prometheus before the switch
    # finishes successfully:
    with subtest("config change reloads prometheus"):
        import json
        # We check if prometheus has finished reloading by looking for the message
        # "Completed loading of configuration file" in the journal between the start
        # and finish of switching to the new NixOS configuration.
        #
        # To mark the start we record the journal cursor before starting the switch:
        cursor_before_switching = json.loads(
            prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
        )["__CURSOR"]

        # Now we switch:
        prometheus_config_change = prometheus.succeed(
            "readlink /run/current-system/specialisation/prometheus-config-change"
        ).strip()
        prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")

        # Next we retrieve all logs since the start of switching:
        logs_after_starting_switching = prometheus.succeed(
            """
              journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
            """.format(
                cursor_before_switching=cursor_before_switching
            )
        )

        # Finally we check if the message "Completed loading of configuration file"
        # occurs before the "finished switching to system configuration" message:
        finished_switching_msg = (
            "finished switching to system configuration " + prometheus_config_change
        )
        reloaded_before_switching_finished = False
        finished_switching = False
        for log_line in logs_after_starting_switching.split("\n"):
            msg = json.loads(log_line)["MESSAGE"]
            if "Completed loading of configuration file" in msg:
                reloaded_before_switching_finished = True
            if msg == finished_switching_msg:
                finished_switching = True
                break

        assert reloaded_before_switching_finished
        assert finished_switching

        # Check if the reloaded config includes the new node job:
        prometheus.succeed(
            """
              curl -sf http://127.0.0.1:9090/api/v1/status/config \
                | jq -r .data.yaml \
                | yq '.scrape_configs | any(.job_name == "node")' \
                | grep true
            """
        )
  '';
}
nixos/tests/prometheus/default.nix
@@ -1,14 +1,10 @@
-{
-  system ? builtins.currentSystem,
-  config ? { },
-  pkgs ? import ../../.. { inherit system config; },
-}:
+{ runTest }:
 
 {
-  alertmanager = import ./alertmanager.nix { inherit system pkgs; };
-  config-reload = import ./config-reload.nix { inherit system pkgs; };
-  federation = import ./federation.nix { inherit system pkgs; };
-  prometheus-pair = import ./prometheus-pair.nix { inherit system pkgs; };
-  pushgateway = import ./pushgateway.nix { inherit system pkgs; };
-  remote-write = import ./remote-write.nix { inherit system pkgs; };
+  alertmanager = runTest ./alertmanager.nix;
+  config-reload = runTest ./config-reload.nix;
+  federation = runTest ./federation.nix;
+  prometheus-pair = runTest ./prometheus-pair.nix;
+  pushgateway = runTest ./pushgateway.nix;
+  remote-write = runTest ./remote-write.nix;
 }
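Since default.nix now only maps attribute names to runTest calls, adding another test to this set would need one new module plus one line here. A minimal sketch with a hypothetical file name, following the shape of the modules migrated in this commit (not part of the diff):

  # Hypothetical nixos/tests/prometheus/example.nix: a plain module in the
  # new runTest style, with the same top-level attributes as the files below.
  { lib, pkgs, ... }:

  {
    name = "prometheus-example";

    nodes.machine =
      { config, pkgs, ... }:
      {
        services.prometheus.enable = true;
      };

    testScript = ''
      machine.wait_for_unit("prometheus")
      machine.wait_for_open_port(9090)
    '';
  }

It would then be registered in default.nix above as: example = runTest ./example.nix;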
nixos/tests/prometheus/federation.nix
@@ -1,227 +1,203 @@

{ lib, pkgs, ... }:

{
  name = "prometheus-federation";

  nodes = {
    global1 =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";

          scrapeConfigs = [
            {
              job_name = "federate";
              honor_labels = true;
              metrics_path = "/federate";

              params = {
                "match[]" = [
                  "{job=\"node\"}"
                  "{job=\"prometheus\"}"
                ];
              };

              static_configs = [
                {
                  targets = [
                    "prometheus1:${toString config.services.prometheus.port}"
                    "prometheus2:${toString config.services.prometheus.port}"
                  ];
                }
              ];
            }
            {
              job_name = "prometheus";
              static_configs = [
                {
                  targets = [
                    "global1:${toString config.services.prometheus.port}"
                    "global2:${toString config.services.prometheus.port}"
                  ];
                }
              ];
            }
          ];
        };
      };

    global2 =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";

          scrapeConfigs = [
            {
              job_name = "federate";
              honor_labels = true;
              metrics_path = "/federate";

              params = {
                "match[]" = [
                  "{job=\"node\"}"
                  "{job=\"prometheus\"}"
                ];
              };

              static_configs = [
                {
                  targets = [
                    "prometheus1:${toString config.services.prometheus.port}"
                    "prometheus2:${toString config.services.prometheus.port}"
                  ];
                }
              ];
            }
            {
              job_name = "prometheus";
              static_configs = [
                {
                  targets = [
                    "global1:${toString config.services.prometheus.port}"
                    "global2:${toString config.services.prometheus.port}"
                  ];
                }
              ];
            }
          ];
        };
      };

    prometheus1 =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";

          scrapeConfigs = [
            {
              job_name = "node";
              static_configs = [
                { targets = [ "node1:${toString config.services.prometheus.exporters.node.port}" ]; }
              ];
            }
            {
              job_name = "prometheus";
              static_configs = [ { targets = [ "prometheus1:${toString config.services.prometheus.port}" ]; } ];
            }
          ];
        };
      };

    prometheus2 =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";

          scrapeConfigs = [
            {
              job_name = "node";
              static_configs = [
                { targets = [ "node2:${toString config.services.prometheus.exporters.node.port}" ]; }
              ];
            }
            {
              job_name = "prometheus";
              static_configs = [ { targets = [ "prometheus2:${toString config.services.prometheus.port}" ]; } ];
            }
          ];
        };
      };

    node1 =
      { config, pkgs, ... }:
      {
        services.prometheus.exporters.node = {
          enable = true;
          openFirewall = true;
        };
      };

    node2 =
      { config, pkgs, ... }:
      {
        services.prometheus.exporters.node = {
          enable = true;
          openFirewall = true;
        };
      };
  };

  testScript = ''
    for machine in node1, node2:
        machine.wait_for_unit("prometheus-node-exporter")
        machine.wait_for_open_port(9100)

    for machine in prometheus1, prometheus2, global1, global2:
        machine.wait_for_unit("prometheus")
        machine.wait_for_open_port(9090)

    # Verify both servers got the same data from the exporter
    for machine in prometheus1, prometheus2:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )

    for machine in global1, global2:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"node\"\})' | "
            + "jq '.data.result[0].value[1]' | grep '\"2\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(prometheus_build_info)' | "
            + "jq '.data.result[0].value[1]' | grep '\"4\"'"
        )
  '';
}
nixos/tests/prometheus/prometheus-pair.nix
@@ -1,93 +1,91 @@

{ lib, pkgs, ... }:

{
  name = "prometheus-pair";

  nodes = {
    prometheus1 =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";
          scrapeConfigs = [
            {
              job_name = "prometheus";
              static_configs = [
                {
                  targets = [
                    "prometheus1:${toString config.services.prometheus.port}"
                    "prometheus2:${toString config.services.prometheus.port}"
                  ];
                }
              ];
            }
          ];
        };
      };

    prometheus2 =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";
          scrapeConfigs = [
            {
              job_name = "prometheus";
              static_configs = [
                {
                  targets = [
                    "prometheus1:${toString config.services.prometheus.port}"
                    "prometheus2:${toString config.services.prometheus.port}"
                  ];
                }
              ];
            }
          ];
        };
      };
  };

  testScript = ''
    for machine in prometheus1, prometheus2:
        machine.wait_for_unit("prometheus")
        machine.wait_for_open_port(9090)
        machine.wait_until_succeeds("journalctl -o cat -u prometheus.service | grep 'version=${pkgs.prometheus.version}'")
        machine.wait_until_succeeds("curl -sSf http://localhost:9090/-/healthy")

    # Prometheii ready - run some queries
    for machine in prometheus1, prometheus2:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\",version=\"${pkgs.prometheus.version}\"\}' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=prometheus_build_info\{instance=\"prometheus1:9090\"\}' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
            + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus.version}\"'"
        )

        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(prometheus_build_info)%20by%20(version)' | "
            + "jq '.data.result[0].value[1]' | grep '\"2\"'"
        )

    prometheus1.log(prometheus1.succeed("systemd-analyze security prometheus.service | grep -v '✓'"))
  '';
}
nixos/tests/prometheus/pushgateway.nix
@@ -1,102 +1,91 @@

{ lib, pkgs, ... }:

{
  name = "prometheus-pushgateway";

  nodes = {
    prometheus =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";

          scrapeConfigs = [
            {
              job_name = "pushgateway";
              static_configs = [ { targets = [ "pushgateway:9091" ]; } ];
            }
          ];
        };
      };

    pushgateway =
      { config, pkgs, ... }:
      {
        networking.firewall.allowedTCPPorts = [ 9091 ];

        services.prometheus.pushgateway = {
          enable = true;
        };
      };

    client = { config, pkgs, ... }: { };
  };

  testScript = ''
    pushgateway.wait_for_unit("pushgateway")
    pushgateway.wait_for_open_port(9091)
    pushgateway.wait_until_succeeds("curl -s http://127.0.0.1:9091/-/ready")
    pushgateway.wait_until_succeeds("journalctl -o cat -u pushgateway.service | grep 'version=${pkgs.prometheus-pushgateway.version}'")

    prometheus.wait_for_unit("prometheus")
    prometheus.wait_for_open_port(9090)

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=count(up\{job=\"pushgateway\"\})' | "
        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=sum(pushgateway_build_info)%20by%20(version)' | "
        + "jq '.data.result[0].metric.version' | grep '\"${pkgs.prometheus-pushgateway.version}\"'"
    )

    # Add a metric and check in Prometheus
    client.wait_until_succeeds(
        "echo 'some_metric 3.14' | curl --data-binary @- http://pushgateway:9091/metrics/job/some_job"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
        + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
        + "jq '.data.result[0].value[1]' | grep 'null'"
    )

    # Delete the metric, check not in Prometheus
    client.wait_until_succeeds(
        "curl -X DELETE http://pushgateway:9091/metrics/job/some_job"
    )

    prometheus.wait_until_fails(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=some_metric' | "
        + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
    )

    prometheus.wait_until_succeeds(
        "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=absent(some_metric)' | "
        + "jq '.data.result[0].value[1]' | grep '\"1\"'"
    )

    pushgateway.log(pushgateway.succeed("systemd-analyze security pushgateway.service | grep -v '✓'"))
  '';
}
nixos/tests/prometheus/remote-write.nix
@@ -1,81 +1,69 @@

{ lib, pkgs, ... }:

{
  name = "prometheus-remote-write";

  nodes = {
    receiver =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";

          extraFlags = [ "--web.enable-remote-write-receiver" ];
        };
      };

    prometheus =
      { config, pkgs, ... }:
      {
        environment.systemPackages = [ pkgs.jq ];

        networking.firewall.allowedTCPPorts = [ config.services.prometheus.port ];

        services.prometheus = {
          enable = true;
          globalConfig.scrape_interval = "2s";

          remoteWrite = [ { url = "http://receiver:9090/api/v1/write"; } ];

          scrapeConfigs = [
            {
              job_name = "node";
              static_configs = [
                { targets = [ "node:${toString config.services.prometheus.exporters.node.port}" ]; }
              ];
            }
          ];
        };
      };

    node =
      { config, pkgs, ... }:
      {
        services.prometheus.exporters.node = {
          enable = true;
          openFirewall = true;
        };
      };
  };

  testScript = ''
    node.wait_for_unit("prometheus-node-exporter")
    node.wait_for_open_port(9100)

    for machine in prometheus, receiver:
        machine.wait_for_unit("prometheus")
        machine.wait_for_open_port(9090)

    # Verify both servers got the same data from the exporter
    for machine in prometheus, receiver:
        machine.wait_until_succeeds(
            "curl -sf 'http://127.0.0.1:9090/api/v1/query?query=node_exporter_build_info\{instance=\"node:9100\"\}' | "
            + "jq '.data.result[0].value[1]' | grep '\"1\"'"
        )
  '';
}