import fetch from "node-fetch";

const PROMETHEUS_URL = "http://localhost:9090";

// Maps server IDs used by the application to Prometheus scrape job names;
// each value must match a job_name in the Prometheus scrape configuration.
const SERVER_JOBS = {
  "vrising": "vrising",
  "factorio": "factorio",
  "minecraft": "minecraft",
  "zomboid": "zomboid",
  "palworld": "palworld",
  "terraria": "terraria",
  "openttd": "openttd"
};

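// The two helpers below assume the standard Prometheus HTTP API response
// shape (sketched from the API docs, trimmed to the fields used here):
//
//   instant query (/api/v1/query):
//     { status: "success", data: { resultType: "vector",
//       result: [ { metric: {...}, value: [ <unix_s>, "<value>" ] } ] } }
//
//   range query (/api/v1/query_range):
//     { status: "success", data: { resultType: "matrix",
//       result: [ { metric: {...}, values: [ [ <unix_s>, "<value>" ], ... ] } ] } }
//
//   on failure: { status: "error", error: "<message>", ... }
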
// Runs an instant query against the Prometheus HTTP API and returns the
// result array (empty if nothing matched the expression).
export async function queryPrometheus(query) {
  const url = `${PROMETHEUS_URL}/api/v1/query?query=${encodeURIComponent(query)}`;
  const res = await fetch(url);
  const data = await res.json();
  if (data.status !== "success") {
    throw new Error(`Prometheus query failed: ${data.error}`);
  }
  return data.data.result;
}

// Runs a range query; start and end are Unix timestamps in seconds and step
// is the sample resolution in seconds. Returns the array of matched series.
export async function queryPrometheusRange(query, start, end, step) {
  const url = `${PROMETHEUS_URL}/api/v1/query_range?query=${encodeURIComponent(query)}&start=${start}&end=${end}&step=${step}`;
  const res = await fetch(url);
  const data = await res.json();
  if (data.status !== "success") {
    throw new Error(`Prometheus query failed: ${data.error}`);
  }
  return data.data.result;
}

// Returns CPU, memory, and network time series for a server over the given
// range ("15m", "1h", "6h", or "24h"; anything else falls back to 1h).
export async function getServerMetricsHistory(serverId, range = "1h") {
  const job = SERVER_JOBS[serverId];
  if (!job) {
    throw new Error(`Unknown server ID: ${serverId}`);
  }

  const end = Math.floor(Date.now() / 1000);

  // Pick a window length and sample step that keep the point count modest.
  let duration, step;
  switch (range) {
    case "15m": duration = 15 * 60; step = 15; break;
    case "1h": duration = 60 * 60; step = 60; break;
    case "6h": duration = 6 * 60 * 60; step = 300; break;
    case "24h": duration = 24 * 60 * 60; step = 900; break;
    default: duration = 60 * 60; step = 60;
  }
  const start = end - duration;

  // node_exporter queries: CPU busy %, memory used %, and network throughput
  // in bytes/s, excluding loopback and container/bridge interfaces.
  const cpuQuery = `100 - (avg by(instance) (irate(node_cpu_seconds_total{job="${job}",mode="idle"}[5m])) * 100)`;
  const memQuery = `100 * (1 - ((node_memory_MemAvailable_bytes{job="${job}"} or node_memory_MemFree_bytes{job="${job}"}) / node_memory_MemTotal_bytes{job="${job}"}))`;
  const netRxQuery = `sum(irate(node_network_receive_bytes_total{job="${job}",device!~"lo|veth.*|docker.*|br-.*"}[5m]))`;
  const netTxQuery = `sum(irate(node_network_transmit_bytes_total{job="${job}",device!~"lo|veth.*|docker.*|br-.*"}[5m]))`;

  try {
    const [cpuResult, memResult, netRxResult, netTxResult] = await Promise.all([
      queryPrometheusRange(cpuQuery, start, end, step),
      queryPrometheusRange(memQuery, start, end, step),
      queryPrometheusRange(netRxQuery, start, end, step),
      queryPrometheusRange(netTxQuery, start, end, step)
    ]);

    // Range results are [unixSeconds, "value"] pairs; convert to millisecond
    // timestamps and numeric values.
    const cpu = cpuResult[0]?.values?.map(([ts, val]) => ({ timestamp: ts * 1000, value: parseFloat(val) || 0 })) || [];
    const memory = memResult[0]?.values?.map(([ts, val]) => ({ timestamp: ts * 1000, value: parseFloat(val) || 0 })) || [];
    const networkRx = netRxResult[0]?.values?.map(([ts, val]) => ({ timestamp: ts * 1000, value: parseFloat(val) || 0 })) || [];
    const networkTx = netTxResult[0]?.values?.map(([ts, val]) => ({ timestamp: ts * 1000, value: parseFloat(val) || 0 })) || [];

    return { cpu, memory, networkRx, networkTx };
  } catch (error) {
    console.error("Prometheus query error:", error);
    return { cpu: [], memory: [], networkRx: [], networkTx: [] };
  }
}

// Returns a point-in-time snapshot of CPU %, memory usage, uptime, and core
// count for a server.
export async function getCurrentMetrics(serverId) {
  const job = SERVER_JOBS[serverId];
  if (!job) {
    throw new Error(`Unknown server ID: ${serverId}`);
  }

  const cpuQuery = `100 - (avg by(instance) (irate(node_cpu_seconds_total{job="${job}",mode="idle"}[5m])) * 100)`;
  const memPercentQuery = `100 * (1 - ((node_memory_MemAvailable_bytes{job="${job}"} or node_memory_MemFree_bytes{job="${job}"}) / node_memory_MemTotal_bytes{job="${job}"}))`;
  const memUsedQuery = `node_memory_MemTotal_bytes{job="${job}"} - (node_memory_MemAvailable_bytes{job="${job}"} or node_memory_MemFree_bytes{job="${job}"})`;
  const memTotalQuery = `node_memory_MemTotal_bytes{job="${job}"}`;
  const uptimeQuery = `node_time_seconds{job="${job}"} - node_boot_time_seconds{job="${job}"}`;
  const cpuCoresQuery = `count(node_cpu_seconds_total{job="${job}",mode="idle"})`;

  try {
    const [cpuResult, memPercentResult, memUsedResult, memTotalResult, uptimeResult, cpuCoresResult] = await Promise.all([
      queryPrometheus(cpuQuery),
      queryPrometheus(memPercentQuery),
      queryPrometheus(memUsedQuery),
      queryPrometheus(memTotalQuery),
      queryPrometheus(uptimeQuery),
      queryPrometheus(cpuCoresQuery)
    ]);

    // Instant-query values are [unixSeconds, "value"]; index 1 holds the sample.
    return {
      cpu: parseFloat(cpuResult[0]?.value?.[1]) || 0,
      memory: parseFloat(memPercentResult[0]?.value?.[1]) || 0,
      memoryUsed: parseFloat(memUsedResult[0]?.value?.[1]) || 0,
      memoryTotal: parseFloat(memTotalResult[0]?.value?.[1]) || 0,
      uptime: parseFloat(uptimeResult[0]?.value?.[1]) || 0,
      cpuCores: parseInt(cpuCoresResult[0]?.value?.[1], 10) || 1
    };
  } catch (error) {
    console.error("Prometheus current metrics error:", error);
    return { cpu: 0, memory: 0, memoryUsed: 0, memoryTotal: 0, uptime: 0, cpuCores: 1 };
  }
}
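
// Example usage (illustrative sketch; the import path is an assumption, not
// something this module defines):
//
//   import { getCurrentMetrics, getServerMetricsHistory } from "./prometheus.js";
//
//   const snapshot = await getCurrentMetrics("minecraft");
//   console.log(`CPU ${snapshot.cpu.toFixed(1)}% across ${snapshot.cpuCores} cores`);
//
//   const history = await getServerMetricsHistory("minecraft", "6h");
//   console.log(`${history.cpu.length} CPU samples over the last 6 hours`);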