Files
PromdataPanel/server/latency-service.js
CN-JS-HuiBai 1ce174bd93 修改逻辑
2026-04-06 01:04:46 +08:00

104 lines
4.0 KiB
JavaScript

const axios = require('axios');
const cache = require('./cache');
const db = require('./db');
const POLL_INTERVAL = 10000; // 10 seconds
// Latency metrics to scan, in order of preference, when extracting a
// duration from Blackbox Exporter output.
const LATENCY_METRICS = [
  'probe_icmp_duration_seconds',
  'probe_http_duration_seconds',
  'probe_dns_lookup_time_seconds',
  'probe_duration_seconds',
];

/**
 * Parse Blackbox Exporter output (Prometheus text exposition format) and
 * extract a latency value in milliseconds.
 *
 * @param {string} metricsText - Raw response body from a /probe request.
 * @returns {number|null} Latency in ms, or null when the probe reported
 *   failure (probe_success != 1) or no usable duration metric was found.
 */
function parseProbeLatency(metricsText) {
  const lines = metricsText
    .split('\n')
    .map((l) => l.trim())
    .filter((l) => l && !l.startsWith('#'));

  // probe_success 1 means the target was reachable; anything else means the
  // probe failed, so report null to indicate error/offline on the UI.
  const isProbeSuccess = lines.some((line) =>
    /^probe_success(\{.*\})?\s+1/.test(line)
  );
  if (!isProbeSuccess) return null;

  for (const metricName of LATENCY_METRICS) {
    // Match: metric_name{labels} value
    // Regex handles optional labels and scientific notation.
    const regex = new RegExp(`^${metricName}(?:\\{[^}]*\\})?\\s+([\\d.eE+-]+)`);
    for (const line of lines) {
      const match = line.match(regex);
      if (match) {
        const val = parseFloat(match[1]);
        if (!Number.isNaN(val)) {
          return val * 1000; // seconds -> ms
        }
      }
    }
  }
  return null;
}

/**
 * Poll every configured blackbox latency route once and cache the result.
 *
 * Loads all routes joined to their blackbox Prometheus source, probes each
 * one via the Blackbox Exporter /probe endpoint in parallel, and writes the
 * parsed latency (ms, or null on failure) to the cache under
 * `latency:route:<id>` with a 60s TTL. Never throws: per-route errors cache
 * null, and a top-level failure (e.g. DB down) is logged and swallowed so
 * the polling loop keeps running.
 *
 * @returns {Promise<void>}
 */
async function pollLatency() {
  try {
    const [routes] = await db.query(`
      SELECT r.*, s.url
      FROM latency_routes r
      JOIN prometheus_sources s ON r.source_id = s.id
      WHERE s.type = 'blackbox'
    `);
    if (routes.length === 0) return;

    // Poll all routes in parallel; one failing probe must not abort the rest.
    await Promise.allSettled(routes.map(async (route) => {
      try {
        const target = route.latency_target;
        // HTTP(S) targets use the http_2xx module; everything else is ICMP.
        const module = target.startsWith('http://') || target.startsWith('https://')
          ? 'http_2xx'
          : 'icmp';
        const probeUrl = `${route.url.replace(/\/+$/, '')}/probe?module=${module}&target=${encodeURIComponent(target)}`;
        const response = await axios.get(probeUrl, { timeout: 5000 });
        const latency = parseProbeLatency(response.data);
        // null signals error/offline to the UI; 60s TTL keeps stale values out.
        await cache.set(`latency:route:${route.id}`, latency, 60);
      } catch (err) {
        // Probe/network error: mark the route offline rather than leaving a
        // stale reading in the cache.
        await cache.set(`latency:route:${route.id}`, null, 60);
      }
    }));
  } catch (err) {
    console.error('[Latency] Service error:', err.message);
  }
}
let intervalId = null;
function start() {
if (intervalId) clearInterval(intervalId);
pollLatency(); // initial run
intervalId = setInterval(pollLatency, POLL_INTERVAL);
console.log('[Latency] Background service started (polling Blackbox Exporter directly)');
}
module.exports = {
start
};