优化数据库查询
This commit is contained in:
@@ -145,6 +145,15 @@ app.post('/api/setup/init', async (req, res) => {
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
|
||||
`);
|
||||
|
||||
await connection.query(`
|
||||
CREATE TABLE IF NOT EXISTS traffic_stats (
|
||||
id INT AUTO_INCREMENT PRIMARY KEY,
|
||||
rx_bytes BIGINT UNSIGNED DEFAULT 0,
|
||||
tx_bytes BIGINT UNSIGNED DEFAULT 0,
|
||||
timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP
|
||||
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
|
||||
`);
|
||||
|
||||
await connection.end();
|
||||
|
||||
// Save to .env
|
||||
@@ -383,6 +392,22 @@ app.get('/api/metrics/overview', async (req, res) => {
|
||||
allServers = allServers.concat(m.servers);
|
||||
}
|
||||
|
||||
// --- 24h Traffic from DB ---
|
||||
try {
|
||||
// Get the oldest record within 24h and the latest overall
|
||||
const [oldest] = await db.query('SELECT rx_bytes, tx_bytes, timestamp FROM traffic_stats WHERE timestamp >= NOW() - INTERVAL 1 DAY ORDER BY timestamp ASC LIMIT 1');
|
||||
const [latest] = await db.query('SELECT rx_bytes, tx_bytes, timestamp FROM traffic_stats ORDER BY timestamp DESC LIMIT 1');
|
||||
|
||||
if (oldest.length > 0 && latest.length > 0) {
|
||||
// Calculate difference. Handle counter resets (though unlikely for aggregated total)
|
||||
traffic24hRx = latest[0].rx_bytes > oldest[0].rx_bytes ? (latest[0].rx_bytes - oldest[0].rx_bytes) : 0;
|
||||
traffic24hTx = latest[0].tx_bytes > oldest[0].tx_bytes ? (latest[0].tx_bytes - oldest[0].tx_bytes) : 0;
|
||||
}
|
||||
} catch (err) {
|
||||
console.error('Error calculating 24h traffic from DB:', err);
|
||||
// Fallback to what we got from Prometheus directly in each source if DB fails
|
||||
}
|
||||
|
||||
res.json({
|
||||
totalServers,
|
||||
cpu: {
|
||||
@@ -480,6 +505,66 @@ app.get('*', (req, res) => {
|
||||
res.sendFile(path.join(__dirname, '..', 'public', 'index.html'));
|
||||
});
|
||||
|
||||
/**
 * Snapshot the aggregated network counters from every configured Prometheus
 * source into the `traffic_stats` table. Runs on a timer (see scheduling at
 * module bottom) so the dashboard can compute 24h traffic deltas from the DB.
 *
 * Best-effort: a failing source contributes zeros instead of aborting the
 * whole snapshot; DB errors are logged, never thrown.
 *
 * @returns {Promise<void>}
 */
async function recordTrafficStats() {
  if (!isDbInitialized) return;
  try {
    const [sources] = await db.query('SELECT * FROM prometheus_sources');
    if (sources.length === 0) return;

    // Query all sources in parallel; each source issues its RX and TX
    // PromQL queries concurrently as well.
    const results = await Promise.all(sources.map(async (source) => {
      try {
        const [rxRes, txRes] = await Promise.all([
          prometheusService.query(source.url, 'sum(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|br-.*"})'),
          prometheusService.query(source.url, 'sum(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|br-.*"})')
        ]);

        const rx = (rxRes.length > 0) ? parseFloat(rxRes[0].value[1]) : 0;
        const tx = (txRes.length > 0) ? parseFloat(txRes[0].value[1]) : 0;
        return { rx, tx };
      } catch (e) {
        // Fix: previously this failure was swallowed silently. Keep the
        // best-effort zeros, but surface the problem for operators.
        console.warn(`[Traffic Recorder] Failed to query source ${source.url}:`, e.message);
        return { rx: 0, tx: 0 };
      }
    }));

    const totalRx = results.reduce((sum, r) => sum + r.rx, 0);
    const totalTx = results.reduce((sum, r) => sum + r.tx, 0);

    // Skip the INSERT when every source failed or reported zero traffic,
    // so we do not pollute the table with meaningless all-zero rows.
    if (totalRx > 0 || totalTx > 0) {
      await db.query('INSERT INTO traffic_stats (rx_bytes, tx_bytes) VALUES (?, ?)', [Math.round(totalRx), Math.round(totalTx)]);
      console.log(`[Traffic Recorder] Saved stats: RX=${totalRx}, TX=${totalTx}`);
    }
  } catch (err) {
    console.error('[Traffic Recorder] Error recording stats:', err);
  }
}
|
||||
|
||||
/**
 * Migration safety net: create the `traffic_stats` table for installations
 * whose database was initialized before this table existed. The setup route
 * creates it for fresh installs; this covers already-initialized databases.
 *
 * @returns {Promise<void>}
 */
async function ensureTrafficTable() {
  if (!isDbInitialized) return;
  try {
    await db.query(`
      CREATE TABLE IF NOT EXISTS traffic_stats (
        id INT AUTO_INCREMENT PRIMARY KEY,
        rx_bytes BIGINT UNSIGNED DEFAULT 0,
        tx_bytes BIGINT UNSIGNED DEFAULT 0,
        timestamp TIMESTAMP DEFAULT CURRENT_TIMESTAMP
      ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
    `);
  } catch (err) {
    // Fix: was an empty catch. Table creation stays best-effort at startup,
    // but the failure must be visible so later "table missing" symptoms in
    // recordTrafficStats / the metrics route can be diagnosed.
    console.error('[Traffic Recorder] Failed to ensure traffic_stats table:', err);
  }
}
|
||||
|
||||
// Interval between periodic traffic snapshots, and the delay before the
// first sample after startup (so the dashboard is not empty for 5 minutes).
const TRAFFIC_RECORD_INTERVAL_MS = 5 * 60 * 1000;
const TRAFFIC_INITIAL_DELAY_MS = 10000;

// One-off migration check at startup. Fire-and-forget is intentional:
// the function handles its own errors and never rejects.
ensureTrafficTable();

setInterval(recordTrafficStats, TRAFFIC_RECORD_INTERVAL_MS);
setTimeout(recordTrafficStats, TRAFFIC_INITIAL_DELAY_MS);
|
||||
|
||||
app.listen(PORT, HOST, () => {
|
||||
console.log(`\n 🚀 Data Visualization Display Wall`);
|
||||
console.log(` 📊 Server running at http://${HOST === '0.0.0.0' ? 'localhost' : HOST}:${PORT}`);
|
||||
|
||||
@@ -152,25 +152,25 @@ async function getOverviewMetrics(url, sourceName) {
|
||||
upResult
|
||||
] = await Promise.all([
|
||||
// CPU usage per instance: 1 - avg idle
|
||||
query(url, '100 - (avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100)').catch(() => []),
|
||||
query(url, '100 - (avg by (instance, job) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100)').catch(() => []),
|
||||
// CPU count per instance
|
||||
query(url, 'count by (instance) (node_cpu_seconds_total{mode="idle"})').catch(() => []),
|
||||
query(url, 'count by (instance, job) (node_cpu_seconds_total{mode="idle"})').catch(() => []),
|
||||
// Memory total per instance
|
||||
query(url, 'node_memory_MemTotal_bytes').catch(() => []),
|
||||
// Memory available per instance
|
||||
query(url, 'node_memory_MemAvailable_bytes').catch(() => []),
|
||||
// Disk total per instance (root filesystem)
|
||||
query(url, 'sum by (instance) (node_filesystem_size_bytes{mountpoint="/",fstype!="tmpfs"})').catch(() => []),
|
||||
query(url, 'sum by (instance, job) (node_filesystem_size_bytes{mountpoint="/",fstype!="tmpfs"})').catch(() => []),
|
||||
// Disk free per instance (root filesystem)
|
||||
query(url, 'sum by (instance) (node_filesystem_free_bytes{mountpoint="/",fstype!="tmpfs"})').catch(() => []),
|
||||
query(url, 'sum by (instance, job) (node_filesystem_free_bytes{mountpoint="/",fstype!="tmpfs"})').catch(() => []),
|
||||
// Network receive rate (bytes/sec)
|
||||
query(url, 'sum by (instance) (rate(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[5m]))').catch(() => []),
|
||||
query(url, 'sum by (instance, job) (rate(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[5m]))').catch(() => []),
|
||||
// Network transmit rate (bytes/sec)
|
||||
query(url, 'sum by (instance) (rate(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[5m]))').catch(() => []),
|
||||
query(url, 'sum by (instance, job) (rate(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[5m]))').catch(() => []),
|
||||
// Total traffic received in last 24h
|
||||
query(url, 'sum by (instance) (increase(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[24h]))').catch(() => []),
|
||||
query(url, 'sum by (instance, job) (increase(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[24h]))').catch(() => []),
|
||||
// Total traffic transmitted in last 24h
|
||||
query(url, 'sum by (instance) (increase(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[24h]))').catch(() => []),
|
||||
query(url, 'sum by (instance, job) (increase(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[24h]))').catch(() => []),
|
||||
// Up instances
|
||||
query(url, 'up{job=~".*node.*|.*exporter.*"}').catch(() => [])
|
||||
]);
|
||||
@@ -180,7 +180,6 @@ async function getOverviewMetrics(url, sourceName) {
|
||||
|
||||
const getOrCreate = (metric) => {
|
||||
const key = metric.instance;
|
||||
|
||||
if (!instances.has(key)) {
|
||||
instances.set(key, {
|
||||
instance: key,
|
||||
@@ -197,7 +196,12 @@ async function getOverviewMetrics(url, sourceName) {
|
||||
up: false
|
||||
});
|
||||
}
|
||||
return instances.get(key);
|
||||
const inst = instances.get(key);
|
||||
// If job was Unknown but we now have a job name, update it
|
||||
if (inst.job === 'Unknown' && metric.job) {
|
||||
inst.job = metric.job;
|
||||
}
|
||||
return inst;
|
||||
};
|
||||
|
||||
// Parse UP status
|
||||
|
||||
Reference in New Issue
Block a user