First Commit

This commit is contained in:
CN-JS-HuiBai
2026-04-04 15:13:32 +08:00
commit e69424dab2
14 changed files with 3927 additions and 0 deletions

14
server/db.js Normal file
View File

@@ -0,0 +1,14 @@
const mysql = require('mysql2/promise');

// Shared MySQL connection pool for the whole server. All settings come from
// MYSQL_* environment variables with local-development defaults.
const pool = mysql.createPool({
  host: process.env.MYSQL_HOST || 'localhost',
  // Always pass a radix to parseInt; an unset variable still falls back to
  // 3306 because NaN is falsy.
  port: parseInt(process.env.MYSQL_PORT, 10) || 3306,
  user: process.env.MYSQL_USER || 'root',
  password: process.env.MYSQL_PASSWORD || '',
  database: process.env.MYSQL_DATABASE || 'display_wall',
  waitForConnections: true,
  connectionLimit: 10,
  queueLimit: 0
});

module.exports = pool;

246
server/index.js Normal file
View File

@@ -0,0 +1,246 @@
// Load .env before anything else reads process.env.
require('dotenv').config();
const express = require('express');
const cors = require('cors');
const path = require('path');
const db = require('./db');
const prometheusService = require('./prometheus-service');
const app = express();
const PORT = process.env.PORT || 3000;
// Allow cross-origin calls, parse JSON request bodies, and serve the
// static frontend from ../public.
app.use(cors());
app.use(express.json());
app.use(express.static(path.join(__dirname, '..', 'public')));
// ==================== Prometheus Source CRUD ====================
// List all Prometheus sources, newest first, probing each one so the
// response carries a live online/offline status and version.
app.get('/api/sources', async (req, res) => {
  try {
    const [rows] = await db.query('SELECT * FROM prometheus_sources ORDER BY created_at DESC');
    const probed = await Promise.all(
      rows.map((source) =>
        prometheusService
          .testConnection(source.url)
          .then((version) => ({ ...source, status: 'online', version }))
          .catch(() => ({ ...source, status: 'offline', version: null }))
      )
    );
    res.json(probed);
  } catch (err) {
    console.error('Error fetching sources:', err);
    res.status(500).json({ error: 'Failed to fetch sources' });
  }
});
// Add a new Prometheus source; responds 201 with the freshly created row.
app.post('/api/sources', async (req, res) => {
  const { name, url, description } = req.body;
  if (!name || !url) {
    return res.status(400).json({ error: 'Name and URL are required' });
  }
  try {
    const insertSql = 'INSERT INTO prometheus_sources (name, url, description) VALUES (?, ?, ?)';
    const [insertResult] = await db.query(insertSql, [name, url, description || '']);
    const [created] = await db.query('SELECT * FROM prometheus_sources WHERE id = ?', [insertResult.insertId]);
    res.status(201).json(created[0]);
  } catch (err) {
    console.error('Error adding source:', err);
    res.status(500).json({ error: 'Failed to add source' });
  }
});
// Update a Prometheus source; responds with the updated row,
// 400 on missing fields, or 404 when the id does not exist.
app.put('/api/sources/:id', async (req, res) => {
  const { name, url, description } = req.body;
  // Mirror the POST validation: name and url are NOT NULL columns, so an
  // undefined value would otherwise surface as a 500 from the database.
  if (!name || !url) {
    return res.status(400).json({ error: 'Name and URL are required' });
  }
  try {
    const [result] = await db.query(
      'UPDATE prometheus_sources SET name = ?, url = ?, description = ? WHERE id = ?',
      [name, url, description || '', req.params.id]
    );
    if (result.affectedRows === 0) {
      // No row matched the id — report 404 instead of an empty body.
      return res.status(404).json({ error: 'Source not found' });
    }
    const [rows] = await db.query('SELECT * FROM prometheus_sources WHERE id = ?', [req.params.id]);
    res.json(rows[0]);
  } catch (err) {
    console.error('Error updating source:', err);
    res.status(500).json({ error: 'Failed to update source' });
  }
});
// Delete a Prometheus source; 404 when the id does not exist.
app.delete('/api/sources/:id', async (req, res) => {
  try {
    const [result] = await db.query('DELETE FROM prometheus_sources WHERE id = ?', [req.params.id]);
    if (result.affectedRows === 0) {
      // Nothing was deleted — report 404 instead of a false success.
      return res.status(404).json({ error: 'Source not found' });
    }
    res.json({ message: 'Source deleted' });
  } catch (err) {
    console.error('Error deleting source:', err);
    res.status(500).json({ error: 'Failed to delete source' });
  }
});
// Test connection to a Prometheus source without persisting anything.
app.post('/api/sources/test', async (req, res) => {
  const { url } = req.body;
  // Validate up front so a missing URL yields a clear message instead of
  // whatever axios reports for an undefined base URL.
  if (!url) {
    return res.status(400).json({ status: 'error', message: 'URL is required' });
  }
  try {
    const version = await prometheusService.testConnection(url);
    res.json({ status: 'ok', version });
  } catch (err) {
    res.status(400).json({ status: 'error', message: err.message });
  }
});
// ==================== Metrics Aggregation ====================
// Aggregated resource metrics across every configured Prometheus source.
app.get('/api/metrics/overview', async (req, res) => {
  // Shape returned when no sources are configured (and baseline for sums).
  const emptyOverview = () => ({
    totalServers: 0,
    cpu: { used: 0, total: 0, percent: 0 },
    memory: { used: 0, total: 0, percent: 0 },
    disk: { used: 0, total: 0, percent: 0 },
    network: { totalBandwidth: 0, rx: 0, tx: 0 },
    traffic24h: { rx: 0, tx: 0, total: 0 },
    servers: []
  });
  try {
    const [sources] = await db.query('SELECT * FROM prometheus_sources');
    if (sources.length === 0) {
      return res.json(emptyOverview());
    }
    // Fetch each source concurrently; an unreachable source contributes
    // null rather than failing the whole endpoint.
    const fetched = await Promise.all(sources.map((source) =>
      prometheusService.getOverviewMetrics(source.url, source.name).catch((err) => {
        console.error(`Error fetching metrics from ${source.name}:`, err.message);
        return null;
      })
    ));
    const metrics = fetched.filter((m) => m !== null);
    // Fold every source's totals into a single accumulator.
    const sum = metrics.reduce(
      (acc, m) => {
        acc.totalServers += m.totalServers;
        acc.cpuUsed += m.cpu.used;
        acc.cpuTotal += m.cpu.total;
        acc.memUsed += m.memory.used;
        acc.memTotal += m.memory.total;
        acc.diskUsed += m.disk.used;
        acc.diskTotal += m.disk.total;
        acc.netRx += m.network.rx;
        acc.netTx += m.network.tx;
        acc.rx24h += m.traffic24h.rx;
        acc.tx24h += m.traffic24h.tx;
        acc.servers.push(...m.servers);
        return acc;
      },
      {
        totalServers: 0,
        cpuUsed: 0, cpuTotal: 0,
        memUsed: 0, memTotal: 0,
        diskUsed: 0, diskTotal: 0,
        netRx: 0, netTx: 0,
        rx24h: 0, tx24h: 0,
        servers: []
      }
    );
    // Guarded percentage: 0 when the denominator is zero.
    const pct = (used, total) => (total > 0 ? (used / total * 100) : 0);
    res.json({
      totalServers: sum.totalServers,
      cpu: { used: sum.cpuUsed, total: sum.cpuTotal, percent: pct(sum.cpuUsed, sum.cpuTotal) },
      memory: { used: sum.memUsed, total: sum.memTotal, percent: pct(sum.memUsed, sum.memTotal) },
      disk: { used: sum.diskUsed, total: sum.diskTotal, percent: pct(sum.diskUsed, sum.diskTotal) },
      network: { totalBandwidth: sum.netRx + sum.netTx, rx: sum.netRx, tx: sum.netTx },
      traffic24h: { rx: sum.rx24h, tx: sum.tx24h, total: sum.rx24h + sum.tx24h },
      servers: sum.servers
    });
  } catch (err) {
    console.error('Error fetching overview metrics:', err);
    res.status(500).json({ error: 'Failed to fetch metrics' });
  }
});
// Network traffic history for the past 24h, merged across all sources.
app.get('/api/metrics/network-history', async (req, res) => {
  const empty = { timestamps: [], rx: [], tx: [] };
  try {
    const [sources] = await db.query('SELECT * FROM prometheus_sources');
    if (sources.length === 0) {
      return res.json(empty);
    }
    const results = await Promise.all(sources.map((source) =>
      prometheusService.getNetworkHistory(source.url).catch((err) => {
        console.error(`Error fetching network history from ${source.name}:`, err.message);
        return null;
      })
    ));
    const histories = results.filter((h) => h !== null);
    if (histories.length === 0) {
      return res.json(empty);
    }
    // Sum the per-source series at matching timestamps.
    res.json(prometheusService.mergeNetworkHistories(histories));
  } catch (err) {
    console.error('Error fetching network history:', err);
    res.status(500).json({ error: 'Failed to fetch network history' });
  }
});
// CPU usage history (for sparklines), averaged across all sources.
app.get('/api/metrics/cpu-history', async (req, res) => {
  const empty = { timestamps: [], values: [] };
  try {
    const [sources] = await db.query('SELECT * FROM prometheus_sources');
    if (sources.length === 0) {
      return res.json(empty);
    }
    const results = await Promise.all(sources.map((source) =>
      prometheusService.getCpuHistory(source.url).catch((err) => {
        console.error(`Error fetching CPU history from ${source.name}:`, err.message);
        return null;
      })
    ));
    const histories = results.filter((h) => h !== null);
    if (histories.length === 0) {
      return res.json(empty);
    }
    // Average the per-source series at matching timestamps.
    res.json(prometheusService.mergeCpuHistories(histories));
  } catch (err) {
    console.error('Error fetching CPU history:', err);
    res.status(500).json({ error: 'Failed to fetch CPU history' });
  }
});
// SPA fallback: every unmatched route serves the frontend entry point so
// client-side routing survives a hard refresh.
app.get('*', (req, res) => {
  res.sendFile(path.join(__dirname, '..', 'public', 'index.html'));
});
// Start the HTTP server and print the relevant URLs.
app.listen(PORT, () => {
  console.log(`\n 🚀 Data Visualization Display Wall`);
  console.log(` 📊 Server running at http://localhost:${PORT}`);
  console.log(` ⚙️ Configure Prometheus sources at http://localhost:${PORT}/settings\n`);
});

47
server/init-db.js Normal file
View File

@@ -0,0 +1,47 @@
/**
 * Database Initialization Script
 * Run: npm run init-db
 * Creates the required MySQL database and tables.
 */
require('dotenv').config();
const mysql = require('mysql2/promise');

/**
 * Create the application database (if missing) and its tables.
 * Connection settings come from MYSQL_* environment variables with
 * local-development defaults.
 * @throws propagates any connection or query error to the caller.
 */
async function initDatabase() {
  const connection = await mysql.createConnection({
    host: process.env.MYSQL_HOST || 'localhost',
    port: parseInt(process.env.MYSQL_PORT, 10) || 3306,
    user: process.env.MYSQL_USER || 'root',
    password: process.env.MYSQL_PASSWORD || ''
  });
  const dbName = process.env.MYSQL_DATABASE || 'display_wall';
  console.log('🔧 Initializing database...\n');
  try {
    // Create database
    await connection.query(`CREATE DATABASE IF NOT EXISTS \`${dbName}\` CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci`);
    console.log(` ✅ Database "${dbName}" ready`);
    await connection.query(`USE \`${dbName}\``);
    // Create prometheus_sources table
    await connection.query(`
      CREATE TABLE IF NOT EXISTS prometheus_sources (
        id INT AUTO_INCREMENT PRIMARY KEY,
        name VARCHAR(255) NOT NULL,
        url VARCHAR(500) NOT NULL,
        description TEXT DEFAULT '',
        created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
        updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
      ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci
    `);
    console.log(' ✅ Table "prometheus_sources" ready');
    console.log('\n🎉 Database initialization complete!\n');
  } finally {
    // Close the connection even when a query fails, so the process can
    // exit promptly instead of hanging on an open socket.
    await connection.end();
  }
}

initDatabase().catch(err => {
  console.error('❌ Database initialization failed:', err.message);
  process.exit(1);
});

320
server/prometheus-service.js Normal file
View File

@@ -0,0 +1,320 @@
const axios = require('axios');
// Upper bound (ms) on every HTTP request made to a Prometheus server below.
const QUERY_TIMEOUT = 10000;
/**
 * Build an axios client rooted at a Prometheus base URL.
 * Trailing slashes are stripped so request paths join cleanly.
 */
function createClient(baseUrl) {
  const normalizedBase = baseUrl.replace(/\/+$/, '');
  return axios.create({
    baseURL: normalizedBase,
    timeout: QUERY_TIMEOUT
  });
}
/**
 * Probe a Prometheus server's buildinfo endpoint.
 * Resolves with the reported version string (or 'unknown'); rejects
 * when the server is unreachable.
 */
async function testConnection(url) {
  const response = await createClient(url).get('/api/v1/status/buildinfo');
  return response.data?.data?.version || 'unknown';
}
/**
 * Execute a Prometheus instant query and return the raw result vector.
 * @throws {Error} when Prometheus reports a non-success status.
 */
async function query(url, expr) {
  const response = await createClient(url).get('/api/v1/query', { params: { query: expr } });
  const body = response.data;
  if (body.status !== 'success') {
    throw new Error(`Prometheus query failed: ${body.error || 'unknown error'}`);
  }
  return body.data.result;
}
/**
 * Execute a Prometheus range query over [start, end] at the given step
 * (all in seconds) and return the raw result matrix.
 * @throws {Error} when Prometheus reports a non-success status.
 */
async function queryRange(url, expr, start, end, step) {
  const response = await createClient(url).get('/api/v1/query_range', {
    params: { query: expr, start, end, step }
  });
  const body = response.data;
  if (body.status !== 'success') {
    throw new Error(`Prometheus range query failed: ${body.error || 'unknown error'}`);
  }
  return body.data.result;
}
/**
 * Get overview metrics from a single Prometheus source.
 *
 * Issues all node_exporter queries in parallel, groups the results by
 * the `instance` label, then returns both per-instance server rows and
 * source-level aggregates.
 *
 * @param {string} url - Base URL of the Prometheus server.
 * @param {string} sourceName - Display name attached to each server row.
 * @returns {Promise<Object>} { totalServers, cpu, memory, disk, network, traffic24h, servers }
 */
async function getOverviewMetrics(url, sourceName) {
  // Run all queries in parallel; each query degrades to an empty result
  // set on failure so one missing metric doesn't sink the whole overview.
  const [
    cpuResult,
    cpuCountResult,
    memTotalResult,
    memAvailResult,
    diskTotalResult,
    diskFreeResult,
    netRxResult,
    netTxResult,
    traffic24hRxResult,
    traffic24hTxResult,
    upResult
  ] = await Promise.all([
    // CPU usage per instance: 100 minus the average idle percentage
    query(url, '100 - (avg by (instance) (rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100)').catch(() => []),
    // CPU count per instance
    query(url, 'count by (instance) (node_cpu_seconds_total{mode="idle"})').catch(() => []),
    // Memory total per instance
    query(url, 'node_memory_MemTotal_bytes').catch(() => []),
    // Memory available per instance
    query(url, 'node_memory_MemAvailable_bytes').catch(() => []),
    // Disk total per instance (root filesystem)
    query(url, 'sum by (instance) (node_filesystem_size_bytes{mountpoint="/",fstype!="tmpfs"})').catch(() => []),
    // Disk free per instance (root filesystem)
    query(url, 'sum by (instance) (node_filesystem_free_bytes{mountpoint="/",fstype!="tmpfs"})').catch(() => []),
    // Network receive rate (bytes/sec), excluding loopback/virtual devices
    query(url, 'sum by (instance) (rate(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[5m]))').catch(() => []),
    // Network transmit rate (bytes/sec)
    query(url, 'sum by (instance) (rate(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[5m]))').catch(() => []),
    // Total traffic received in last 24h
    query(url, 'sum by (instance) (increase(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[24h]))').catch(() => []),
    // Total traffic transmitted in last 24h
    query(url, 'sum by (instance) (increase(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[24h]))').catch(() => []),
    // Up instances
    query(url, 'up{job=~".*node.*|.*exporter.*"}').catch(() => [])
  ]);
  // Build per-instance data map keyed by the `instance` label.
  const instances = new Map();
  const getOrCreate = (instance) => {
    if (!instances.has(instance)) {
      instances.set(instance, {
        instance,
        source: sourceName,
        cpuPercent: 0,
        cpuCores: 0,
        memTotal: 0,
        memUsed: 0,
        diskTotal: 0,
        diskUsed: 0,
        netRx: 0,
        netTx: 0,
        up: false
      });
    }
    return instances.get(instance);
  };
  // Parse UP status (r.value is [timestamp, stringValue])
  for (const r of upResult) {
    const inst = getOrCreate(r.metric.instance);
    inst.up = parseFloat(r.value[1]) === 1;
  }
  // Parse CPU usage
  for (const r of cpuResult) {
    const inst = getOrCreate(r.metric.instance);
    inst.cpuPercent = parseFloat(r.value[1]) || 0;
  }
  // Parse CPU count
  for (const r of cpuCountResult) {
    const inst = getOrCreate(r.metric.instance);
    inst.cpuCores = parseFloat(r.value[1]) || 0;
  }
  // Parse memory. NOTE: memUsed is derived from memTotal, so the MemTotal
  // loop must run before the MemAvailable loop — do not reorder.
  for (const r of memTotalResult) {
    const inst = getOrCreate(r.metric.instance);
    inst.memTotal = parseFloat(r.value[1]) || 0;
  }
  for (const r of memAvailResult) {
    const inst = getOrCreate(r.metric.instance);
    inst.memUsed = inst.memTotal - (parseFloat(r.value[1]) || 0);
  }
  // Parse disk. Same ordering dependency: diskUsed = diskTotal - free.
  for (const r of diskTotalResult) {
    const inst = getOrCreate(r.metric.instance);
    inst.diskTotal = parseFloat(r.value[1]) || 0;
  }
  for (const r of diskFreeResult) {
    const inst = getOrCreate(r.metric.instance);
    inst.diskUsed = inst.diskTotal - (parseFloat(r.value[1]) || 0);
  }
  // Parse network rates
  for (const r of netRxResult) {
    const inst = getOrCreate(r.metric.instance);
    inst.netRx = parseFloat(r.value[1]) || 0;
  }
  for (const r of netTxResult) {
    const inst = getOrCreate(r.metric.instance);
    inst.netTx = parseFloat(r.value[1]) || 0;
  }
  // Aggregate across instances.
  let totalCpuUsed = 0, totalCpuCores = 0;
  let totalMemUsed = 0, totalMemTotal = 0;
  let totalDiskUsed = 0, totalDiskTotal = 0;
  let totalNetRx = 0, totalNetTx = 0;
  let totalTraffic24hRx = 0, totalTraffic24hTx = 0;
  for (const inst of instances.values()) {
    // Convert per-instance CPU percent into "cores in use" so usage can
    // be summed meaningfully across machines of different sizes.
    totalCpuUsed += (inst.cpuPercent / 100) * inst.cpuCores;
    totalCpuCores += inst.cpuCores;
    totalMemUsed += inst.memUsed;
    totalMemTotal += inst.memTotal;
    totalDiskUsed += inst.diskUsed;
    totalDiskTotal += inst.diskTotal;
    totalNetRx += inst.netRx;
    totalNetTx += inst.netTx;
  }
  // Parse 24h traffic (summed directly; not tracked per instance)
  for (const r of traffic24hRxResult) {
    totalTraffic24hRx += parseFloat(r.value[1]) || 0;
  }
  for (const r of traffic24hTxResult) {
    totalTraffic24hTx += parseFloat(r.value[1]) || 0;
  }
  return {
    totalServers: instances.size,
    cpu: {
      used: totalCpuUsed,
      total: totalCpuCores,
      percent: totalCpuCores > 0 ? (totalCpuUsed / totalCpuCores * 100) : 0
    },
    memory: {
      used: totalMemUsed,
      total: totalMemTotal,
      percent: totalMemTotal > 0 ? (totalMemUsed / totalMemTotal * 100) : 0
    },
    disk: {
      used: totalDiskUsed,
      total: totalDiskTotal,
      percent: totalDiskTotal > 0 ? (totalDiskUsed / totalDiskTotal * 100) : 0
    },
    network: {
      rx: totalNetRx,
      tx: totalNetTx
    },
    traffic24h: {
      rx: totalTraffic24hRx,
      tx: totalTraffic24hTx
    },
    servers: Array.from(instances.values())
  };
}
/**
 * Fetch 24 hours of cluster-wide network throughput (bytes/sec) at
 * 15-minute resolution from one Prometheus source.
 * @returns {Promise<{rxValues: Array, txValues: Array}>} raw [ts, value] pairs.
 */
async function getNetworkHistory(url) {
  const end = Math.floor(Date.now() / 1000);
  const start = end - 86400; // 24h window
  const step = 900;          // 15-minute resolution
  const rxExpr = 'sum(rate(node_network_receive_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[5m]))';
  const txExpr = 'sum(rate(node_network_transmit_bytes_total{device!~"lo|veth.*|docker.*|br-.*"}[5m]))';
  const [rxSeries, txSeries] = await Promise.all([
    queryRange(url, rxExpr, start, end, step).catch(() => []),
    queryRange(url, txExpr, start, end, step).catch(() => [])
  ]);
  // Each series holds at most one aggregate result; its .values is the
  // [[timestamp, value], ...] list we care about.
  return {
    rxValues: rxSeries.length > 0 ? rxSeries[0].values : [],
    txValues: txSeries.length > 0 ? txSeries[0].values : []
  };
}
/**
 * Merge network histories from multiple sources by summing rx/tx at
 * matching timestamps.
 * @param {Array<{rxValues: Array, txValues: Array}>} histories
 * @returns {{timestamps: number[], rx: number[], tx: number[]}} sorted by time,
 *          timestamps converted from seconds to milliseconds for JS Dates.
 */
function mergeNetworkHistories(histories) {
  const byTimestamp = new Map();
  // Fold one [ts, value] list into the named field of its time bucket.
  const accumulate = (samples, field) => {
    for (const [ts, raw] of samples) {
      const bucket = byTimestamp.get(ts) || { rx: 0, tx: 0 };
      bucket[field] += parseFloat(raw) || 0;
      byTimestamp.set(ts, bucket);
    }
  };
  for (const history of histories) {
    accumulate(history.rxValues, 'rx');
    accumulate(history.txValues, 'tx');
  }
  const ordered = [...byTimestamp.entries()].sort(([a], [b]) => a - b);
  return {
    timestamps: ordered.map(([ts]) => ts * 1000), // ms for JS
    rx: ordered.map(([, bucket]) => bucket.rx),
    tx: ordered.map(([, bucket]) => bucket.tx)
  };
}
/**
 * Fetch one hour of cluster-average CPU usage (%) at 1-minute resolution
 * from one Prometheus source.
 * @returns {Promise<Array>} [[timestamp, value], ...] (empty on failure).
 */
async function getCpuHistory(url) {
  const end = Math.floor(Date.now() / 1000);
  const start = end - 3600; // 1h window
  const series = await queryRange(
    url,
    '100 - (avg(rate(node_cpu_seconds_total{mode="idle"}[5m])) * 100)',
    start,
    end,
    60 // 1-minute resolution
  ).catch(() => []);
  return series.length > 0 ? series[0].values : [];
}
/**
 * Merge CPU histories from multiple sources by averaging the values
 * observed at each timestamp.
 * @param {Array<Array>} histories - list of [[ts, value], ...] series.
 * @returns {{timestamps: number[], values: number[]}} sorted by time,
 *          timestamps in milliseconds.
 */
function mergeCpuHistories(histories) {
  const buckets = new Map();
  for (const series of histories) {
    for (const [ts, raw] of series) {
      const sample = parseFloat(raw) || 0;
      const bucket = buckets.get(ts);
      if (bucket) {
        bucket.sum += sample;
        bucket.count += 1;
      } else {
        buckets.set(ts, { sum: sample, count: 1 });
      }
    }
  }
  const ordered = [...buckets.entries()].sort(([a], [b]) => a - b);
  return {
    timestamps: ordered.map(([ts]) => ts * 1000),
    values: ordered.map(([, b]) => b.sum / b.count)
  };
}
// Public API of the Prometheus service module.
module.exports = {
  testConnection,
  query,
  queryRange,
  getOverviewMetrics,
  getNetworkHistory,
  mergeNetworkHistories,
  getCpuHistory,
  mergeCpuHistories
};