refactor(stats-daemon): split into cpu/mem/temp/gpu modules, add gpu support

This commit is contained in:
Damocles 2026-04-17 11:11:11 +02:00
parent 1edd14cf30
commit 76ccc99e17
5 changed files with 532 additions and 414 deletions

128
stats-daemon/src/gpu.rs Normal file
View file

@@ -0,0 +1,128 @@
use std::fs;
use std::io::Write;
/// A single point-in-time snapshot of GPU statistics, vendor-agnostic.
pub struct GpuInfo {
    /// Busy/utilization percentage as reported by the driver (0–100).
    pub usage: u32,
    /// VRAM currently in use, in GiB (AMD) / converted from MiB (NVIDIA).
    pub vram_used_gb: f64,
    /// Total VRAM, in GiB.
    pub vram_total_gb: f64,
    /// GPU temperature in whole degrees Celsius; 0 when no sensor was found.
    pub temp_c: i32,
    /// Backend identifier embedded in the emitted JSON: "amd" or "nvidia".
    pub vendor: &'static str,
}
/// Which GPU interface to sample, decided once by `detect_gpu`.
pub enum GpuBackend {
    /// AMD GPU, read via the amdgpu sysfs interface.
    Amd {
        /// Device directory, e.g. `/sys/class/drm/card0/device`.
        card_path: String,
        /// Matching amdgpu hwmon directory for temperature, if one was found.
        hwmon_path: Option<String>,
    },
    /// NVIDIA GPU, sampled by invoking the `nvidia-smi` CLI.
    Nvidia,
    /// No supported GPU detected; GPU stats emission is skipped.
    None,
}
/// Detect which GPU backend is available on this machine.
///
/// Preference order: the amdgpu sysfs interface first, then a working
/// `nvidia-smi`, falling back to `GpuBackend::None`.
pub fn detect_gpu() -> GpuBackend {
    // AMD: the amdgpu driver exposes gpu_busy_percent under the card's
    // device directory; probe card0..card7 in order and take the first hit.
    let amd_card = (0..8).find_map(|idx| {
        let probe = format!("/sys/class/drm/card{idx}/device/gpu_busy_percent");
        fs::read_to_string(&probe).ok().map(|_| idx)
    });
    if let Some(idx) = amd_card {
        return GpuBackend::Amd {
            card_path: format!("/sys/class/drm/card{idx}/device"),
            hwmon_path: find_amd_hwmon(),
        };
    }

    // NVIDIA: a successful `nvidia-smi` invocation means the proprietary
    // driver stack is installed and a device is reachable.
    let have_nvidia = std::process::Command::new("nvidia-smi")
        .args(["--query-gpu=name", "--format=csv,noheader"])
        .output()
        .map(|o| o.status.success())
        .unwrap_or(false);

    if have_nvidia {
        GpuBackend::Nvidia
    } else {
        GpuBackend::None
    }
}
/// Locate the sysfs hwmon directory exposed by the amdgpu driver, if any.
///
/// Scans `/sys/class/hwmon/hwmon{0..31}` and returns the path of the first
/// entry whose `name` file reads `amdgpu`; `None` when no such entry exists.
fn find_amd_hwmon() -> Option<String> {
    for i in 0..32 {
        let name_path = format!("/sys/class/hwmon/hwmon{i}/name");
        // A missing/unreadable hwmon node must not abort the whole scan:
        // hwmon numbering can have gaps (e.g. after device removal), and the
        // previous `.ok()?` returned `None` for the entire function at the
        // first index that failed to read, hiding later amdgpu entries.
        let Ok(contents) = fs::read_to_string(&name_path) else {
            continue;
        };
        if contents.trim() == "amdgpu" {
            return Some(format!("/sys/class/hwmon/hwmon{i}"));
        }
    }
    None
}
/// Sample GPU usage, VRAM and temperature from the amdgpu sysfs interface.
///
/// `card` is the device directory (e.g. `/sys/class/drm/card0/device`);
/// `hwmon` is the optional amdgpu hwmon directory used for the temperature.
/// Returns `None` if any required sysfs file is missing or unparsable.
fn read_amd(card: &str, hwmon: &Option<String>) -> Option<GpuInfo> {
    // Read a sysfs file and parse its trimmed contents as `T`.
    fn read_parse<T: std::str::FromStr>(path: String) -> Option<T> {
        fs::read_to_string(path).ok()?.trim().parse().ok()
    }

    let usage = read_parse::<u32>(format!("{card}/gpu_busy_percent"))?;
    let vram_used = read_parse::<u64>(format!("{card}/mem_info_vram_used"))?;
    let vram_total = read_parse::<u64>(format!("{card}/mem_info_vram_total"))?;

    // temp1_input is in millidegrees Celsius; fall back to 0 when the hwmon
    // directory or sensor file is absent.
    let temp_c = match hwmon {
        Some(dir) => read_parse::<i32>(format!("{dir}/temp1_input"))
            .map(|millideg| millideg / 1000)
            .unwrap_or(0),
        None => 0,
    };

    // Raw sysfs VRAM counters are in bytes; report GiB.
    const GIB: f64 = 1_073_741_824.0;
    Some(GpuInfo {
        usage,
        vram_used_gb: vram_used as f64 / GIB,
        vram_total_gb: vram_total as f64 / GIB,
        temp_c,
        vendor: "amd",
    })
}
/// Sample GPU stats by running a single `nvidia-smi` CSV query.
///
/// Only the first GPU is reported when several are installed. Returns
/// `None` when the tool is unavailable, exits non-zero, or produces
/// output that cannot be parsed.
fn read_nvidia() -> Option<GpuInfo> {
    let out = std::process::Command::new("nvidia-smi")
        .args([
            "--query-gpu=utilization.gpu,memory.used,memory.total,temperature.gpu",
            "--format=csv,noheader,nounits",
        ])
        .output()
        .ok()?;
    if !out.status.success() {
        return None;
    }
    let stdout = String::from_utf8_lossy(&out.stdout);
    // With multiple GPUs nvidia-smi emits one CSV line per device. The old
    // code split the *whole* output on ',' so one field contained an
    // embedded newline ("61\n10"), which `trim` does not remove and parsing
    // rejected, turning a valid multi-GPU reading into `None`. Restrict
    // parsing to the first line (first GPU).
    let first_line = stdout.lines().next()?;
    let fields: Vec<&str> = first_line.trim().split(',').map(str::trim).collect();
    if fields.len() < 4 {
        return None;
    }
    Some(GpuInfo {
        usage: fields[0].parse().ok()?,
        // memory.used / memory.total are reported in MiB; convert to GiB.
        vram_used_gb: fields[1].parse::<f64>().ok()? / 1024.0,
        vram_total_gb: fields[2].parse::<f64>().ok()? / 1024.0,
        temp_c: fields[3].parse().ok()?,
        vendor: "nvidia",
    })
}
/// Sample the given GPU backend and write one JSON stats line to `out`.
///
/// Emits nothing when no backend is configured or sampling fails; write
/// errors are deliberately ignored (best-effort telemetry stream).
pub fn emit_gpu(out: &mut impl Write, backend: &GpuBackend) {
    let sample = match backend {
        GpuBackend::Amd {
            card_path,
            hwmon_path,
        } => read_amd(card_path, hwmon_path),
        GpuBackend::Nvidia => read_nvidia(),
        GpuBackend::None => None,
    };
    let Some(g) = sample else {
        return;
    };
    // Hand-rolled JSON is safe here: every field is numeric except vendor,
    // which is a fixed static string ("amd"/"nvidia") needing no escaping.
    let _ = writeln!(
        out,
        "{{\"type\":\"gpu\",\"usage\":{},\"vram_used_gb\":{:.3},\"vram_total_gb\":{:.3},\"temp_c\":{},\"vendor\":\"{}\"}}",
        g.usage, g.vram_used_gb, g.vram_total_gb, g.temp_c, g.vendor
    );
}