Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

add gpu ram collector for nvidia feature flag #794

Merged
merged 9 commits into from
Oct 15, 2022
Merged
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 29 additions & 1 deletion src/app/data_farmer.rs
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,8 @@ pub struct TimedData {
pub swap_data: Option<Value>,
#[cfg(feature = "zfs")]
pub arc_data: Option<Value>,
#[cfg(feature = "gpu")]
pub gpu_data: Vec<Option<Value>>,
}

#[derive(Clone, Debug, Default)]
Expand Down Expand Up @@ -133,6 +135,8 @@ pub struct DataCollection {
pub battery_harvest: Vec<batteries::BatteryHarvest>,
#[cfg(feature = "zfs")]
pub arc_harvest: memory::MemHarvest,
#[cfg(feature = "gpu")]
pub gpu_harvest: Vec<(String, memory::MemHarvest)>,
}

impl Default for DataCollection {
Expand All @@ -155,6 +159,8 @@ impl Default for DataCollection {
battery_harvest: Vec::default(),
#[cfg(feature = "zfs")]
arc_harvest: memory::MemHarvest::default(),
#[cfg(feature = "gpu")]
gpu_harvest: Vec::default(),
}
}
}
Expand All @@ -179,6 +185,10 @@ impl DataCollection {
{
self.arc_harvest = memory::MemHarvest::default();
}
#[cfg(feature = "gpu")]
{
self.gpu_harvest = Vec::default();
}
}

pub fn clean_data(&mut self, max_time_millis: u64) {
Expand Down Expand Up @@ -222,6 +232,14 @@ impl DataCollection {
self.eat_arc(arc, &mut new_entry);
}
}

#[cfg(feature = "gpu")]
{
if let Some(gpu) = harvested_data.gpu {
self.eat_gpu(gpu, &mut new_entry);
}
}

// CPU
if let Some(cpu) = harvested_data.cpu {
self.eat_cpu(cpu, &mut new_entry);
Expand Down Expand Up @@ -405,8 +423,18 @@ impl DataCollection {

#[cfg(feature = "zfs")]
fn eat_arc(&mut self, arc: memory::MemHarvest, new_entry: &mut TimedData) {
    // Record the ZFS ARC usage percentage for this tick's data point,
    // then keep the full harvest for display (totals, labels, etc.).
    new_entry.arc_data = arc.use_percent;
    self.arc_harvest = arc;
}

#[cfg(feature = "gpu")]
fn eat_gpu(&mut self, gpu: Vec<(String, memory::MemHarvest)>, new_entry: &mut TimedData) {
    // Pre-calculate the per-GPU usage percentages for this tick. Only the
    // percentages go into the timed entry; the device names stay paired
    // with their harvests in `gpu_harvest`, and since both are filled
    // sequentially from the same `Vec`, index i of `gpu_data` corresponds
    // to index i of `gpu_harvest`.
    new_entry
        .gpu_data
        .extend(gpu.iter().map(|(_name, harvest)| harvest.use_percent));

    // `gpu` is already owned, so move it into place. (Calling `to_vec()`
    // here would deep-clone every name/harvest pair and then drop the
    // original — a needless allocation per collection tick.)
    self.gpu_harvest = gpu;
}
}
16 changes: 16 additions & 0 deletions src/app/data_harvester.rs
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,9 @@ use futures::join;

use super::DataFilters;

#[cfg(feature = "nvidia")]
pub mod nvidia;

#[cfg(feature = "battery")]
pub mod batteries;
pub mod cpu;
Expand All @@ -42,6 +45,8 @@ pub struct Data {
pub list_of_batteries: Option<Vec<batteries::BatteryHarvest>>,
#[cfg(feature = "zfs")]
pub arc: Option<memory::MemHarvest>,
#[cfg(feature = "gpu")]
pub gpu: Option<Vec<(String, memory::MemHarvest)>>,
}

impl Default for Data {
Expand All @@ -61,6 +66,8 @@ impl Default for Data {
list_of_batteries: None,
#[cfg(feature = "zfs")]
arc: None,
#[cfg(feature = "gpu")]
gpu: None,
}
}
}
Expand All @@ -83,6 +90,10 @@ impl Data {
{
self.arc = None;
}
#[cfg(feature = "gpu")]
{
self.gpu = None;
}
}
}

Expand Down Expand Up @@ -436,6 +447,11 @@ impl DataCollector {
self.data.arc = arc;
}

#[cfg(feature = "gpu")]
if let Ok(gpu) = mem_res.3 {
self.data.gpu = gpu;
}

if let Ok(disks) = disk_res {
self.data.disks = disks;
}
Expand Down
61 changes: 57 additions & 4 deletions src/app/data_harvester/memory/general/heim.rs
Original file line number Diff line number Diff line change
Expand Up @@ -8,13 +8,19 @@ pub async fn get_mem_data(
crate::utils::error::Result<Option<MemHarvest>>,
crate::utils::error::Result<Option<MemHarvest>>,
crate::utils::error::Result<Option<MemHarvest>>,
crate::utils::error::Result<Option<Vec<(String, MemHarvest)>>>,
) {
use futures::join;

if !actually_get {
(Ok(None), Ok(None), Ok(None))
(Ok(None), Ok(None), Ok(None), Ok(None))
} else {
join!(get_ram_data(), get_swap_data(), get_arc_data())
join!(
get_ram_data(),
get_swap_data(),
get_arc_data(),
get_gpu_data()
)
jamartin9 marked this conversation as resolved.
Show resolved Hide resolved
}
}

Expand Down Expand Up @@ -180,6 +186,8 @@ pub async fn get_arc_data() -> crate::utils::error::Result<Option<MemHarvest>> {
{
let mut mem_arc = 0;
let mut mem_total = 0;
let mut zfs_keys_read: u8 = 0;
const ZFS_KEYS_NEEDED: u8 = 2;
use smol::fs::read_to_string;
let arcinfo = read_to_string("/proc/spl/kstat/zfs/arcstats").await?;
for line in arcinfo.lines() {
Expand All @@ -191,8 +199,7 @@ pub async fn get_arc_data() -> crate::utils::error::Result<Option<MemHarvest>> {
continue;
}
};
let mut zfs_keys_read: u8 = 0;
const ZFS_KEYS_NEEDED: u8 = 2;

if let Some((_type, number)) = value.trim_start().rsplit_once(' ') {
// Parse the value, remember it's in bytes!
if let Ok(number) = number.parse::<u64>() {
Expand Down Expand Up @@ -247,3 +254,49 @@ pub async fn get_arc_data() -> crate::utils::error::Result<Option<MemHarvest>> {
},
}))
}

/// Harvests per-GPU memory usage via NVML.
///
/// Returns one `(device name, MemHarvest)` pair per visible NVIDIA GPU, or
/// `Ok(None)` when the `nvidia` feature is disabled or NVML failed to
/// initialise / enumerate devices. Devices whose name or memory info cannot
/// be read are silently skipped (best-effort collection).
pub async fn get_gpu_data() -> crate::utils::error::Result<Option<Vec<(String, MemHarvest)>>> {
    // Exactly one of the two `#[cfg]` blocks below survives compilation,
    // and that block is then the function's tail expression.
    #[cfg(not(feature = "nvidia"))]
    {
        Ok(None)
    }

    #[cfg(feature = "nvidia")]
    {
        use crate::data_harvester::nvidia::NVML_DATA;
        // NVML_DATA caches the one-time `NVML::init` result; a failed init
        // stays failed and we just report no GPU data.
        if let Ok(nvml) = &*NVML_DATA {
            if let Ok(ngpu) = nvml.device_count() {
                let mut results = Vec::with_capacity(ngpu as usize);
                for i in 0..ngpu {
                    if let Ok(device) = nvml.device_by_index(i) {
                        if let (Ok(name), Ok(mem)) = (device.name(), device.memory_info()) {
                            // NVML reports memory in bytes; convert to KiB
                            // to match the MemHarvest convention.
                            let mem_total_in_kib = mem.total / 1024;
                            let mem_used_in_kib = mem.used / 1024;
                            results.push((
                                name,
                                MemHarvest {
                                    mem_total_in_kib,
                                    mem_used_in_kib,
                                    // Guard against a zero total to avoid
                                    // a NaN from 0/0.
                                    use_percent: if mem_total_in_kib == 0 {
                                        None
                                    } else {
                                        Some(
                                            mem_used_in_kib as f64 / mem_total_in_kib as f64
                                                * 100.0,
                                        )
                                    },
                                },
                            ));
                        }
                    }
                }
                Ok(Some(results))
            } else {
                Ok(None)
            }
        } else {
            Ok(None)
        }
    }
}
56 changes: 54 additions & 2 deletions src/app/data_harvester/memory/general/sysinfo.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,13 +9,19 @@ pub async fn get_mem_data(
crate::utils::error::Result<Option<MemHarvest>>,
crate::utils::error::Result<Option<MemHarvest>>,
crate::utils::error::Result<Option<MemHarvest>>,
crate::utils::error::Result<Option<Vec<(String, MemHarvest)>>>,
) {
use futures::join;

if !actually_get {
(Ok(None), Ok(None), Ok(None))
(Ok(None), Ok(None), Ok(None), Ok(None))
} else {
join!(get_ram_data(sys), get_swap_data(sys), get_arc_data())
join!(
get_ram_data(sys),
get_swap_data(sys),
get_arc_data(),
get_gpu_data()
)
}
}

Expand Down Expand Up @@ -82,3 +88,49 @@ pub async fn get_arc_data() -> crate::utils::error::Result<Option<MemHarvest>> {
},
}))
}

/// Harvests per-GPU memory usage via NVML.
///
/// Produces one `(device name, MemHarvest)` pair per visible NVIDIA GPU.
/// Yields `Ok(None)` when the `nvidia` feature is off, or when NVML could
/// not be initialised or enumerated; unreadable devices are skipped.
pub async fn get_gpu_data() -> crate::utils::error::Result<Option<Vec<(String, MemHarvest)>>> {
    // Exactly one of the two `#[cfg]` blocks survives compilation and
    // becomes the function's tail expression.
    #[cfg(not(feature = "nvidia"))]
    {
        Ok(None)
    }

    #[cfg(feature = "nvidia")]
    {
        use crate::data_harvester::nvidia::NVML_DATA;

        // Bail out early if the cached NVML handle or the device count is
        // unavailable — there is nothing to report in either case.
        let nvml = match &*NVML_DATA {
            Ok(handle) => handle,
            Err(_) => return Ok(None),
        };
        let device_count = match nvml.device_count() {
            Ok(count) => count,
            Err(_) => return Ok(None),
        };

        let mut harvests = Vec::with_capacity(device_count as usize);
        for index in 0..device_count {
            let device = match nvml.device_by_index(index) {
                Ok(device) => device,
                Err(_) => continue,
            };
            let (name, mem) = match (device.name(), device.memory_info()) {
                (Ok(name), Ok(mem)) => (name, mem),
                _ => continue,
            };

            // NVML reports bytes; MemHarvest stores KiB.
            let mem_total_in_kib = mem.total / 1024;
            let mem_used_in_kib = mem.used / 1024;
            // Avoid a 0/0 NaN when the reported total is zero.
            let use_percent = if mem_total_in_kib == 0 {
                None
            } else {
                Some(mem_used_in_kib as f64 / mem_total_in_kib as f64 * 100.0)
            };

            harvests.push((
                name,
                MemHarvest {
                    mem_total_in_kib,
                    mem_used_in_kib,
                    use_percent,
                },
            ));
        }

        Ok(Some(harvests))
    }
}
3 changes: 3 additions & 0 deletions src/app/data_harvester/nvidia.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
use nvml_wrapper::{error::NvmlError, NVML};
use once_cell::sync::Lazy;
/// Lazily-initialised global NVML handle shared by all GPU collectors.
/// `NVML::init` runs exactly once, on first access; the `Result` is cached,
/// so a failed initialisation is not retried and callers simply match on it.
pub static NVML_DATA: Lazy<Result<NVML, NvmlError>> = Lazy::new(NVML::init);
6 changes: 4 additions & 2 deletions src/app/data_harvester/temperature/nvidia.rs
Original file line number Diff line number Diff line change
Expand Up @@ -5,12 +5,14 @@ use super::{
TemperatureType,
};

use nvml_wrapper::{enum_wrappers::device::TemperatureSensor, NVML};
use nvml_wrapper::enum_wrappers::device::TemperatureSensor;

use crate::data_harvester::nvidia::NVML_DATA;

pub fn add_nvidia_data(
temperature_vec: &mut Vec<TempHarvest>, temp_type: &TemperatureType, filter: &Option<Filter>,
) -> crate::utils::error::Result<()> {
if let Ok(nvml) = NVML::init() {
if let Ok(nvml) = &*NVML_DATA {
if let Ok(ngpu) = nvml.device_count() {
for i in 0..ngpu {
if let Ok(device) = nvml.device_by_index(i) {
Expand Down
5 changes: 5 additions & 0 deletions src/bin/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,11 @@ fn main() -> Result<()> {
app.converted_data.arc_data =
convert_arc_data_points(&app.data_collection);
}
#[cfg(feature = "gpu")]
{
app.converted_data.gpu_data =
convert_gpu_data(&app.data_collection);
}
let (memory_labels, swap_labels) =
convert_mem_labels(&app.data_collection);

Expand Down
24 changes: 17 additions & 7 deletions src/canvas.rs
Original file line number Diff line number Diff line change
Expand Up @@ -451,19 +451,29 @@ impl Painter {
}
};

let mut mem_rows = 0;
let mut mem_rows = 1;

if app_state.converted_data.swap_labels.is_some() {
mem_rows += 1; // add row for swap
}

#[cfg(feature = "zfs")]
{
let arc_data = &app_state.converted_data.arc_data;
if let Some(arc) = arc_data.last() {
if arc.1 != 0.0 {
mem_rows += 1; // add row for arc
}
if app_state.converted_data.arc_labels.is_some() {
mem_rows += 1; // add row for arc
}
}

#[cfg(feature = "gpu")]
{
if let Some(gpu_data) = &app_state.converted_data.gpu_data {
mem_rows += gpu_data.len() as u16; // add row(s) for gpu
}
}

mem_rows += 2; // add rows for SWAP and MEM
if mem_rows == 1 {
mem_rows += 1; // need at least 2 rows for RX and TX
}

let vertical_chunks = Layout::default()
.direction(Direction::Vertical)
Expand Down
Loading