Compare commits

...

29 Commits

Author SHA1 Message Date
Gregory Ballantine
a6f6994c2b Added runtime req for iperf3
Some checks failed
ci/woodpecker/push/woodpecker Pipeline failed
2024-01-28 19:59:31 -05:00
a4e0f27ddb Version bump to v0.4.0
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2022-09-05 13:06:02 -04:00
f9de9ef93c Renamed the 'ping' test to the 'latency' test to more accurately reflect the test; Added the '-i' flag to the jitter and latency tests to set the ping interval timing
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-09-05 13:05:32 -04:00
3169c11cc5 Version bump to v0.3.3
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2022-09-05 12:31:47 -04:00
d4d3e37e8d Completed the network bandwidth test function using iperf3; adjusted Cargo.toml a bit to allow patch updates for dependencies
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-09-05 12:31:18 -04:00
7b7150886a Made the Steam game download function work for both Windows and Linux
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-22 13:47:49 -04:00
4aa3eddc91 Added scaffolding for a few game benchmarks - they currently just make sure the game is installed via Steam, nothing else
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-22 13:45:16 -04:00
8a7d8c1860 Version bump to v0.3.2
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2022-08-17 18:49:12 -04:00
39ce86d2c3 Fixed the network ping and jitter tests to run on Windows
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-17 18:48:07 -04:00
88be1ad2ba Added the scaffolding for the jitter test. Now just need to perform the proper calculations.
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-17 18:39:53 -04:00
7555a2a3a7 Version bump to v0.3.1
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2022-08-17 18:05:41 -04:00
9cd88d923d Changed the parameter for the network ping test
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-17 18:03:48 -04:00
6fdc52b320 Updated the network bandwidth arguments to be more consistent with the disk command arguments
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-17 18:01:14 -04:00
f9ca8beaad Fixed the tempfile flags for some of the disk commands
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-17 17:58:04 -04:00
132a0ee501 Added a runtime parameter to the CPU stress test to automatically limit how long it runs
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-17 15:28:11 -04:00
360ef2f959 Version bump to v0.3.0
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2022-08-07 10:19:02 -04:00
0f887d0c76 Added a new CPU test suite with a CPU stress test tool.
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-07 10:18:30 -04:00
983b1cbeef Renamed the 'tests' module to 'benchmarks' to better reflect what's in that module
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-07 09:39:26 -04:00
d967fc0920 Added a function for truncating output
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-07 09:37:33 -04:00
beffae7cd4 Version bump to v0.2.4
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2022-08-06 17:31:27 -04:00
477ad7482a Added a global parameter to loop tests
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-06 17:31:02 -04:00
833c71bed5 Version bump to v0.2.3
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2022-08-06 01:54:37 -04:00
182d9b3cb4 Added package info for packaging
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-06 01:54:17 -04:00
94218af170 Version bump to v0.2.0
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2022-08-06 01:27:38 -04:00
9856c59da9 Replaced dd with fio in the disk write subcommand; added subcommand for disk random writes
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-06 01:27:15 -04:00
e41a4dfffb Updated the disk read test to use fio instead of dd; added new test for 4k random reads (previous test was for sequential reads)
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-06 01:10:13 -04:00
6f86266dee Updated README
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-05 21:47:42 -04:00
e39b78d5d7 Version bump to v0.2.1
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
ci/woodpecker/tag/woodpecker Pipeline was successful
2022-08-05 21:37:29 -04:00
be549808b5 Added a disk benchmark test for sequential reads
All checks were successful
ci/woodpecker/push/woodpecker Pipeline was successful
2022-08-05 21:37:15 -04:00
14 changed files with 484 additions and 107 deletions

.woodpecker.yml

@@ -2,12 +2,15 @@ pipeline:
test_build:
image: rust:1.62
commands:
- cargo build
- "cargo build"
build_release:
image: rust:1.62
commands:
- cargo build --release
- "cargo install cargo-deb cargo-generate-rpm"
- "cargo build --release"
- "cargo deb"
- "cargo generate-rpm"
- "mv target/release/bgbench target/release/bgbench-${CI_COMMIT_TAG}-linux-x86_64"
when:
event: tag
@@ -20,6 +23,8 @@ pipeline:
base_url: https://git.metaunix.net
files:
- "target/release/*${CI_COMMIT_TAG}-linux-x86_64"
- "target/debian/hardware-tests*.deb"
- "target/generate-rpm/hardware-tests*.rpm"
title: "${CI_COMMIT_TAG}"
when:
event: tag

Cargo.toml

@@ -1,14 +1,27 @@
[package]
name = "hardware-tests"
version = "0.2.0"
description = "Bit Goblin PC hardware test suite."
version = "0.4.0"
edition = "2021"
readme = "README.md"
license = "BSD 2-Clause"
authors = ["Gregory Ballantine <gballantine@bitgoblin.tech>"]
[[bin]]
name = "bgbench"
path = "src/main.rs"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
chrono = "0.4.20"
clap = { version = "3.2.16", features = ["derive"] }
chrono = "0.4"
clap = { version = "3.2", features = ["derive"] }
sysinfo = "0.25"
[package.metadata.deb]
depends = "fio"
[package.metadata.generate-rpm]
assets = [
{ source = "target/release/bgbench", dest = "/usr/bin/bgbench", mode = "755" },
]
[package.metadata.generate-rpm.requires]
fio = "*"

README.md

@@ -1,6 +1,6 @@
# Bit Goblin Hardware Tests
Scripts used for testing hardware in Bit Goblin's videos.
Benchmarking suite used for testing hardware in Bit Goblin's videos.
## Download & Installation
@@ -8,9 +8,17 @@ Check out the [Releases page](https://git.metaunix.net/BitGoblin/hardware-tests/
Currently there is no installation method other than downloading the provided release binaries. In the future I want to build Linux package repositories for this, and have a Windows installer.
## Running
Simply run the tool with `./bgbench` and you'll be presented with the available subcommands.
### Runtime requirements:
* `disk` - requires `fio`.
* `network bandwidth` - requires `iperf3`.
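Both external tools are invoked at runtime via `std::process::Command`, so a missing binary only surfaces as an error when the test runs. A minimal preflight-check sketch; the `tool_available` helper is hypothetical and not part of this repository:

```rust
use std::process::{Command, Stdio};

// Hypothetical helper: returns true if `name` (e.g. "fio" or "iperf3") can be spawned.
fn tool_available(name: &str) -> bool {
    Command::new(name)
        .arg("--version")
        .stdout(Stdio::null())
        .stderr(Stdio::null())
        .status()
        .is_ok()
}
```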
## Building
The easiest way to build the program is using the official Rust image from Docker Hub, for which there's a wrapper script at `bin/docker-build.sh` that can be used to build the test programs. These will be available under `target/debug/`.
The easiest way to build the program is to use the official Rust image from Docker Hub; a wrapper script at `bin/docker-build.sh` handles this, and the resulting binaries will be available under `target/debug/`. Alternatively, you can simply run `cargo build` to compile the program.
## License

src/benchmarks/disk.rs (new file, 109 lines)

@@ -0,0 +1,109 @@
use std::process::Command;
// test disk sequential read speeds w/ fio
pub fn disk_read_seq_test(tempfile: &str, size: &u8) {
// run the fio command
let output = Command::new("fio")
.arg("--name=TEST")
.arg(format!("--filename={}", tempfile))
.arg("--rw=read")
.arg("--size=2g")
.arg(format!("--io_size={}g", size))
.arg("--blocksize=1024k")
.arg("--ioengine=libaio")
.arg("--fsync=10000")
.arg("--iodepth=32")
.arg("--direct=1")
.arg("--numjobs=1")
.arg("--runtime=60")
.arg("--group_reporting")
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(output.status.success());
// print the test's output
println!("{}", String::from_utf8_lossy(&output.stdout));
}
// test disk 4K random read speeds w/ fio
pub fn disk_read_rand_test(tempfile: &str, size: &u8) {
// run the fio command
let output = Command::new("fio")
.arg("--name=TEST")
.arg(format!("--filename={}", tempfile))
.arg("--rw=randread")
.arg("--size=2g")
.arg(format!("--io_size={}g", size))
.arg("--blocksize=4k")
.arg("--ioengine=libaio")
.arg("--fsync=1")
.arg("--iodepth=1")
.arg("--direct=1")
.arg("--numjobs=32")
.arg("--runtime=60")
.arg("--group_reporting")
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(output.status.success());
// print the test's output
println!("{}", String::from_utf8_lossy(&output.stdout));
}
// test sequential disk write speeds w/ fio
pub fn disk_write_seq_test(tempfile: &str, size: &u8) {
// run the fio command
let output = Command::new("fio")
.arg("--name=TEST")
.arg(format!("--filename={}", tempfile))
.arg("--rw=write")
.arg("--size=2g")
.arg(format!("--io_size={}g", size))
.arg("--blocksize=1024k")
.arg("--ioengine=libaio")
.arg("--fsync=10000")
.arg("--iodepth=32")
.arg("--direct=1")
.arg("--numjobs=1")
.arg("--runtime=60")
.arg("--group_reporting")
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(output.status.success());
// print the test's output
println!("{}", String::from_utf8_lossy(&output.stdout));
}
// test random 4K disk write speeds w/ fio (note: --rw=randrw runs a mixed random read/write workload)
pub fn disk_write_rand_test(tempfile: &str, size: &u8) {
// run the fio command
let output = Command::new("fio")
.arg("--name=TEST")
.arg(format!("--filename={}", tempfile))
.arg("--rw=randrw")
.arg("--size=2g")
.arg(format!("--io_size={}g", size))
.arg("--blocksize=4k")
.arg("--ioengine=libaio")
.arg("--fsync=1")
.arg("--iodepth=1")
.arg("--direct=1")
.arg("--numjobs=32")
.arg("--runtime=60")
.arg("--group_reporting")
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(output.status.success());
// print the test's output
println!("{}", String::from_utf8_lossy(&output.stdout));
}
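The four benchmarks above are identical apart from their fio workload parameters (`--rw`, `--blocksize`, `--fsync`, `--iodepth`, `--numjobs`). A minimal sketch of how they could share a single helper; `run_fio_test` is a hypothetical name, not code from this repository:

```rust
use std::process::Command;

// Hypothetical shared helper: runs fio with the workload-specific parameters
// passed in, keeping the common flags in one place.
fn run_fio_test(tempfile: &str, size: &u8, rw: &str, blocksize: &str,
                fsync: u32, iodepth: u32, numjobs: u32) {
    let output = Command::new("fio")
        .arg("--name=TEST")
        .arg(format!("--filename={}", tempfile))
        .arg(format!("--rw={}", rw))
        .arg("--size=2g")
        .arg(format!("--io_size={}g", size))
        .arg(format!("--blocksize={}", blocksize))
        .arg("--ioengine=libaio")
        .arg(format!("--fsync={}", fsync))
        .arg(format!("--iodepth={}", iodepth))
        .arg("--direct=1")
        .arg(format!("--numjobs={}", numjobs))
        .arg("--runtime=60")
        .arg("--group_reporting")
        .output()
        .expect("Failed to execute command");
    assert!(output.status.success());
    println!("{}", String::from_utf8_lossy(&output.stdout));
}
```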

src/benchmarks/games.rs (new file, 36 lines)

@@ -0,0 +1,36 @@
use std::process::Command;
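// run the Civilization 6 AI benchmark (currently only ensures the game is installed)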
pub fn run_civ6_ai_benchmark() {
// make sure Civilization 6 is installed via Steam
download_game_steam(289070);
}
// run the CS:GO benchmark (using benchmark file from PTS)
pub fn run_csgo_benchmark() {
// make sure CS:GO is installed via Steam
download_game_steam(730);
}
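// run the Deus Ex: Mankind Divided benchmark (currently only ensures the game is installed)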
pub fn run_demd_benchmark() {
// make sure Deus Ex: Mankind Divided is installed via Steam
download_game_steam(337000);
}
fn download_game_steam(game_id: u32) {
let mut steam_path = "steam";
if cfg!(windows) {
steam_path = "C:\\Program Files (x86)\\Steam\\steam.exe";
}
// first we need to make sure the game is installed via Steam
let install_output = Command::new(steam_path)
.arg(format!("steam://install/{}", game_id))
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(install_output.status.success());
// print the test's output
println!("{}", String::from_utf8_lossy(&install_output.stdout));
}
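Per commit 4aa3eddc91, these benchmarks currently only verify that the game is installed. A follow-up launch step could reuse the same Steam browser-protocol approach with a `steam://run/<appid>` URL; the `launch_game_steam` helper below is a hypothetical sketch, not part of the repository:

```rust
use std::process::Command;

// Hypothetical sketch: launch an installed game through Steam's browser protocol.
fn launch_game_steam(game_id: u32) {
    let steam_path = if cfg!(windows) {
        "C:\\Program Files (x86)\\Steam\\steam.exe"
    } else {
        "steam"
    };
    let output = Command::new(steam_path)
        .arg(format!("steam://run/{}", game_id))
        .output()
        .expect("Failed to execute command");
    assert!(output.status.success());
}
```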

src/benchmarks/mod.rs

@@ -1,2 +1,3 @@
pub mod disk;
pub mod games;
pub mod network;

src/benchmarks/network.rs (new file, 90 lines)

@@ -0,0 +1,90 @@
use std::process;
use crate::text;
// ping a host
pub fn latency_test(address: &str, count: &u16, interval: &u16) {
println!("Pinging host {}, {} times.", address, count);
// if we're on Windows we need to use the -n flag for ping counts
let mut count_arg = "-c";
if cfg!(windows) {
count_arg = "-n";
}
// convert the ping interval to seconds
let interval_secs = *interval as f64 / 1000 as f64;
// run the ping command
let output = process::Command::new("ping")
.arg(address)
.arg(count_arg)
.arg(format!("{}", count))
.arg("-i")
.arg(format!("{}", interval_secs))
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(output.status.success());
// grab the ping results from stdout
let results_raw = &String::from_utf8_lossy(&output.stdout);
let results = text::format::trim_output(results_raw, 4);
for line in results {
println!("{}", line);
}
}
// network jitter test
pub fn jitter_test(address: &str, count: &u16, interval: &u16) {
println!("Pinging host {}, {} times to determine network jitter.", address, count);
// if we're on Windows we need to use the -n flag for ping counts
let mut count_arg = "-c";
if cfg!(windows) {
count_arg = "-n";
}
// convert the ping interval to seconds
let interval_secs = *interval as f64 / 1000 as f64;
// run the ping command
let output = process::Command::new("ping")
.arg(address)
.arg(count_arg)
.arg(format!("{}", count))
.arg("-i")
.arg(format!("{}", interval_secs))
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(output.status.success());
// grab the ping results from stdout
let results_raw = &String::from_utf8_lossy(&output.stdout);
let results = text::format::trim_output(results_raw, 4);
for line in results {
println!("{}", line);
}
}
// test network bandwidth using iperf3
pub fn bandwidth_test(host: &str) {
println!("Testing network bandwidth using iperf; connecting to {}.", host);
println!("{}", host);
let output = process::Command::new("iperf3")
.arg("-c")
.arg(host)
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(output.status.success());
// grab and print the command's results
let results_raw = &String::from_utf8_lossy(&output.stdout);
println!("{}", results_raw);
}
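As commit 88be1ad2ba notes, the jitter calculation itself is still to be implemented; `jitter_test` currently prints the same ping summary as `latency_test`. One common definition is the mean absolute difference between consecutive RTT samples. The sketch below assumes the RTTs have already been parsed into a slice; `mean_jitter_ms` is a hypothetical helper, not part of the repository:

```rust
// Hypothetical sketch: mean absolute difference between consecutive RTT samples (in ms).
fn mean_jitter_ms(rtts: &[f64]) -> f64 {
    if rtts.len() < 2 {
        return 0.0;
    }
    let total: f64 = rtts
        .windows(2)
        .map(|pair| (pair[1] - pair[0]).abs())
        .sum();
    total / (rtts.len() - 1) as f64
}
```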

src/main.rs

@@ -1,6 +1,8 @@
mod tests;
mod benchmarks;
mod stress;
mod text;
use clap::{Parser, Subcommand};
use clap::{Args, Parser, Subcommand};
#[derive(Parser)]
#[clap(name = "Bit Goblin Benchmark", author, version, about = "Bit Goblin's hardware benchmarking tool.", long_about = None)]
@@ -8,33 +10,109 @@ use clap::{Parser, Subcommand};
struct Cli {
#[clap(subcommand)]
command: Commands,
#[clap(short = 'l', long, default_value_t = 1, help = "Number of times to run the test. Defaults to 1", global = true)]
loopcount: u8,
}
#[derive(Subcommand)]
enum Commands {
// disk tests subcommand
// CPU benchmarks subcommand
#[clap(name = "cpu", about = "CPU benchmarks and stress tests.")]
Cpu(Cpu),
// disk benchmarks subcommand
#[clap(name = "disk", about = "Hard drive and SSD benchmarks.")]
Disk(Disk),
// network tests subcommand
// games benchmarks subcommand
#[clap(name = "games", about = "Benchmark your system with games.")]
Games(Games),
// network benchmarks subcommand
#[clap(name = "network", about = "Test various aspects of your network.")]
Net(Net),
}
#[derive(Parser)]
#[derive(Args)]
struct Cpu {
#[clap(subcommand)]
cpu_commands: CpuCommands,
}
#[derive(Subcommand)]
enum CpuCommands {
// CPU stress test subcommand
#[clap(name = "stress", about = "Stress test the CPU with math!")]
StressTest {
#[clap(short = 'r', long, default_value_t = 5, help = "Length of time (in minutes) to run the stress test. Defaults to 5")]
runtime: u16,
#[clap(short = 't', long, default_value_t = 0, help = "Number of threads to use; defaults to 0 (automatic), which uses the CPU's max thread count")]
threads: usize,
},
}
#[derive(Args)]
struct Disk {
#[structopt(subcommand)]
#[clap(subcommand)]
disk_commands: DiskCommands,
}
#[derive(Subcommand)]
enum DiskCommands {
#[clap(name = "write-test", about = "Write a large file to determine sequential disk speeds.")]
WriteTest {
#[clap(short = 't', default_value_t = String::from("/tmp/disk-test.tmp"))]
// sequential disk read subcommand
#[clap(name = "read_seq", about = "Sequential disk read speed test.")]
ReadSeqTest {
#[clap(short = 't', long, default_value_t = String::from("/tmp/disk-test.tmp"))]
tempfile: String,
#[clap(short = 's', default_value_t = 5)]
#[clap(short = 's', long, default_value_t = 15)]
size: u8,
},
// random disk read subcommand
#[clap(name = "read_rand", about = "Random 4K disk read speed test.")]
ReadRandTest {
#[clap(short = 't', long, default_value_t = String::from("/tmp/disk-test.tmp"))]
tempfile: String,
#[clap(short = 's', long, default_value_t = 15)]
size: u8,
},
// sequential disk write subcommand
#[clap(name = "write_seq", about = "Write a large file to determine sequential disk write speeds.")]
WriteSeqTest {
#[clap(short = 't', long, default_value_t = String::from("/tmp/disk-test.tmp"))]
tempfile: String,
#[clap(short = 's', long, default_value_t = 15)]
size: u8,
},
// random 4K disk write subcommand
#[clap(name = "write_rand", about = "Write a bunch of smallfiles to determine random disk write speeds.")]
WriteRandTest {
#[clap(short = 't', long, default_value_t = String::from("/tmp/disk-test.tmp"))]
tempfile: String,
#[clap(short = 's', long, default_value_t = 15)]
size: u8,
},
}
#[derive(Parser)]
struct Games {
#[structopt(subcommand)]
games_commands: GamesCommands,
}
#[derive(Subcommand)]
enum GamesCommands {
// Civilization 6 AI benchmark subcommand
#[clap(name = "civ6_ai", about = "Run the Civilization 6 AI benchmark via Steam.")]
Civ6AI {},
// CS:GO benchmark subcommand
#[clap(name = "csgo", about = "Run the CS:GO game benchmark via Steam.")]
CSGO {},
// Deus Ex: Mankind Divided benchmark subcommand
#[clap(name = "demd", about = "Run the Deus Ex: Mankind Divided game benchmark via Steam.")]
DEMD {},
}
#[derive(Parser)]
@@ -45,22 +123,33 @@ struct Net {
#[derive(Subcommand)]
enum NetCommands {
// ping subcommand
#[clap(name = "ping", about = "Ping a host to determine network latency.")]
Ping {
#[clap(short = 't', default_value_t = String::from("8.8.8.8"))]
// bandwidth test subcommand
#[clap(name = "bandwidth", about = "Uses iperf to test network bandwidth.")]
Bandwidth {
#[clap(short = 'a', long, required = true)]
host: String,
#[clap(short = 'c', default_value_t = 30)]
count: u16,
},
// bandwidth test subcommand
#[clap(name = "bandwidth", about = "Downloads a remote file to determine network bandwidth.")]
Bandwidth {
#[clap(short = 'd', default_value_t = String::from("https://www.bitgoblin.tech/hardware-tests/export-01.mp4"))]
download: String,
#[clap(short = 'o', default_value_t = String::from("./tempfile"))]
output: String,
// jitter subcommand
#[clap(name = "jitter", about = "Ping a host to determine network jitter.")]
Jitter {
#[clap(short = 'a', long, default_value_t = String::from("8.8.8.8"))]
address: String,
#[clap(short = 'c', long, default_value_t = 100)]
count: u16,
#[clap(short = 'i', long, default_value_t = 1000)]
interval: u16,
},
// latency subcommand
#[clap(name = "latency", about = "Ping a host to determine network latency.")]
Latency {
#[clap(short = 'a', long, default_value_t = String::from("8.8.8.8"))]
address: String,
#[clap(short = 'c', long, default_value_t = 100)]
count: u16,
#[clap(short = 'i', long, default_value_t = 1000)]
interval: u16,
},
}
@@ -69,13 +158,62 @@ fn main() {
// map subcommands back to the main command
match &cli.command {
Commands::Cpu(args) => match &args.cpu_commands {
CpuCommands::StressTest { runtime, threads } => stress::cpu::cpu_stress_math(*runtime, *threads),
},
Commands::Disk(args) => match &args.disk_commands {
DiskCommands::WriteTest { tempfile, size } => tests::disk::disk_write_test(tempfile, size),
}
DiskCommands::ReadSeqTest { tempfile, size } => {
for i in 0..cli.loopcount {
println!("Test run number {}.", i + 1);
benchmarks::disk::disk_read_seq_test(tempfile, size);
}
},
DiskCommands::ReadRandTest { tempfile, size } => {
for i in 0..cli.loopcount {
println!("Test run number {}.", i + 1);
benchmarks::disk::disk_read_rand_test(tempfile, size);
}
},
DiskCommands::WriteSeqTest { tempfile, size } => {
for i in 0..cli.loopcount {
println!("Test run number {}.", i + 1);
benchmarks::disk::disk_write_seq_test(tempfile, size);
}
},
DiskCommands::WriteRandTest { tempfile, size } => {
for i in 0..cli.loopcount {
println!("Test run number {}.", i + 1);
benchmarks::disk::disk_write_rand_test(tempfile, size);
}
},
},
Commands::Games(args) => match &args.games_commands {
GamesCommands::Civ6AI {} => benchmarks::games::run_civ6_ai_benchmark(),
GamesCommands::CSGO {} => benchmarks::games::run_csgo_benchmark(),
GamesCommands::DEMD {} => benchmarks::games::run_demd_benchmark(),
},
Commands::Net(args) => match &args.net_commands {
NetCommands::Ping { host, count } => tests::network::ping_host(host, count),
NetCommands::Bandwidth { download, output } => tests::network::bandwidth_test(download, output),
NetCommands::Bandwidth { host } => {
for i in 0..cli.loopcount {
println!("Test run number {}.", i + 1);
benchmarks::network::bandwidth_test(host);
}
},
NetCommands::Jitter { address, count, interval } => {
for i in 0..cli.loopcount {
println!("Test run number {}.", i + 1);
benchmarks::network::jitter_test(address, count, interval);
}
},
NetCommands::Latency { address, count, interval } => {
for i in 0..cli.loopcount {
println!("Test run number {}.", i + 1);
benchmarks::network::latency_test(address, count, interval);
}
},
},
}
}

src/stress/cpu.rs (new file, 38 lines)

@@ -0,0 +1,38 @@
use std::{thread, time};
use std::process::exit;
use sysinfo::{System,SystemExt};
pub fn cpu_stress_math(runtime: u16, threads: usize) {
// fetch system information
let mut sys = System::new_all();
sys.refresh_all();
let num_cpus = sys.cpus().len();
let mut num_threads = threads;
if num_threads == 0 {
println!("Number of threads not specified, defaulting to CPU's thread count of {}.", num_cpus);
num_threads = num_cpus;
} else {
println!("Using specified thread count of {}", num_threads);
}
for i in 0..num_threads {
println!("Spawning thread number {}", i + 1);
thread::spawn (|| {
worker();
});
}
println!("Sleeping main thread for the allotted runtime of {} minute(s).", runtime);
let duration = time::Duration::from_secs((runtime * 60).into());
thread::sleep(duration);
exit(0);
}
fn worker() {
let mut _x = 0;
loop {
_x += 1;
_x -= 1;
}
}
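In release builds the `_x += 1; _x -= 1;` pair may be optimized down to an empty spin loop, which still pegs each core at 100% but performs little arithmetic. A hedged alternative using `std::hint::black_box` (stable since Rust 1.66, newer than the rust:1.62 image used in CI) is sketched below; `worker_black_box` is illustrative only, not code from this repository:

```rust
// Hypothetical sketch: black_box keeps the arithmetic from being optimized away.
fn worker_black_box() {
    let mut x: u64 = 0;
    loop {
        x = std::hint::black_box(x.wrapping_mul(6364136223846793005).wrapping_add(1));
    }
}
```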

src/stress/mod.rs (new file, 1 line)

@@ -0,0 +1 @@
pub mod cpu;

src/tests/disk.rs (deleted)

@@ -1,29 +0,0 @@
use std::fs;
use std::process::Command;
// test disk write speeds by continually writing zeroes to it
pub fn disk_write_test(tempfile: &str, size: &u8) {
// convert size in Gigabytes down to Megabytes
let size_gigs: u32 = (*size as u32 * 1024).into();
// run the dd command with a block size of 1 MB
let output = Command::new("dd")
.arg("bs=1M")
.arg(format!("count={}", size_gigs))
.arg("if=/dev/zero")
.arg(format!("of={}", tempfile))
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(output.status.success());
// for whatever reason, `dd` output ends up in stderr
println!("{}", String::from_utf8_lossy(&output.stderr));
// remove the test file
match fs::remove_file(tempfile) {
Ok(()) => println!("Cleaning up..."),
Err(e) => println!("There was a problem during cleanup - {}", e),
}
}

src/tests/network.rs (deleted)

@@ -1,42 +0,0 @@
use chrono::prelude::*;
use std::{fs,process};
// ping a host
pub fn ping_host(host: &str, count: &u16) {
println!("Pinging host {}, {} times.", host, count);
// run the ping command
let output = process::Command::new("ping")
.arg(host)
.arg(format!("-c {}", count))
.output()
.expect("Failed to execute command");
// check that the command succeeded
assert!(output.status.success());
// print out the ping results from stdout
println!("{}", String::from_utf8_lossy(&output.stdout));
}
// timed file copy test to gauge bandwidth speeds
pub fn bandwidth_test(download: &str, output: &str) {
println!("Testing network bandwidth by downloading {}.", download);
// get start time so we can track how long it takes to complete
let start_time = Utc::now();
// do the download
// get finish time
let finish_time = Utc::now();
// compute time to complete
let comp_time = finish_time - start_time;
println!("{}", comp_time.num_milliseconds());
// clean up the test file
match fs::remove_file(output) {
Ok(()) => println!("Cleaning up..."),
Err(e) => println!("There was a problem during cleanup - {}", e),
}
}

src/text/format.rs (new file, 8 lines)

@@ -0,0 +1,8 @@
use std::vec::Vec;
pub fn trim_output(text: &str, linecount: u16) -> Vec<&str> {
let text_vec: Vec<&str> = text.split("\n").collect();
let text_start = text_vec.len() - (linecount as usize);
let text_trim = text_vec.as_slice()[text_start..].to_vec();
return text_trim;
}
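`trim_output` keeps only the last `linecount` lines of a command's output (here, the tail of the ping summary). Note that `text_vec.len() - (linecount as usize)` underflows and panics if the output has fewer lines than requested; a defensive sketch using `saturating_sub` follows, where the `trim_output_safe` name is hypothetical:

```rust
// Hypothetical sketch: same behaviour as trim_output, but never panics on short output.
pub fn trim_output_safe(text: &str, linecount: u16) -> Vec<&str> {
    let lines: Vec<&str> = text.split('\n').collect();
    let start = lines.len().saturating_sub(linecount as usize);
    lines[start..].to_vec()
}
```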

src/text/mod.rs (new file, 1 line)

@@ -0,0 +1 @@
pub mod format;