Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

util: support float cgroup cpu quota (#8425) #8427

Merged
merged 5 commits on Sep 8, 2020
Merged
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 4 additions & 6 deletions components/tikv_util/src/sys/cgroup.rs
Original file line number Diff line number Diff line change
Expand Up @@ -254,20 +254,18 @@ impl CGroupSys {
Self { cgroups }
}

pub fn cpu_cores_quota(&self) -> i64 {
pub fn cpu_cores_quota(&self) -> Option<f64> {
if let Some(sub_cpu) = self.cgroups.get(CPU_SUBSYS) {
if let Ok(quota) = sub_cpu.read_num(CPU_QUOTA) {
if quota < 0 {
return -1;
return None;
}
if let Ok(period) = sub_cpu.read_num(CPU_PERIOD) {
return quota / period;
return Some(quota as f64 / period as f64);
}
}
}

// -1 means no limit.
-1
None
}

pub fn memory_limit_in_bytes(&self) -> i64 {
Expand Down
13 changes: 13 additions & 0 deletions components/tikv_util/src/sys/mod.rs
Original file line number Diff line number Diff line change
Expand Up @@ -21,13 +21,21 @@ pub mod sys_quota {
}
}

<<<<<<< HEAD
pub fn cpu_cores_quota(&self) -> usize {
let cpu_num = sysinfo::get_logical_cores();
let cgroup_quota = self.cgroup.cpu_cores_quota();
if cgroup_quota < 0 {
cpu_num
} else {
std::cmp::min(cpu_num, cgroup_quota as usize)
=======
pub fn cpu_cores_quota(&self) -> f64 {
let cpu_num = num_cpus::get() as f64;
match self.cgroup.cpu_cores_quota() {
Some(cgroup_quota) if cgroup_quota > 0.0 && cgroup_quota < cpu_num => cgroup_quota,
_ => cpu_num,
>>>>>>> cd7dc74... util: support float cgroup cpu quota (#8425)
}
}

Expand Down Expand Up @@ -65,8 +73,13 @@ pub mod sys_quota {
Self {}
}

<<<<<<< HEAD
pub fn cpu_cores_quota(&self) -> usize {
sysinfo::get_logical_cores()
=======
pub fn cpu_cores_quota(&self) -> f64 {
num_cpus::get() as f64
>>>>>>> cd7dc74... util: support float cgroup cpu quota (#8425)
}

pub fn memory_limit_in_bytes(&self) -> u64 {
Expand Down
10 changes: 5 additions & 5 deletions src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -167,7 +167,7 @@ fn get_background_job_limit(
// By default, rocksdb assign (max_background_jobs / 4) threads dedicated for flush, and
// the rest shared by flush and compaction.
let max_background_jobs: i32 =
cmp::max(2, cmp::min(default_background_jobs, (cpu_num - 1) as i32));
cmp::max(2, cmp::min(default_background_jobs, (cpu_num - 1.0) as i32));
// Cap max_sub_compactions to allow at least two compactions.
let max_compactions = max_background_jobs - max_background_jobs / 4;
let max_sub_compactions: u32 = cmp::max(
Expand Down Expand Up @@ -1410,7 +1410,7 @@ const UNIFIED_READPOOL_MIN_CONCURRENCY: usize = 4;
impl Default for UnifiedReadPoolConfig {
fn default() -> UnifiedReadPoolConfig {
let cpu_num = SysQuota::new().cpu_cores_quota();
let mut concurrency = (cpu_num as f64 * 0.8) as usize;
let mut concurrency = (cpu_num * 0.8) as usize;
concurrency = cmp::max(UNIFIED_READPOOL_MIN_CONCURRENCY, concurrency);
Self {
min_thread_count: 1,
Expand Down Expand Up @@ -1646,7 +1646,7 @@ readpool_config!(StorageReadPoolConfig, storage_read_pool_test, "storage");
impl Default for StorageReadPoolConfig {
fn default() -> Self {
let cpu_num = SysQuota::new().cpu_cores_quota();
let mut concurrency = (cpu_num as f64 * 0.5) as usize;
let mut concurrency = (cpu_num * 0.5) as usize;
concurrency = cmp::max(DEFAULT_STORAGE_READPOOL_MIN_CONCURRENCY, concurrency);
concurrency = cmp::min(DEFAULT_STORAGE_READPOOL_MAX_CONCURRENCY, concurrency);
Self {
Expand Down Expand Up @@ -1688,7 +1688,7 @@ readpool_config!(
impl Default for CoprReadPoolConfig {
fn default() -> Self {
let cpu_num = SysQuota::new().cpu_cores_quota();
let mut concurrency = (cpu_num as f64 * 0.8) as usize;
let mut concurrency = (cpu_num * 0.8) as usize;
concurrency = cmp::max(DEFAULT_COPROCESSOR_READPOOL_MIN_CONCURRENCY, concurrency);
Self {
use_unified_pool: None,
Expand Down Expand Up @@ -1918,7 +1918,7 @@ impl Default for BackupConfig {
let cpu_num = SysQuota::new().cpu_cores_quota();
Self {
// use at most 75% of vCPU by default
num_threads: (cpu_num - cpu_num / 4).clamp(1, 32),
num_threads: (cpu_num * 0.75).clamp(1.0, 32.0) as usize,
}
}
}
Expand Down
7 changes: 5 additions & 2 deletions src/server/config.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
// Copyright 2016 TiKV Project Authors. Licensed under Apache-2.0.

use std::{i32, isize};
use std::{cmp, i32, isize};

use super::Result;
use grpcio::CompressionAlgorithms;
Expand Down Expand Up @@ -32,6 +32,9 @@ const DEFAULT_ENDPOINT_REQUEST_MAX_HANDLE_SECS: u64 = 60;
// Number of rows in each chunk for streaming coprocessor.
const DEFAULT_ENDPOINT_STREAM_BATCH_ROW_LIMIT: usize = 128;

// At least 4 long coprocessor requests are allowed to run concurrently.
const MIN_ENDPOINT_MAX_CONCURRENCY: usize = 4;

const DEFAULT_SNAP_MAX_BYTES_PER_SEC: u64 = 100 * 1024 * 1024;

const DEFAULT_MAX_GRPC_SEND_MSG_LEN: i32 = 10 * 1024 * 1024;
Expand Down Expand Up @@ -156,7 +159,7 @@ impl Default for Config {
end_point_request_max_handle_duration: ReadableDuration::secs(
DEFAULT_ENDPOINT_REQUEST_MAX_HANDLE_SECS,
),
end_point_max_concurrency: cpu_num,
end_point_max_concurrency: cmp::max(cpu_num as usize, MIN_ENDPOINT_MAX_CONCURRENCY),
snap_max_write_bytes_per_sec: ReadableSize(DEFAULT_SNAP_MAX_BYTES_PER_SEC),
snap_max_total_size: ReadableSize(0),
stats_concurrency: 1,
Expand Down
2 changes: 1 addition & 1 deletion src/storage/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ impl Default for Config {
gc_ratio_threshold: DEFAULT_GC_RATIO_THRESHOLD,
max_key_size: DEFAULT_MAX_KEY_SIZE,
scheduler_concurrency: DEFAULT_SCHED_CONCURRENCY,
scheduler_worker_pool_size: if cpu_num >= 16 { 8 } else { 4 },
scheduler_worker_pool_size: if cpu_num >= 16.0 { 8 } else { 4 },
scheduler_pending_write_threshold: ReadableSize::mb(DEFAULT_SCHED_PENDING_WRITE_MB),
reserve_space: ReadableSize::gb(DEFAULT_RESERVER_SPACE_SIZE),
block_cache: BlockCacheConfig::default(),
Expand Down