vcpu: boost vcpu threads

Add an option to allow boosting vcpu threads with a high uclamp value so
that they will be migrated to bigger CPUs and run at higher frequencies.

BUG=b:342349882
TEST=tools/presubmit
Change-Id: Ib1cb15b0f862acf8ec95a00d12e7de23e0ddcb4f
Signed-off-by: David Dai <davidai@google.com>
Reviewed-on: https://chromium-review.googlesource.com/c/crosvm/crosvm/+/5627833
Commit-Queue: Daniel Verkamp <dverkamp@chromium.org>
Reviewed-by: Daniel Verkamp <dverkamp@chromium.org>
This commit is contained in:
David Dai 2024-06-07 11:13:04 -07:00 committed by crosvm LUCI
parent 0d17dd7877
commit abf2992446
4 changed files with 37 additions and 0 deletions

View file

@ -1043,6 +1043,14 @@ pub struct RunCommand {
/// pci-address=ADDR - Preferred PCI address, e.g. "00:01.0".
block: Vec<DiskOptionWithId>,
#[cfg(any(target_os = "android", target_os = "linux"))]
#[argh(switch)]
#[serde(skip)]
#[merge(strategy = overwrite_option)]
/// set a minimum utilization for vCPU threads which will hint to the host scheduler
/// to ramp up higher frequencies or place vCPU threads on larger cores.
pub boost_uclamp: Option<bool>,
#[cfg(target_arch = "x86_64")]
#[argh(switch)]
#[merge(strategy = overwrite_option)]
@ -2725,6 +2733,7 @@ impl TryFrom<RunCommand> for super::config::Config {
#[cfg(any(target_os = "android", target_os = "linux"))]
{
cfg.lock_guest_memory = cmd.lock_guest_memory.unwrap_or_default();
cfg.boost_uclamp = cmd.boost_uclamp.unwrap_or_default();
}
#[cfg(feature = "audio")]

View file

@ -697,6 +697,8 @@ pub struct Config {
pub block_control_tube: Vec<Tube>,
#[cfg(windows)]
pub block_vhost_user_tube: Vec<Tube>,
#[cfg(any(target_os = "android", target_os = "linux"))]
pub boost_uclamp: bool,
pub boot_cpu: usize,
#[cfg(target_arch = "x86_64")]
pub break_linux_pci_config_io: bool,
@ -986,6 +988,8 @@ impl Default for Config {
log_file: None,
#[cfg(windows)]
logs_directory: None,
#[cfg(any(target_os = "android", target_os = "linux"))]
boost_uclamp: false,
memory: None,
memory_file: None,
mmio_address_ranges: Vec::new(),

View file

@ -3575,6 +3575,7 @@ fn run_control<V: VmArch + 'static, Vcpu: VcpuArch + 'static>(
#[cfg(target_arch = "x86_64")]
bus_lock_ratelimit_ctrl,
run_mode,
cfg.boost_uclamp,
)?;
vcpu_handles.push((handle, to_vcpu_channel));
}

View file

@ -24,6 +24,8 @@ use arch::LinuxArch;
use arch::VcpuArch;
use arch::VcpuInitArch;
use arch::VmArch;
use base::sched_attr;
use base::sched_setattr;
use base::signal::clear_signal_handler;
use base::signal::BlockedSignal;
use base::*;
@ -35,6 +37,8 @@ use hypervisor::IoParams;
use hypervisor::VcpuExit;
use hypervisor::VcpuSignalHandle;
use libc::c_int;
use libc::SCHED_FLAG_KEEP_ALL;
use libc::SCHED_FLAG_RESET_ON_FORK;
use metrics_events::MetricEventType;
#[cfg(target_arch = "riscv64")]
use riscv64::Riscv64 as Arch;
@ -50,6 +54,10 @@ use super::ExitState;
#[cfg(target_arch = "x86_64")]
use crate::crosvm::ratelimit::Ratelimit;
// TODO(davidai): Import libc constant when updated
// sched_setattr(2) flag telling the kernel to honor `sched_util_min`;
// value 0x20 mirrors SCHED_FLAG_UTIL_CLAMP_MIN from the kernel UAPI
// (linux/sched/types.h), not yet exported by the libc crate.
const SCHED_FLAG_UTIL_CLAMP_MIN: u64 = 0x20;
// Maximum utilization clamp value — presumably mirrors the kernel's
// SCHED_CAPACITY_SCALE (1024), i.e. request full CPU capacity/frequency
// for the thread. TODO confirm against the kernel headers in use.
const SCHED_SCALE_CAPACITY: u32 = 1024;
fn bus_io_handler(bus: &Bus) -> impl FnMut(IoParams) -> Option<[u8; 8]> + '_ {
|IoParams {
address,
@ -87,6 +95,7 @@ pub fn set_vcpu_thread_scheduling(
enable_per_vm_core_scheduling: bool,
vcpu_cgroup_tasks_file: Option<File>,
run_rt: bool,
boost_uclamp: bool,
) -> anyhow::Result<()> {
if !vcpu_affinity.is_empty() {
if let Err(e) = set_cpu_affinity(vcpu_affinity) {
@ -94,6 +103,18 @@ pub fn set_vcpu_thread_scheduling(
}
}
if boost_uclamp {
let mut sched_attr = sched_attr::default();
sched_attr.sched_flags = SCHED_FLAG_KEEP_ALL as u64
| SCHED_FLAG_UTIL_CLAMP_MIN
| SCHED_FLAG_RESET_ON_FORK as u64;
sched_attr.sched_util_min = SCHED_SCALE_CAPACITY;
if let Err(e) = sched_setattr(0, &mut sched_attr, 0) {
warn!("Failed to boost vcpu util: {}", e);
}
}
if core_scheduling && !enable_per_vm_core_scheduling {
// Do per-vCPU core scheduling by setting a unique cookie to each vCPU.
if let Err(e) = enable_core_scheduling() {
@ -499,6 +520,7 @@ pub fn run_vcpu<V>(
vcpu_cgroup_tasks_file: Option<File>,
#[cfg(target_arch = "x86_64")] bus_lock_ratelimit_ctrl: Arc<Mutex<Ratelimit>>,
run_mode: VmRunMode,
boost_uclamp: bool,
) -> Result<JoinHandle<()>>
where
V: VcpuArch + 'static,
@ -516,6 +538,7 @@ where
enable_per_vm_core_scheduling,
vcpu_cgroup_tasks_file,
run_rt && !delay_rt,
boost_uclamp,
) {
error!("vcpu thread setup failed: {:#}", e);
return ExitState::Stop;