Mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git, synced 2024-11-24 20:20:50 +07:00
Merge branch 'pm-cpuidle'
* pm-cpuidle:
  CPU / PM: expose pm_qos_resume_latency for CPUs
  cpuidle/menu: add per CPU PM QoS resume latency consideration
  cpuidle/menu: stop seeking deeper idle if current state is deep enough
  ACPI / idle: small formatting fixes
commit ad7eec4244
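For context, the user-visible knob this series adds is a per-CPU resume-latency limit: register_cpu() now calls dev_pm_qos_expose_latency_limit(&cpu->dev, 0), and menu_select() reads the value back through dev_pm_qos_read_value(). The following is only a rough userspace sketch, not part of the commit; the sysfs attribute name pm_qos_resume_latency_us is the one dev_pm_qos_expose_latency_limit() normally creates, and the CPU number and 20 us value below are arbitrary illustration defaults.

/*
 * Hypothetical helper (illustration only): write a resume-latency limit,
 * in microseconds, for one CPU through its per-device PM QoS sysfs file.
 * A non-zero limit smaller than a C-state's exit latency keeps the menu
 * governor from selecting that state for this CPU; 0 means no restriction,
 * matching the comment added to menu.c in this merge.
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
        const char *cpu  = argc > 1 ? argv[1] : "0";  /* CPU number, default 0 */
        const char *usec = argc > 2 ? argv[2] : "20"; /* latency limit in us */
        char path[128];
        FILE *f;

        snprintf(path, sizeof(path),
                 "/sys/devices/system/cpu/cpu%s/power/pm_qos_resume_latency_us",
                 cpu);

        f = fopen(path, "w");
        if (!f) {
                perror(path);
                return EXIT_FAILURE;
        }
        fprintf(f, "%s\n", usec);
        fclose(f);
        return 0;
}

Compiled and run as root (a hypothetical invocation such as ./set_latency 0 20), this would keep CPU 0 out of idle states whose exit latency exceeds 20 us; writing 0 back lifts the restriction.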
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
@@ -12,7 +12,6 @@
 #include <linux/sched.h>
 
 #include <acpi/processor.h>
-#include <asm/acpi.h>
 #include <asm/mwait.h>
 #include <asm/special_insns.h>
 
@@ -89,7 +88,8 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
         retval = 0;
         /* If the HW does not support any sub-states in this C-state */
         if (num_cstate_subtype == 0) {
-                pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n", cx->address, edx_part);
+                pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
+                        cx->address, edx_part);
                 retval = -1;
                 goto out;
         }
@@ -104,8 +104,8 @@ static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
         if (!mwait_supported[cstate_type]) {
                 mwait_supported[cstate_type] = 1;
                 printk(KERN_DEBUG
-                        "Monitor-Mwait will be used to enter C-%d "
-                        "state\n", cx->type);
+                        "Monitor-Mwait will be used to enter C-%d state\n",
+                        cx->type);
         }
         snprintf(cx->desc,
                         ACPI_CX_DESC_LEN, "ACPI FFH INTEL MWAIT 0x%x",
@@ -166,6 +166,7 @@ EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);
 static int __init ffh_cstate_init(void)
 {
         struct cpuinfo_x86 *c = &boot_cpu_data;
+
         if (c->x86_vendor != X86_VENDOR_INTEL)
                 return -1;
 
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
@@ -17,6 +17,7 @@
 #include <linux/of.h>
 #include <linux/cpufeature.h>
 #include <linux/tick.h>
+#include <linux/pm_qos.h>
 
 #include "base.h"
 
@@ -376,6 +377,7 @@ int register_cpu(struct cpu *cpu, int num)
 
         per_cpu(cpu_sys_devices, num) = &cpu->dev;
         register_cpu_under_node(num, cpu_to_node(num));
+        dev_pm_qos_expose_latency_limit(&cpu->dev, 0);
 
         return 0;
 }
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
@@ -19,6 +19,7 @@
 #include <linux/tick.h>
 #include <linux/sched.h>
 #include <linux/math64.h>
+#include <linux/cpu.h>
 
 /*
  * Please note when changing the tuning values:
@@ -280,17 +281,23 @@ static unsigned int get_typical_interval(struct menu_device *data)
 static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
 {
         struct menu_device *data = this_cpu_ptr(&menu_devices);
+        struct device *device = get_cpu_device(dev->cpu);
         int latency_req = pm_qos_request(PM_QOS_CPU_DMA_LATENCY);
         int i;
         unsigned int interactivity_req;
         unsigned int expected_interval;
         unsigned long nr_iowaiters, cpu_load;
+        int resume_latency = dev_pm_qos_read_value(device);
 
         if (data->needs_update) {
                 menu_update(drv, dev);
                 data->needs_update = 0;
         }
 
+        /* resume_latency is 0 means no restriction */
+        if (resume_latency && resume_latency < latency_req)
+                latency_req = resume_latency;
+
         /* Special case when user has set very strict latency requirement */
         if (unlikely(latency_req == 0))
                 return 0;
@@ -357,9 +364,9 @@ static int menu_select(struct cpuidle_driver *drv, struct cpuidle_device *dev)
                 if (s->disabled || su->disable)
                         continue;
                 if (s->target_residency > data->predicted_us)
-                        continue;
+                        break;
                 if (s->exit_latency > latency_req)
-                        continue;
+                        break;
 
                 data->last_state_idx = i;
         }