mirror of https://github.com/AuxXxilium/linux_dsm_epyc7002.git
cd7240c0b9
TSCs get reset after suspend/resume (even on CPUs with an invariant TSC,
which runs at a constant rate across ACPI P-, C- and T-states). On some
systems the BIOS also seems to reinitialize the TSC to an arbitrarily
large value (still synchronized across CPUs) during resume.

This leads to a scenario where the scheduler's rq->clock
(sched_clock_cpu()) is less than rq->age_stamp (introduced in 2.6.32).
The resulting large value returned by scale_rt_power(), and the
correspondingly large group power set by update_group_power(), causes
improper load balancing between busy and idle CPUs after suspend/resume.
As a result, multi-threaded workloads (like kernel compilation) ran
slower after a suspend/resume cycle on Core i5 laptops.

Fix this by recomputing the cyc2ns_offset's during resume, so that
sched_clock() continues from the point where it left off during suspend.

Reported-by: Florian Pritz <flo@xssn.at>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: <stable@kernel.org> # [v2.6.32+]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1282262618.2675.24.camel@sbsiddha-MOBL3.sc.intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
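For context, the recomputation itself lives in arch/x86/kernel/tsc.c; the
file below only gains the save_sched_clock_state()/restore_sched_clock_state()
calls. A rough sketch of the mechanism (simplified from the commit, not a
verbatim copy): snapshot sched_clock() at suspend, then bias every CPU's
cyc2ns_offset at resume so the scaled-TSC clock continues from the
pre-suspend value.

	/* Simplified sketch: sched_clock() is roughly
	 * (tsc cycles scaled by cyc2ns_scale) + cyc2ns_offset,
	 * so rebasing the per-CPU offsets makes it continuous.
	 */
	static unsigned long long cyc2ns_suspend;

	void save_sched_clock_state(void)
	{
		if (!sched_clock_stable)
			return;
		cyc2ns_suspend = sched_clock();
	}

	void restore_sched_clock_state(void)
	{
		unsigned long long offset;
		unsigned long flags;
		int cpu;

		if (!sched_clock_stable)
			return;

		local_irq_save(flags);

		/* With the offset zeroed, sched_clock() returns the raw
		 * post-resume time; the delta is the new offset.
		 */
		__get_cpu_var(cyc2ns_offset) = 0;
		offset = cyc2ns_suspend - sched_clock();

		for_each_possible_cpu(cpu)
			per_cpu(cyc2ns_offset, cpu) = offset;

		local_irq_restore(flags);
	}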
238 lines
5.8 KiB
C
/*
 * Suspend support specific for i386/x86-64.
 *
 * Distribute under GPLv2
 *
 * Copyright (c) 2007 Rafael J. Wysocki <rjw@sisk.pl>
 * Copyright (c) 2002 Pavel Machek <pavel@ucw.cz>
 * Copyright (c) 2001 Patrick Mochel <mochel@osdl.org>
 */

#include <linux/suspend.h>
#include <linux/smp.h>

#include <asm/pgtable.h>
#include <asm/proto.h>
#include <asm/mtrr.h>
#include <asm/page.h>
#include <asm/mce.h>
#include <asm/xcr.h>
#include <asm/suspend.h>
#include <asm/debugreg.h>

#ifdef CONFIG_X86_32
static struct saved_context saved_context;

unsigned long saved_context_ebx;
unsigned long saved_context_esp, saved_context_ebp;
unsigned long saved_context_esi, saved_context_edi;
unsigned long saved_context_eflags;
#else
/* CONFIG_X86_64 */
struct saved_context saved_context;
#endif

/**
 * __save_processor_state - save CPU registers before creating a
 *	hibernation image and before restoring the memory state from it
 * @ctxt - structure to store the registers' contents in
 *
 * NOTE: If there is a CPU register the modification of which by the
 * boot kernel (ie. the kernel used for loading the hibernation image)
 * might affect the operations of the restored target kernel (ie. the one
 * saved in the hibernation image), then its contents must be saved by this
 * function. In other words, if kernel A is hibernated and a different
 * kernel B is used for loading the hibernation image into memory, then
 * kernel A's __save_processor_state() function must save all registers
 * needed by kernel A, so that it can operate correctly after the resume
 * regardless of what kernel B does in the meantime.
 */
static void __save_processor_state(struct saved_context *ctxt)
{
#ifdef CONFIG_X86_32
	mtrr_save_fixed_ranges(NULL);
#endif
	kernel_fpu_begin();

	/*
	 * descriptor tables
	 */
#ifdef CONFIG_X86_32
	store_gdt(&ctxt->gdt);
	store_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
	store_idt((struct desc_ptr *)&ctxt->idt_limit);
#endif
	store_tr(ctxt->tr);

	/* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	savesegment(es, ctxt->es);
	savesegment(fs, ctxt->fs);
	savesegment(gs, ctxt->gs);
	savesegment(ss, ctxt->ss);
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %%ds, %0" : "=m" (ctxt->ds));
	asm volatile ("movw %%es, %0" : "=m" (ctxt->es));
	asm volatile ("movw %%fs, %0" : "=m" (ctxt->fs));
	asm volatile ("movw %%gs, %0" : "=m" (ctxt->gs));
	asm volatile ("movw %%ss, %0" : "=m" (ctxt->ss));

	rdmsrl(MSR_FS_BASE, ctxt->fs_base);
	rdmsrl(MSR_GS_BASE, ctxt->gs_base);
	rdmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
	mtrr_save_fixed_ranges(NULL);

	rdmsrl(MSR_EFER, ctxt->efer);
#endif

	/*
	 * control registers
	 */
	ctxt->cr0 = read_cr0();
	ctxt->cr2 = read_cr2();
	ctxt->cr3 = read_cr3();
#ifdef CONFIG_X86_32
	ctxt->cr4 = read_cr4_safe();
#else
/* CONFIG_X86_64 */
	ctxt->cr4 = read_cr4();
	ctxt->cr8 = read_cr8();
#endif
	ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE,
					       &ctxt->misc_enable);
}

/* Needed by apm.c */
void save_processor_state(void)
{
	__save_processor_state(&saved_context);
	save_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(save_processor_state);
#endif

static void do_fpu_end(void)
{
	/*
	 * Restore FPU regs if necessary.
	 */
	kernel_fpu_end();
}

static void fix_processor_context(void)
{
	int cpu = smp_processor_id();
	struct tss_struct *t = &per_cpu(init_tss, cpu);

	set_tss_desc(cpu, t);	/*
				 * This just modifies memory; should not be
				 * necessary. But... This is necessary, because
				 * 386 hardware has concept of busy TSS or some
				 * similar stupidity.
				 */

#ifdef CONFIG_X86_64
	/* Type 9 = available TSS: clear the busy bit so ltr below works */
	get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;

	syscall_init();				/* This sets MSR_*STAR and related */
#endif
	load_TR_desc();				/* This does ltr */
	load_LDT(&current->active_mm->context);	/* This does lldt */
}

/**
 * __restore_processor_state - restore the contents of CPU registers saved
 *	by __save_processor_state()
 * @ctxt - structure to load the registers' contents from
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	if (ctxt->misc_enable_saved)
		wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable);
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
#ifdef CONFIG_X86_32
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
#else
/* CONFIG_X86_64 */
	wrmsrl(MSR_EFER, ctxt->efer);
	write_cr8(ctxt->cr8);
	write_cr4(ctxt->cr4);
#endif
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
#ifdef CONFIG_X86_32
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);
#else
/* CONFIG_X86_64 */
	load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
	load_idt((const struct desc_ptr *)&ctxt->idt_limit);
#endif

	/*
	 * segment registers
	 */
#ifdef CONFIG_X86_32
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#else
/* CONFIG_X86_64 */
	asm volatile ("movw %0, %%ds" :: "r" (ctxt->ds));
	asm volatile ("movw %0, %%es" :: "r" (ctxt->es));
	asm volatile ("movw %0, %%fs" :: "r" (ctxt->fs));
	load_gs_index(ctxt->gs);
	asm volatile ("movw %0, %%ss" :: "r" (ctxt->ss));

	wrmsrl(MSR_FS_BASE, ctxt->fs_base);
	wrmsrl(MSR_GS_BASE, ctxt->gs_base);
	wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
#endif

	/*
	 * restore XCR0 for xsave-capable CPUs.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();

	do_fpu_end();
	mtrr_bp_restore();
}

/* Needed by apm.c */
void restore_processor_state(void)
{
	__restore_processor_state(&saved_context);
	restore_sched_clock_state();
}
#ifdef CONFIG_X86_32
EXPORT_SYMBOL(restore_processor_state);
#endif
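For orientation: callers pair these two entry points around the low-level
sleep transition. On x86 the real call sites are assembly stubs (e.g.
do_suspend_lowlevel for ACPI S3 and the hibernation entry code), so the C
below is only a hypothetical illustration of the expected pairing:

	/* Hypothetical caller, for illustration only. */
	static void example_sleep_transition(void)
	{
		save_processor_state();		/* also snapshots sched_clock state */
		/* ... firmware suspends and later wakes the machine ... */
		restore_processor_state();	/* also rebases the cyc2ns offsets */
	}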