Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/apic_32.c    |  5
-rw-r--r--  arch/x86/kernel/apic_64.c    | 26
-rw-r--r--  arch/x86/kernel/cpu/amd.c    | 30
-rw-r--r--  arch/x86/kernel/cpu/amd_64.c | 25
-rw-r--r--  arch/x86/kernel/process.c    | 66
5 files changed, 69 insertions(+), 83 deletions(-)
diff --git a/arch/x86/kernel/apic_32.c b/arch/x86/kernel/apic_32.c
index 4b99b1b..c44206e 100644
--- a/arch/x86/kernel/apic_32.c
+++ b/arch/x86/kernel/apic_32.c
@@ -64,9 +64,8 @@ static int enable_local_apic __initdata;
/* Local APIC timer verification ok */
static int local_apic_timer_verify_ok;
-/* Disable local APIC timer from the kernel commandline or via dmi quirk
- or using CPU MSR check */
-int local_apic_timer_disabled;
+/* Disable local APIC timer from the kernel commandline or via dmi quirk */
+static int local_apic_timer_disabled;
/* Local APIC timer works in C2 */
int local_apic_timer_c2_ok;
EXPORT_SYMBOL_GPL(local_apic_timer_c2_ok);
diff --git a/arch/x86/kernel/apic_64.c b/arch/x86/kernel/apic_64.c
index 0633cfd..a5cc844 100644
--- a/arch/x86/kernel/apic_64.c
+++ b/arch/x86/kernel/apic_64.c
@@ -43,7 +43,7 @@
#include <mach_ipi.h>
#include <mach_apic.h>
-int disable_apic_timer __cpuinitdata;
+static int disable_apic_timer __cpuinitdata;
static int apic_calibrate_pmtmr __initdata;
int disable_apic;
@@ -422,32 +422,8 @@ void __init setup_boot_APIC_clock(void)
setup_APIC_timer();
}
-/*
- * AMD C1E enabled CPUs have a real nasty problem: Some BIOSes set the
- * C1E flag only in the secondary CPU, so when we detect the wreckage
- * we already have enabled the boot CPU local apic timer. Check, if
- * disable_apic_timer is set and the DUMMY flag is cleared. If yes,
- * set the DUMMY flag again and force the broadcast mode in the
- * clockevents layer.
- */
-static void __cpuinit check_boot_apic_timer_broadcast(void)
-{
- if (!disable_apic_timer ||
- (lapic_clockevent.features & CLOCK_EVT_FEAT_DUMMY))
- return;
-
- printk(KERN_INFO "AMD C1E detected late. Force timer broadcast.\n");
- lapic_clockevent.features |= CLOCK_EVT_FEAT_DUMMY;
-
- local_irq_enable();
- clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
- &boot_cpu_physical_apicid);
- local_irq_disable();
-}
-
void __cpuinit setup_secondary_APIC_clock(void)
{
- check_boot_apic_timer_broadcast();
setup_APIC_timer();
}
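
The comment removed above describes the race this patch is really about: some BIOSes set the C1E flag only on a secondary CPU, after the boot CPU's local apic timer is already set up, so any one-shot check at CPU bring-up can miss it. A toy userspace model (hypothetical plain C, not kernel code) of why moving the check into the idle loop, as c1e_idle() in process.c below does, closes that hole:

#include <stdbool.h>
#include <stdio.h>

/* BIOS set the C1E flag on CPU1 only, as the removed comment describes */
static bool c1e_flag[2] = { false, true };
static bool c1e_detected;	/* latches once, like the static in c1e_idle() */

static void idle_entry(int cpu)
{
	if (!c1e_detected && c1e_flag[cpu]) {
		c1e_detected = true;
		printf("CPU%d: C1E detected, forcing timer broadcast\n", cpu);
	}
}

int main(void)
{
	idle_entry(0);	/* boot CPU idles: flag clear; a one-shot boot check stops here */
	idle_entry(1);	/* secondary idles: flag set, detection latches */
	idle_entry(0);	/* every later idle on any CPU sees the latched state */
	return 0;
}
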
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index e76b49e..acc891a 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -24,31 +24,6 @@
extern void vide(void);
__asm__(".align 4\nvide: ret");
-#ifdef CONFIG_X86_LOCAL_APIC
-
-/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
-static __cpuinit int amd_apic_timer_broken(struct cpuinfo_x86 *c)
-{
- u32 lo, hi;
-
- if (c->x86 < 0x0F)
- return 0;
-
- /* Family 0x0f models < rev F do not have this MSR */
- if (c->x86 == 0x0f && c->x86_model < 0x40)
- return 0;
-
- rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
- if (lo & K8_INTP_C1E_ACTIVE_MASK) {
- if (smp_processor_id() != boot_cpu_physical_apicid)
- printk(KERN_INFO "AMD C1E detected late. "
- "Force timer broadcast.\n");
- return 1;
- }
- return 0;
-}
-#endif
-
int force_mwait __cpuinitdata;
static void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
@@ -285,11 +260,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c)
num_cache_leaves = 3;
}
-#ifdef CONFIG_X86_LOCAL_APIC
- if (amd_apic_timer_broken(c))
- local_apic_timer_disabled = 1;
-#endif
-
/* K6s reports MCEs but don't actually have all the MSRs */
if (c->x86 < 6)
clear_cpu_cap(c, X86_FEATURE_MCE);
diff --git a/arch/x86/kernel/cpu/amd_64.c b/arch/x86/kernel/cpu/amd_64.c
index f5fc161..f8d2058 100644
--- a/arch/x86/kernel/cpu/amd_64.c
+++ b/arch/x86/kernel/cpu/amd_64.c
@@ -110,28 +110,6 @@ static void __cpuinit early_init_amd_mc(struct cpuinfo_x86 *c)
#endif
}
-/* AMD systems with C1E don't have a working lAPIC timer. Check for that. */
-static __cpuinit int amd_apic_timer_broken(struct cpuinfo_x86 *c)
-{
- u32 lo, hi;
-
- if (c->x86 < 0x0F)
- return 0;
-
- /* Family 0x0f models < rev F do not have this MSR */
- if (c->x86 == 0x0f && c->x86_model < 0x40)
- return 0;
-
- rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
- if (lo & K8_INTP_C1E_ACTIVE_MASK) {
- if (smp_processor_id() != boot_cpu_physical_apicid)
- printk(KERN_INFO "AMD C1E detected late. "
- "Force timer broadcast.\n");
- return 1;
- }
- return 0;
-}
-
void __cpuinit early_init_amd(struct cpuinfo_x86 *c)
{
early_init_amd_mc(c);
@@ -212,9 +190,6 @@ void __cpuinit init_amd(struct cpuinfo_x86 *c)
if (c->x86 == 0x10)
amd_enable_pci_ext_cfg(c);
- if (amd_apic_timer_broken(c))
- disable_apic_timer = 1;
-
if (c == &boot_cpu_data && c->x86 >= 0xf && c->x86 <= 0x11) {
unsigned long long tseg;
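
Both copies of amd_apic_timer_broken() removed above perform the same MSR test that c1e_idle() now does lazily: read the K8 "interrupt pending message" register and test the C1E-active bits. A minimal sketch of that check from userspace via the msr driver, assuming the register and mask values from the msr-index.h of this era (MSR_K8_INT_PENDING_MSG = 0xc0010055, K8_INTP_C1E_ACTIVE_MASK = 0x18000000, i.e. bits 27 and 28); it needs root and a loaded msr module:

/* MSR index 0xc0010055 does not fit in a 32-bit off_t */
#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_K8_INT_PENDING_MSG	0xc0010055
#define K8_INTP_C1E_ACTIVE_MASK	0x18000000	/* bits 27 and 28 */

int main(int argc, char **argv)
{
	const char *dev = argc > 1 ? argv[1] : "/dev/cpu/0/msr";
	uint64_t val;
	int fd = open(dev, O_RDONLY);

	if (fd < 0) {
		perror(dev);
		return 1;
	}
	/* the msr driver maps the file offset to the MSR index */
	if (pread(fd, &val, sizeof(val), MSR_K8_INT_PENDING_MSG) != sizeof(val)) {
		perror("pread");
		return 1;
	}
	close(fd);
	printf("INT_PENDING_MSG: 0x%016" PRIx64 " -> C1E %s\n",
	       val, (val & K8_INTP_C1E_ACTIVE_MASK) ? "active" : "not active");
	return 0;
}
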
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 9fea146..68ad353 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -6,6 +6,7 @@
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/pm.h>
+#include <linux/clockchips.h>
struct kmem_cache *task_xstate_cachep;
@@ -219,6 +220,68 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
return (edx & MWAIT_EDX_C1);
}
+/*
+ * Check for AMD CPUs, which have potentially C1E support
+ */
+static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
+{
+ if (c->x86_vendor != X86_VENDOR_AMD)
+ return 0;
+
+ if (c->x86 < 0x0F)
+ return 0;
+
+ /* Family 0x0f models < rev F do not have C1E */
+ if (c->x86 == 0x0f && c->x86_model < 0x40)
+ return 0;
+
+ return 1;
+}
+
+/*
+ * C1E aware idle routine. We check for C1E active in the interrupt
+ * pending message MSR. If we detect C1E, then we handle it the same
+ * way as C3 power states (local apic timer and TSC stop)
+ */
+static void c1e_idle(void)
+{
+ static cpumask_t c1e_mask = CPU_MASK_NONE;
+ static int c1e_detected;
+
+ if (need_resched())
+ return;
+
+ if (!c1e_detected) {
+ u32 lo, hi;
+
+ rdmsr(MSR_K8_INT_PENDING_MSG, lo, hi);
+ if (lo & K8_INTP_C1E_ACTIVE_MASK) {
+ c1e_detected = 1;
+ mark_tsc_unstable("TSC halt in C1E");
+ printk(KERN_INFO "System has C1E enabled\n");
+ }
+ }
+
+ if (c1e_detected) {
+ int cpu = smp_processor_id();
+
+ if (!cpu_isset(cpu, c1e_mask)) {
+ cpu_set(cpu, c1e_mask);
+ /* Force broadcast so ACPI can not interfere */
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_FORCE,
+ &cpu);
+ printk(KERN_INFO "Switch to broadcast mode on CPU%d\n",
+ cpu);
+ }
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER, &cpu);
+ default_idle();
+ local_irq_disable();
+ clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT, &cpu);
+ local_irq_enable();
+ } else
+ default_idle();
+}
+
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_SMP
@@ -236,6 +299,9 @@ void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
*/
printk(KERN_INFO "using mwait in idle threads.\n");
pm_idle = mwait_idle;
+ } else if (check_c1e_idle(c)) {
+ printk(KERN_INFO "using C1E aware idle routine\n");
+ pm_idle = c1e_idle;
} else
pm_idle = default_idle;
}
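
For reference, on an affected two-CPU system the printks added above would produce a console sequence along these lines (reconstructed from the format strings in this patch, not captured from a real boot; the first CPU to see the C1E bits may be either one, and mark_tsc_unstable() additionally logs the TSC being marked unstable):

using C1E aware idle routine
System has C1E enabled
Switch to broadcast mode on CPU0
Switch to broadcast mode on CPU1
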