From 3e03bbeac541856aaaf1ce1ab0250b6a490e4099 Mon Sep 17 00:00:00 2001 From: Shunichi Fuji Date: Tue, 11 Aug 2009 03:34:40 +0900 Subject: x86: Add reboot quirk for every 5 series MacBook/Pro MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reboot does not work on my MacBook Pro 13 inch (MacBookPro5,5) either. It seems that all unibody MacBook and MacBookPro models require PCI reboot handling. The following model/machine ID list shows that the unibody MacBook/Pro models have 5-series model numbers: http://www.everymac.com/systems/by_capability/macs-by-machine-model-machine-id.html Signed-off-by: Shunichi Fuji Cc: Ozan Çağlayan LKML-Reference: <30046e3b0908101134p6487ddbftd8776e4ddef204be@mail.gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/reboot.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 9eb8976..a06e8d1 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -418,20 +418,20 @@ static int __init set_pci_reboot(const struct dmi_system_id *d) } static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { - { /* Handle problems with rebooting on Apple MacBook5,2 */ + { /* Handle problems with rebooting on Apple MacBook5 */ .callback = set_pci_reboot, - .ident = "Apple MacBook", + .ident = "Apple MacBook5", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5"), }, }, - { /* Handle problems with rebooting on Apple MacBookPro5,1 */ + { /* Handle problems with rebooting on Apple MacBookPro5 */ .callback = set_pci_reboot, - .ident = "Apple MacBookPro5,1", + .ident = "Apple MacBookPro5", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), - DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,1"), + DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5"), }, }, { } -- cgit v1.1 From 0d01f31439c1e4d602bf9fdc924ab66f407f5e38 Mon Sep 17 00:00:00 2001 From: Dmitry Torokhov Date: Sun, 9 Aug 2009 21:44:49 -0700 Subject: x86, mce: therm_throt - change when we print messages My Latitude d630 seems to be handling thermal events in SMI by lowering the max frequency of the CPU till it cools down but still leaks the "everything is normal" events. This spams the console with high-priority printks. Adjust the therm_throt driver to only print the message that the temperature returned to normal when leaving the throttling state. Also lower the severity of the "back to normal" message from KERN_CRIT to KERN_INFO. Signed-off-by: Dmitry Torokhov Acked-by: H.
Peter Anvin LKML-Reference: <20090810051513.0558F526EC9@mailhub.coreip.homeip.net> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/mcheck/therm_throt.c | 18 +++++++++++------- 1 file changed, 11 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c index bff8dd1..8bc64cf 100644 --- a/arch/x86/kernel/cpu/mcheck/therm_throt.c +++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c @@ -36,6 +36,7 @@ static DEFINE_PER_CPU(__u64, next_check) = INITIAL_JIFFIES; static DEFINE_PER_CPU(unsigned long, thermal_throttle_count); +static DEFINE_PER_CPU(bool, thermal_throttle_active); static atomic_t therm_throt_en = ATOMIC_INIT(0); @@ -96,24 +97,27 @@ static int therm_throt_process(int curr) { unsigned int cpu = smp_processor_id(); __u64 tmp_jiffs = get_jiffies_64(); + bool was_throttled = __get_cpu_var(thermal_throttle_active); + bool is_throttled = __get_cpu_var(thermal_throttle_active) = curr; - if (curr) + if (is_throttled) __get_cpu_var(thermal_throttle_count)++; - if (time_before64(tmp_jiffs, __get_cpu_var(next_check))) + if (!(was_throttled ^ is_throttled) && + time_before64(tmp_jiffs, __get_cpu_var(next_check))) return 0; __get_cpu_var(next_check) = tmp_jiffs + CHECK_INTERVAL; /* if we just entered the thermal event */ - if (curr) { + if (is_throttled) { printk(KERN_CRIT "CPU%d: Temperature above threshold, " - "cpu clock throttled (total events = %lu)\n", cpu, - __get_cpu_var(thermal_throttle_count)); + "cpu clock throttled (total events = %lu)\n", + cpu, __get_cpu_var(thermal_throttle_count)); add_taint(TAINT_MACHINE_CHECK); - } else { - printk(KERN_CRIT "CPU%d: Temperature/speed normal\n", cpu); + } else if (was_throttled) { + printk(KERN_INFO "CPU%d: Temperature/speed normal\n", cpu); } return 1; -- cgit v1.1 From 3c581a7f94542341bf0da496a226b44ac63521a8 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 11 Aug 2009 10:47:36 +0200 Subject: perf_counter, x86: Fix lapic printk message Instead of this garbled bootup on UP Pentium-M systems: [ 0.015048] Performance Counters: [ 0.016004] no Local APIC, try rebooting with lapicno PMU driver, software counters only. Print: [ 0.015050] Performance Counters: [ 0.016004] no APIC, boot with the "lapic" boot parameter to force-enable it. [ 0.017003] no PMU driver, software counters only. Cf: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index a7aa8f9..40e233a 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -1590,7 +1590,7 @@ static int p6_pmu_init(void) } if (!cpu_has_apic) { - pr_info("no Local APIC, try rebooting with lapic"); + pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); return -ENODEV; } -- cgit v1.1 From f64ccccb8afa43abdd63fcbd230f818d6ea0883f Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 11 Aug 2009 10:26:33 +0200 Subject: perf_counter, x86: Fix generic cache events on P6-mobile CPUs Johannes Stezenbach reported that 'perf stat' does not count cache-miss and cache-references events on his Pentium-M based laptop. This is because we left them blank in p6_perfmon_event_map[], fill them in. 
Reported-by: Johannes Stezenbach Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/perf_counter.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index 40e233a..fffc126 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -72,8 +72,8 @@ static const u64 p6_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = 0x0079, [PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0, - [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0000, - [PERF_COUNT_HW_CACHE_MISSES] = 0x0000, + [PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e, + [PERF_COUNT_HW_CACHE_MISSES] = 0x012e, [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4, [PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5, [PERF_COUNT_HW_BUS_CYCLES] = 0x0062, -- cgit v1.1 From fbd8b1819e80ac5a176d085fdddc3a34d1499318 Mon Sep 17 00:00:00 2001 From: Kevin Winchester Date: Mon, 10 Aug 2009 19:56:45 -0300 Subject: x86: Clear incorrectly forced X86_FEATURE_LAHF_LM flag Due to an erratum with certain AMD Athlon 64 processors, the BIOS may need to force enable the LAHF_LM capability. Unfortunately, in at least one case, the BIOS does this even for processors that do not support the functionality. Add a specific check that will clear the feature bit for processors known not to support the LAHF/SAHF instructions. Signed-off-by: Kevin Winchester Acked-by: Borislav Petkov LKML-Reference: <4A80A5AD.2000209@gmail.com> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/amd.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index e2485b0..63fddcd 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -400,6 +400,13 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) level = cpuid_eax(1); if((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58) set_cpu_cap(c, X86_FEATURE_REP_GOOD); + + /* + * Some BIOSes incorrectly force this feature, but only K8 + * revision D (model = 0x14) and later actually support it. + */ + if (c->x86_model < 0x14) + clear_cpu_cap(c, X86_FEATURE_LAHF_LM); } if (c->x86 == 0x10 || c->x86 == 0x11) set_cpu_cap(c, X86_FEATURE_REP_GOOD); -- cgit v1.1 From e8055139d996e85722984968472868d6dccb1490 Mon Sep 17 00:00:00 2001 From: Ondrej Zary Date: Tue, 11 Aug 2009 20:00:11 +0200 Subject: x86: Fix oops in identify_cpu() on CPUs without CPUID Kernel is broken for x86 CPUs without CPUID since 2.6.28. It crashes with NULL pointer dereference in identify_cpu(): 766 generic_identify(c); 767 768--> if (this_cpu->c_identify) 769 this_cpu->c_identify(c); this_cpu is NULL. This is because it's only initialized in get_cpu_vendor() function, which is not called if the CPU has no CPUID instruction. 
Signed-off-by: Ondrej Zary LKML-Reference: <200908112000.15993.linux@rainbow-software.org> Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/common.c | 48 ++++++++++++++++++++++---------------------- 1 file changed, 24 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f1961c0..5ce60a8 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -59,7 +59,30 @@ void __init setup_cpu_local_masks(void) alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); } -static const struct cpu_dev *this_cpu __cpuinitdata; +static void __cpuinit default_init(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_X86_64 + display_cacheinfo(c); +#else + /* Not much we can do here... */ + /* Check if at least it has cpuid */ + if (c->cpuid_level == -1) { + /* No cpuid. It must be an ancient CPU */ + if (c->x86 == 4) + strcpy(c->x86_model_id, "486"); + else if (c->x86 == 3) + strcpy(c->x86_model_id, "386"); + } +#endif +} + +static const struct cpu_dev __cpuinitconst default_cpu = { + .c_init = default_init, + .c_vendor = "Unknown", + .c_x86_vendor = X86_VENDOR_UNKNOWN, +}; + +static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { #ifdef CONFIG_X86_64 @@ -332,29 +355,6 @@ void switch_to_new_gdt(int cpu) static const struct cpu_dev *__cpuinitdata cpu_devs[X86_VENDOR_NUM] = {}; -static void __cpuinit default_init(struct cpuinfo_x86 *c) -{ -#ifdef CONFIG_X86_64 - display_cacheinfo(c); -#else - /* Not much we can do here... */ - /* Check if at least it has cpuid */ - if (c->cpuid_level == -1) { - /* No cpuid. It must be an ancient CPU */ - if (c->x86 == 4) - strcpy(c->x86_model_id, "486"); - else if (c->x86 == 3) - strcpy(c->x86_model_id, "386"); - } -#endif -} - -static const struct cpu_dev __cpuinitconst default_cpu = { - .c_init = default_init, - .c_vendor = "Unknown", - .c_x86_vendor = X86_VENDOR_UNKNOWN, -}; - static void __cpuinit get_model_name(struct cpuinfo_x86 *c) { unsigned int *v; -- cgit v1.1 From 04da8a43da804723a550f00dd158fd5b5e25ae35 Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 11 Aug 2009 10:40:08 +0200 Subject: perf_counter, x86: Fix/improve apic fallback Johannes Stezenbach reported that his Pentium-M based laptop does not have the local APIC enabled by default, and hence perfcounters do not get initialized. Add a fallback for this case: allow non-sampled counters and return with an error on sampled counters. This allows 'perf stat' to work out of box - and allows 'perf top' and 'perf record' to fall back on a hrtimer based sampling method. ( Passing 'lapic' on the boot line will allow hardware sampling to occur - but if the APIC is disabled permanently by the hardware then this fallback still allows more systems to use perfcounters. ) Also decouple perfcounter support from X86_LOCAL_APIC. -v2: fix typo breaking counters on all other systems ... 
Reported-by: Johannes Stezenbach Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo Cc: Frederic Weisbecker LKML-Reference: Signed-off-by: Ingo Molnar --- arch/x86/Kconfig | 2 +- arch/x86/kernel/cpu/perf_counter.c | 34 ++++++++++++++++++++++++++++++---- 2 files changed, 31 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 738bdc6..13ffa5d 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -24,6 +24,7 @@ config X86 select HAVE_UNSTABLE_SCHED_CLOCK select HAVE_IDE select HAVE_OPROFILE + select HAVE_PERF_COUNTERS if (!M386 && !M486) select HAVE_IOREMAP_PROT select HAVE_KPROBES select ARCH_WANT_OPTIONAL_GPIOLIB @@ -742,7 +743,6 @@ config X86_UP_IOAPIC config X86_LOCAL_APIC def_bool y depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC - select HAVE_PERF_COUNTERS if (!M386 && !M486) config X86_IO_APIC def_bool y diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c index fffc126..900332b 100644 --- a/arch/x86/kernel/cpu/perf_counter.c +++ b/arch/x86/kernel/cpu/perf_counter.c @@ -55,6 +55,7 @@ struct x86_pmu { int num_counters_fixed; int counter_bits; u64 counter_mask; + int apic; u64 max_period; u64 intel_ctrl; }; @@ -613,6 +614,7 @@ static DEFINE_MUTEX(pmc_reserve_mutex); static bool reserve_pmc_hardware(void) { +#ifdef CONFIG_X86_LOCAL_APIC int i; if (nmi_watchdog == NMI_LOCAL_APIC) @@ -627,9 +629,11 @@ static bool reserve_pmc_hardware(void) if (!reserve_evntsel_nmi(x86_pmu.eventsel + i)) goto eventsel_fail; } +#endif return true; +#ifdef CONFIG_X86_LOCAL_APIC eventsel_fail: for (i--; i >= 0; i--) release_evntsel_nmi(x86_pmu.eventsel + i); @@ -644,10 +648,12 @@ perfctr_fail: enable_lapic_nmi_watchdog(); return false; +#endif } static void release_pmc_hardware(void) { +#ifdef CONFIG_X86_LOCAL_APIC int i; for (i = 0; i < x86_pmu.num_counters; i++) { @@ -657,6 +663,7 @@ static void release_pmc_hardware(void) if (nmi_watchdog == NMI_LOCAL_APIC) enable_lapic_nmi_watchdog(); +#endif } static void hw_perf_counter_destroy(struct perf_counter *counter) @@ -748,6 +755,15 @@ static int __hw_perf_counter_init(struct perf_counter *counter) hwc->sample_period = x86_pmu.max_period; hwc->last_period = hwc->sample_period; atomic64_set(&hwc->period_left, hwc->sample_period); + } else { + /* + * If we have a PMU initialized but no APIC + * interrupts, we cannot sample hardware + * counters (user-space has to fall back and + * sample via a hrtimer based software counter): + */ + if (!x86_pmu.apic) + return -EOPNOTSUPP; } counter->destroy = hw_perf_counter_destroy; @@ -1449,18 +1465,22 @@ void smp_perf_pending_interrupt(struct pt_regs *regs) void set_perf_counter_pending(void) { +#ifdef CONFIG_X86_LOCAL_APIC apic->send_IPI_self(LOCAL_PENDING_VECTOR); +#endif } void perf_counters_lapic_init(void) { - if (!x86_pmu_initialized()) +#ifdef CONFIG_X86_LOCAL_APIC + if (!x86_pmu.apic || !x86_pmu_initialized()) return; /* * Always use NMI for PMU */ apic_write(APIC_LVTPC, APIC_DM_NMI); +#endif } static int __kprobes @@ -1484,7 +1504,9 @@ perf_counter_nmi_handler(struct notifier_block *self, regs = args->regs; +#ifdef CONFIG_X86_LOCAL_APIC apic_write(APIC_LVTPC, APIC_DM_NMI); +#endif /* * Can't rely on the handled return value to say it was our NMI, two * counters could trigger 'simultaneously' raising two back-to-back NMIs. 
@@ -1515,6 +1537,7 @@ static struct x86_pmu p6_pmu = { .event_map = p6_pmu_event_map, .raw_event = p6_pmu_raw_event, .max_events = ARRAY_SIZE(p6_perfmon_event_map), + .apic = 1, .max_period = (1ULL << 31) - 1, .version = 0, .num_counters = 2, @@ -1541,6 +1564,7 @@ static struct x86_pmu intel_pmu = { .event_map = intel_pmu_event_map, .raw_event = intel_pmu_raw_event, .max_events = ARRAY_SIZE(intel_perfmon_event_map), + .apic = 1, /* * Intel PMCs cannot be accessed sanely above 32 bit width, * so we install an artificial 1<<31 period regardless of @@ -1564,6 +1588,7 @@ static struct x86_pmu amd_pmu = { .num_counters = 4, .counter_bits = 48, .counter_mask = (1ULL << 48) - 1, + .apic = 1, /* use highest bit to detect overflow */ .max_period = (1ULL << 47) - 1, }; @@ -1589,13 +1614,14 @@ static int p6_pmu_init(void) return -ENODEV; } + x86_pmu = p6_pmu; + if (!cpu_has_apic) { pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n"); - return -ENODEV; + pr_info("no hardware sampling interrupt available.\n"); + x86_pmu.apic = 0; } - x86_pmu = p6_pmu; - return 0; } -- cgit v1.1 From 7a90e00dda0bae66b5232d5a37155f13a0581369 Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 07:41:45 +0900 Subject: sh: Make sure rte delay slots are nopped out on all parts. Future SH parts do not support any instruction but a nop in the rte delay slot, so make the change for all offending parts. SH-5 is excluded from this, and already has its own set of restrictions with regards to rte delay slot handling. Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh2/entry.S | 3 ++- arch/sh/kernel/cpu/sh2a/entry.S | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh2/entry.S b/arch/sh/kernel/cpu/sh2/entry.S index becc54c45..c8a4331 100644 --- a/arch/sh/kernel/cpu/sh2/entry.S +++ b/arch/sh/kernel/cpu/sh2/entry.S @@ -227,8 +227,9 @@ ENTRY(sh_bios_handler) mov.l @r15+, r14 add #8,r15 lds.l @r15+, pr + mov.l @r15+,r15 rte - mov.l @r15+,r15 + nop .align 2 1: .long gdb_vbr_vector #endif /* CONFIG_SH_STANDARD_BIOS */ diff --git a/arch/sh/kernel/cpu/sh2a/entry.S b/arch/sh/kernel/cpu/sh2a/entry.S index ab3903e..222742d 100644 --- a/arch/sh/kernel/cpu/sh2a/entry.S +++ b/arch/sh/kernel/cpu/sh2a/entry.S @@ -176,8 +176,9 @@ ENTRY(sh_bios_handler) movml.l @r15+,r14 add #8,r15 lds.l @r15+, pr + mov.l @r15+,r15 rte - mov.l @r15+,r15 + nop .align 2 1: .long gdb_vbr_vector #endif /* CONFIG_SH_STANDARD_BIOS */ -- cgit v1.1 From 7dd6662a92fe9a15ad565045aa60367995cc533d Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sat, 15 Aug 2009 07:43:21 +0900 Subject: sh: delay slot future proofing via EXPMASK on SH-4A parts. This implements EXPMASK initialization code for SH-4A parts, where it is possible to disable compat features that will go away in newer cores. Presently this includes disabling support for non-nop instructions in the rte delay slot, as well as a sleep instruction being placed in a delay slot (neither of which the kernel does any longer). As a result of this, any future offenders will have illegal slot exceptions generated for them. Associative writes for the memory-mapped cache array are still left enabled, until such a point that special cache operations for SH-4A are provided to move off of the current (and rather dated) SH-4 versions. 
Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/init.c | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c index ad85421..d40b9db 100644 --- a/arch/sh/kernel/cpu/init.c +++ b/arch/sh/kernel/cpu/init.c @@ -3,7 +3,7 @@ * * CPU init code * - * Copyright (C) 2002 - 2007 Paul Mundt + * Copyright (C) 2002 - 2009 Paul Mundt * Copyright (C) 2003 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public @@ -62,6 +62,37 @@ static void __init speculative_execution_init(void) #define speculative_execution_init() do { } while (0) #endif +#ifdef CONFIG_CPU_SH4A +#define EXPMASK 0xff2f0004 +#define EXPMASK_RTEDS (1 << 0) +#define EXPMASK_BRDSSLP (1 << 1) +#define EXPMASK_MMCAW (1 << 4) + +static void __init expmask_init(void) +{ + unsigned long expmask = __raw_readl(EXPMASK); + + /* + * Future proofing. + * + * Disable support for slottable sleep instruction + * and non-nop instructions in the rte delay slot. + */ + expmask &= ~(EXPMASK_RTEDS | EXPMASK_BRDSSLP); + + /* + * Enable associative writes to the memory-mapped cache array + * until the cache flush ops have been rewritten. + */ + expmask |= EXPMASK_MMCAW; + + __raw_writel(expmask, EXPMASK); + ctrl_barrier(); +} +#else +#define expmask_init() do { } while (0) +#endif + /* 2nd-level cache init */ void __uses_jump_to_uncached __attribute__ ((weak)) l2_cache_init(void) { @@ -321,4 +352,5 @@ asmlinkage void __init sh_cpu_init(void) #endif speculative_execution_init(); + expmask_init(); } -- cgit v1.1 From 180aa6e6aa11922dcd4c13df1967d62bb2ede76c Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 15 Aug 2009 00:04:00 +0100 Subject: sh: Set the cfa_offset to 0 if we see a DW_CFA_def_cfa_register op The way that the CFA is calculated can change as we progress through a function. If we see a DW_CFA_def_cfa_register op we need to reset the frame's cfa_offset value which may have been previously setup. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/dwarf.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index db02136..c6c5764 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -449,6 +449,7 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, count = dwarf_read_uleb128(current_insn, &frame->cfa_register); current_insn += count; + frame->cfa_offset = 0; frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; break; case DW_CFA_def_cfa_offset: -- cgit v1.1 From 9747e78b304b44d6fb73e2c8071406d55aa8bb75 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Sat, 15 Aug 2009 02:53:34 +0000 Subject: sh: use in-soc KEYSC on se7724 This patch updates the Solution Engine 7724 board code to use in-SoC KEYSC resources for the keyboard platform device. Using the in-SoC key scan controller fixes a crash-during-resume issue. Without this patch the KEYSC hardware block located in the board specific FPGA is used together with an external IRQ which is routed through the FPGA and handled by some board specific demux code. This board specific FPGA interrupt code does not implement desc->set_wake() so the enable_irq_wake() call in the sh_keysc driver will fail at suspend-to-ram time and the disable_irq_wake() will bomb out when resuming. Changing the platform data to use the in-SoC KEYSC hardware makes the se7724 board support code less special which is a good thing. 
Also, the board specific KEYSC pin setup code selects in-SoC pin functions already, which makes the current FPGA platform device data look like a typo. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/boards/mach-se/7724/setup.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/mach-se/7724/setup.c b/arch/sh/boards/mach-se/7724/setup.c index 8fed45a..15456a0 100644 --- a/arch/sh/boards/mach-se/7724/setup.c +++ b/arch/sh/boards/mach-se/7724/setup.c @@ -238,7 +238,7 @@ static struct platform_device ceu1_device = { }, }; -/* KEYSC */ +/* KEYSC in SoC (Needs SW33-2 set to ON) */ static struct sh_keysc_info keysc_info = { .mode = SH_KEYSC_MODE_1, .scan_timing = 10, @@ -255,12 +255,13 @@ static struct sh_keysc_info keysc_info = { static struct resource keysc_resources[] = { [0] = { - .start = 0x1a204000, - .end = 0x1a20400f, + .name = "KEYSC", + .start = 0x044b0000, + .end = 0x044b000f, .flags = IORESOURCE_MEM, }, [1] = { - .start = IRQ0_KEY, + .start = 79, .flags = IORESOURCE_IRQ, }, }; -- cgit v1.1 From 237674e050ae8ea40a432412df6c15d60b7ae8a6 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Sat, 15 Aug 2009 02:53:42 +0000 Subject: sh: sh7724 ddr self-refresh changes This patch updates the SuperH Mobile sleep assembly code with support for the DBSC memory controller found in the sh7724 processor. Without this fix, the memory hooked up to the sh7724 processor will never enter self-refresh mode before suspending to RAM. The effect of this is that the memory contents will most likely be lost upon resume, which may or may not be what you want. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/shmobile/sleep.S | 70 +++++++++++++++++++++++++++++++++++-- 1 file changed, 68 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S index 5d888ef..baf2d7d 100644 --- a/arch/sh/kernel/cpu/shmobile/sleep.S +++ b/arch/sh/kernel/cpu/shmobile/sleep.S @@ -26,8 +26,30 @@ ENTRY(sh_mobile_standby) tst #SUSP_SH_SF, r0 bt skip_set_sf +#ifdef CONFIG_CPU_SUBTYPE_SH7724 + /* DBSC: put memory in self-refresh mode */ - /* SDRAM: disable power down and put in self-refresh mode */ + mov.l dben_reg, r4 + mov.l dben_data0, r1 + mov.l r1, @r4 + + mov.l dbrfpdn0_reg, r4 + mov.l dbrfpdn0_data0, r1 + mov.l r1, @r4 + + mov.l dbcmdcnt_reg, r4 + mov.l dbcmdcnt_data0, r1 + mov.l r1, @r4 + + mov.l dbcmdcnt_reg, r4 + mov.l dbcmdcnt_data1, r1 + mov.l r1, @r4 + + mov.l dbrfpdn0_reg, r4 + mov.l dbrfpdn0_data1, r1 + mov.l r1, @r4 +#else + /* SBSC: disable power down and put in self-refresh mode */ mov.l 1f, r4 mov.l 2f, r1 mov.l @r4, r2 @@ -35,6 +57,7 @@ ENTRY(sh_mobile_standby) mov.l 3f, r3 and r3, r2 mov.l r2, @r4 +#endif skip_set_sf: tst #SUSP_SH_SLEEP, r0 bt test_standby /* set mode to "sleep mode" */ bra do_sleep mov #0x00, r1 test_standby: tst #SUSP_SH_STANDBY, r0 bt test_rstandby @@ -84,7 +107,36 @@ done_sleep: tst #SUSP_SH_SF, r0 bt skip_restore_sf - /* SDRAM: set auto-refresh mode */ +#ifdef CONFIG_CPU_SUBTYPE_SH7724 + /* DBSC: put memory in auto-refresh mode */ + + mov.l dbrfpdn0_reg, r4 + mov.l dbrfpdn0_data0, r1 + mov.l r1, @r4 + + /* sleep 140 ns */ + nop + nop + nop + nop + + mov.l dbcmdcnt_reg, r4 + mov.l dbcmdcnt_data0, r1 + mov.l r1, @r4 + + mov.l dbcmdcnt_reg, r4 + mov.l dbcmdcnt_data1, r1 + mov.l r1, @r4 + + mov.l dben_reg, r4 + mov.l dben_data1, r1 + mov.l r1, @r4 + + mov.l dbrfpdn0_reg, r4 + mov.l dbrfpdn0_data2, r1 + mov.l r1, @r4 +#else + /* SBSC: set auto-refresh mode */ mov.l 1f, r4 mov.l @r4, r2 mov.l 4f, r3 and r3, r2 mov.l r2, @r4 mov.l 6f, r4 mov.l 7f, r1 mov.l 8f, r2 mov.l @r4, r3 mov #-1, r4 add r4, r3 or r2, r3 mov.l
r3, @r1 +#endif skip_restore_sf: rts nop .balign 4 +#ifdef CONFIG_CPU_SUBTYPE_SH7724 +dben_reg: .long 0xfd000010 /* DBEN */ +dben_data0: .long 0 +dben_data1: .long 1 +dbrfpdn0_reg: .long 0xfd000040 /* DBRFPDN0 */ +dbrfpdn0_data0: .long 0 +dbrfpdn0_data1: .long 1 +dbrfpdn0_data2: .long 0x00010000 +dbcmdcnt_reg: .long 0xfd000014 /* DBCMDCNT */ +dbcmdcnt_data0: .long 2 +dbcmdcnt_data1: .long 4 +#else 1: .long 0xfe400008 /* SDCR0 */ 2: .long 0x00000400 3: .long 0xffff7fff 4: .long 0xfffffbff +#endif 5: .long 0xa4150020 /* STBCR */ 6: .long 0xfe40001c /* RTCOR */ 7: .long 0xfe400018 /* RTCNT */ -- cgit v1.1 From 38f9ddf44150c1a213b41726384d055f7c35ec4f Mon Sep 17 00:00:00 2001 From: Paul Mundt Date: Sun, 16 Aug 2009 03:35:26 +0900 Subject: sh: Merge the _32/_64 variants of arch/sh/kernel/Makefile. This uses the BITS export as per x86 in order to allow the same Makefile to be used. Signed-off-by: Paul Mundt --- arch/sh/kernel/Makefile | 43 +++++++++++++++++++++++++++++++++++++++---- arch/sh/kernel/Makefile_32 | 40 ---------------------------------------- arch/sh/kernel/Makefile_64 | 20 -------------------- 3 files changed, 39 insertions(+), 64 deletions(-) delete mode 100644 arch/sh/kernel/Makefile_32 delete mode 100644 arch/sh/kernel/Makefile_64 (limited to 'arch') diff --git a/arch/sh/kernel/Makefile b/arch/sh/kernel/Makefile index 349d833..f37cf02 100644 --- a/arch/sh/kernel/Makefile +++ b/arch/sh/kernel/Makefile @@ -1,5 +1,40 @@ -ifeq ($(CONFIG_SUPERH32),y) -include ${srctree}/arch/sh/kernel/Makefile_32 -else -include ${srctree}/arch/sh/kernel/Makefile_64 +# +# Makefile for the Linux/SuperH kernel. +# + +extra-y := head_$(BITS).o init_task.o vmlinux.lds + +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities +CFLAGS_REMOVE_ftrace.o = -pg endif + +obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ + machvec.o process_$(BITS).o ptrace_$(BITS).o setup.o \ + signal_$(BITS).o sys_sh.o sys_sh$(BITS).o syscalls_$(BITS).o \ + time.o topology.o traps.o traps_$(BITS).o unwinder.o + +obj-y += cpu/ +obj-$(CONFIG_VSYSCALL) += vsyscall/ +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o +obj-$(CONFIG_KGDB) += kgdb.o +obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o +obj-$(CONFIG_MODULES) += sh_ksyms_$(BITS).o module.o +obj-$(CONFIG_EARLY_PRINTK) += early_printk.o +obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o +obj-$(CONFIG_CRASH_DUMP) += crash_dump.o +obj-$(CONFIG_STACKTRACE) += stacktrace.o +obj-$(CONFIG_IO_TRAPPED) += io_trapped.o +obj-$(CONFIG_KPROBES) += kprobes.o +obj-$(CONFIG_GENERIC_GPIO) += gpio.o +obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o +obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o +obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o +obj-$(CONFIG_DUMP_CODE) += disassemble.o +obj-$(CONFIG_HIBERNATION) += swsusp.o +obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o + +obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o + +EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/Makefile_32 b/arch/sh/kernel/Makefile_32 deleted file mode 100644 index f2245eb..0000000 --- a/arch/sh/kernel/Makefile_32 +++ /dev/null @@ -1,40 +0,0 @@ -# -# Makefile for the Linux/SuperH kernel. 
-# - -extra-y := head_32.o init_task.o vmlinux.lds - -ifdef CONFIG_FUNCTION_TRACER -# Do not profile debug and lowlevel utilities -CFLAGS_REMOVE_ftrace.o = -pg -endif - -obj-y := debugtraps.o dumpstack.o idle.o io.o io_generic.o irq.o \ - machvec.o process_32.o ptrace_32.o setup.o signal_32.o \ - sys_sh.o sys_sh32.o syscalls_32.o time.o topology.o \ - traps.o traps_32.o unwinder.o - -obj-y += cpu/ -obj-$(CONFIG_VSYSCALL) += vsyscall/ -obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SH_STANDARD_BIOS) += sh_bios.o -obj-$(CONFIG_KGDB) += kgdb.o -obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o -obj-$(CONFIG_MODULES) += sh_ksyms_32.o module.o -obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o -obj-$(CONFIG_CRASH_DUMP) += crash_dump.o -obj-$(CONFIG_STACKTRACE) += stacktrace.o -obj-$(CONFIG_IO_TRAPPED) += io_trapped.o -obj-$(CONFIG_KPROBES) += kprobes.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o -obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o -obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o -obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o -obj-$(CONFIG_DUMP_CODE) += disassemble.o -obj-$(CONFIG_HIBERNATION) += swsusp.o -obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o - -obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o - -EXTRA_CFLAGS += -Werror diff --git a/arch/sh/kernel/Makefile_64 b/arch/sh/kernel/Makefile_64 deleted file mode 100644 index 639ee51..0000000 --- a/arch/sh/kernel/Makefile_64 +++ /dev/null @@ -1,20 +0,0 @@ -extra-y := head_64.o init_task.o vmlinux.lds - -obj-y := debugtraps.o idle.o io.o io_generic.o irq.o machvec.o process_64.o \ - ptrace_64.o setup.o signal_64.o sys_sh.o sys_sh64.o \ - syscalls_64.o time.o topology.o traps.o traps_64.o unwinder.o - -obj-y += cpu/ -obj-$(CONFIG_SMP) += smp.o -obj-$(CONFIG_SH_CPU_FREQ) += cpufreq.o -obj-$(CONFIG_MODULES) += sh_ksyms_64.o module.o -obj-$(CONFIG_EARLY_PRINTK) += early_printk.o -obj-$(CONFIG_CRASH_DUMP) += crash_dump.o -obj-$(CONFIG_STACKTRACE) += stacktrace.o -obj-$(CONFIG_IO_TRAPPED) += io_trapped.o -obj-$(CONFIG_GENERIC_GPIO) += gpio.o -obj-$(CONFIG_DWARF_UNWINDER) += dwarf.o - -obj-$(CONFIG_GENERIC_CLOCKEVENTS_BROADCAST) += localtimer.o - -EXTRA_CFLAGS += -Werror -- cgit v1.1 From b955873bf530ee4b80e6c8b734521ad07cbaed7e Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sat, 15 Aug 2009 23:10:57 +0100 Subject: sh: Try again at getting the initial return address for an unwind The previous hack for calculating the return address for the first frame we unwind (dwarf_unwinder_dump) didn't always work. The problem was that it assumed once it read the rule for calculating the return address, there would be no new rules for calculating it. This isn't true because the way in which the CFA is calculated can change as you progress through a function and the return address is figured out using the CFA. Therefore, the way to calculate the return address can change. So, instead of using some offset from the beginning of dwarf_unwind_stack which is just a flakey approach, and instead of executing instructions from the FDE until the return address is setup, we now figure out the pc in dwarf_unwind_stack() just before we call dwarf_cfa_execute_insns(). 
Signed-off-by: Matt Fleming --- arch/sh/kernel/dwarf.c | 41 ++++++----------------------------------- 1 file changed, 6 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index c6c5764..44e674e 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -330,7 +330,6 @@ struct dwarf_fde *dwarf_lookup_fde(unsigned long pc) * @fde: the FDE for this function * @frame: the instructions calculate the CFA for this frame * @pc: the program counter of the address we're interested in - * @define_ra: keep executing insns until the return addr reg is defined? * * Execute the Call Frame instruction sequence starting at * @insn_start and ending at @insn_end. The instructions describe @@ -342,36 +341,17 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, struct dwarf_cie *cie, struct dwarf_fde *fde, struct dwarf_frame *frame, - unsigned long pc, - bool define_ra) + unsigned long pc) { unsigned char insn; unsigned char *current_insn; unsigned int count, delta, reg, expr_len, offset; - bool seen_ra_reg; current_insn = insn_start; - /* - * If we're executing instructions for the dwarf_unwind_stack() - * FDE we need to keep executing instructions until the value of - * DWARF_ARCH_RA_REG is defined. See the comment in - * dwarf_unwind_stack() for more details. - */ - if (define_ra) - seen_ra_reg = false; - else - seen_ra_reg = true; - - while (current_insn < insn_end && (frame->pc <= pc || !seen_ra_reg) ) { + while (current_insn < insn_end && frame->pc <= pc) { insn = __raw_readb(current_insn++); - if (!seen_ra_reg) { - if (frame->num_regs >= DWARF_ARCH_RA_REG && - frame->regs[DWARF_ARCH_RA_REG].flags) - seen_ra_reg = true; - } - /* * Firstly, handle the opcodes that embed their operands * in the instructions. @@ -511,26 +491,17 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, struct dwarf_fde *fde; unsigned long addr; int i, offset; - bool define_ra = false; /* * If this is the first invocation of this recursive function we * need get the contents of a physical register to get the CFA * in order to begin the virtual unwinding of the stack. * - * Setting "define_ra" to true indictates that we want - * dwarf_cfa_execute_insns() to continue executing instructions - * until we know how to calculate the value of DWARF_ARCH_RA_REG - * (which we need in order to kick off the whole unwinding - * process). - * * NOTE: the return address is guaranteed to be setup by the * time this function makes its first function call. */ - if (!pc && !prev) { - pc = (unsigned long)&dwarf_unwind_stack; - define_ra = true; - } + if (!pc && !prev) + pc = (unsigned long)current_text_addr(); frame = kzalloc(sizeof(*frame), GFP_ATOMIC); if (!frame) @@ -566,11 +537,11 @@ struct dwarf_frame *dwarf_unwind_stack(unsigned long pc, /* CIE initial instructions */ dwarf_cfa_execute_insns(cie->initial_instructions, cie->instructions_end, cie, fde, - frame, pc, false); + frame, pc); /* FDE instructions */ dwarf_cfa_execute_insns(fde->instructions, fde->end, cie, - fde, frame, pc, define_ra); + fde, frame, pc); /* Calculate the CFA */ switch (frame->flags) { -- cgit v1.1 From cd7246f0e2747bd2b43d25d0f63f05db182a62c0 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Sun, 16 Aug 2009 01:44:33 +0100 Subject: sh: Add support for DWARF GNU extensions Also, remove the "fix" to DW_CFA_def_cfa_register where we reset the frame's cfa_offset to 0. 
This action is incorrect when handling DW_CFA_def_cfa_register as the DWARF spec specifically states that the previous contents of cfa_offset should be used with the new register. The reason that I thought cfa_offset should be reset to 0 was because it was being assigned a bogus value prior to executing the DW_CFA_def_cfa_register op. It turns out that the bogus cfa_offset value came from interpreting .cfi_escape pseudo-ops (those used by the GNU extensions) as CFA_DW_def_cfa ops. Signed-off-by: Matt Fleming --- arch/sh/include/asm/dwarf.h | 4 ++++ arch/sh/kernel/dwarf.c | 14 +++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/include/asm/dwarf.h b/arch/sh/include/asm/dwarf.h index 60b1807..d3d3837 100644 --- a/arch/sh/include/asm/dwarf.h +++ b/arch/sh/include/asm/dwarf.h @@ -340,6 +340,10 @@ struct dwarf_stack { #define DW_CFA_lo_user 0x1c #define DW_CFA_hi_user 0x3f +/* GNU extension opcodes */ +#define DW_CFA_GNU_args_size 0x2e +#define DW_CFA_GNU_negative_offset_extended 0x2f + /* * Some call frame instructions encode their operands in the opcode. We * need some helper functions to extract both the opcode and operands diff --git a/arch/sh/kernel/dwarf.c b/arch/sh/kernel/dwarf.c index 44e674e..5fd6e60 100644 --- a/arch/sh/kernel/dwarf.c +++ b/arch/sh/kernel/dwarf.c @@ -429,7 +429,6 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, count = dwarf_read_uleb128(current_insn, &frame->cfa_register); current_insn += count; - frame->cfa_offset = 0; frame->flags |= DWARF_FRAME_CFA_REG_OFFSET; break; case DW_CFA_def_cfa_offset: @@ -465,6 +464,19 @@ static int dwarf_cfa_execute_insns(unsigned char *insn_start, frame->regs[reg].flags |= DWARF_REG_OFFSET; frame->regs[reg].addr = offset; break; + case DW_CFA_GNU_args_size: + count = dwarf_read_uleb128(current_insn, &offset); + current_insn += count; + break; + case DW_CFA_GNU_negative_offset_extended: + count = dwarf_read_uleb128(current_insn, ®); + current_insn += count; + count = dwarf_read_uleb128(current_insn, &offset); + offset *= cie->data_alignment_factor; + dwarf_frame_alloc_regs(frame, reg); + frame->regs[reg].flags |= DWARF_REG_OFFSET; + frame->regs[reg].addr = -offset; + break; default: pr_debug("unhandled DWARF instruction 0x%x\n", insn); break; -- cgit v1.1 From 309214af53f030e0664aea011a9f628c5eb9cc2d Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Mon, 17 Aug 2009 09:27:29 +0000 Subject: sh: rework SuperH Mobile sleep code exception handling This patch updates the exception handling in the sleep code for SuperH Mobile. With the patch applied the sleep code always rewrites the VBR and resumes from the exception vector, re-initializes hardware and jumps straight to the original interrupt vector. Tested on sh7722 and sh7724 with "Sleep Mode", "Sleep Mode + SF" and "Software Standby Mode + SF" with CONFIG_SUSPEND. 
Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/shmobile/pm.c | 16 +--- arch/sh/kernel/cpu/shmobile/sleep.S | 155 ++++++++++++++++++++++++------------ 2 files changed, 107 insertions(+), 64 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/shmobile/pm.c b/arch/sh/kernel/cpu/shmobile/pm.c index de078d2..ee3c2aa 100644 --- a/arch/sh/kernel/cpu/shmobile/pm.c +++ b/arch/sh/kernel/cpu/shmobile/pm.c @@ -41,23 +41,11 @@ extern const unsigned int sh_mobile_standby_size; void sh_mobile_call_standby(unsigned long mode) { - extern void *vbr_base; void *onchip_mem = (void *)ILRAM_BASE; - void (*standby_onchip_mem)(unsigned long) = onchip_mem; - - /* Note: Wake up from sleep may generate exceptions! - * Setup VBR to point to on-chip ram if self-refresh is - * going to be used. - */ - if (mode & SUSP_SH_SF) - asm volatile("ldc %0, vbr" : : "r" (onchip_mem) : "memory"); + void (*standby_onchip_mem)(unsigned long, unsigned long) = onchip_mem; /* Let assembly snippet in on-chip memory handle the rest */ - standby_onchip_mem(mode); - - /* Put VBR back in System RAM again */ - if (mode & SUSP_SH_SF) - asm volatile("ldc %0, vbr" : : "r" (&vbr_base) : "memory"); + standby_onchip_mem(mode, ILRAM_BASE); } static int sh_pm_enter(suspend_state_t state) diff --git a/arch/sh/kernel/cpu/shmobile/sleep.S b/arch/sh/kernel/cpu/shmobile/sleep.S index baf2d7d..a439e6c 100644 --- a/arch/sh/kernel/cpu/shmobile/sleep.S +++ b/arch/sh/kernel/cpu/shmobile/sleep.S @@ -16,19 +16,52 @@ #include #include +/* + * Kernel mode register usage, see entry.S: + * k0 scratch + * k1 scratch + * k4 scratch + */ +#define k0 r0 +#define k1 r1 +#define k4 r4 + /* manage self-refresh and enter standby mode. * this code will be copied to on-chip memory and executed from there. 
*/ .balign 4096,0,4096 ENTRY(sh_mobile_standby) + + /* save original vbr */ + stc vbr, r1 + mova saved_vbr, r0 + mov.l r1, @r0 + + /* point vbr to our on-chip memory page */ + ldc r5, vbr + + /* save return address */ + mova saved_spc, r0 + sts pr, r5 + mov.l r5, @r0 + + /* save sr */ + mova saved_sr, r0 + stc sr, r5 + mov.l r5, @r0 + + /* save mode flags */ + mova saved_mode, r0 + mov.l r4, @r0 + + /* put mode flags in r0 */ mov r4, r0 tst #SUSP_SH_SF, r0 bt skip_set_sf #ifdef CONFIG_CPU_SUBTYPE_SH7724 /* DBSC: put memory in self-refresh mode */ - mov.l dben_reg, r4 mov.l dben_data0, r1 mov.l r1, @r4 @@ -60,14 +93,6 @@ ENTRY(sh_mobile_standby) #endif skip_set_sf: - tst #SUSP_SH_SLEEP, r0 - bt test_standby - - /* set mode to "sleep mode" */ - bra do_sleep - mov #0x00, r1 - -test_standby: tst #SUSP_SH_STANDBY, r0 bt test_rstandby @@ -85,77 +110,107 @@ test_rstandby: test_ustandby: tst #SUSP_SH_USTANDBY, r0 - bt done_sleep + bt force_sleep /* set mode to "u-standby mode" */ - mov #0x10, r1 + bra do_sleep + mov #0x10, r1 - /* fall-through */ +force_sleep: + + /* set mode to "sleep mode" */ + mov #0x00, r1 do_sleep: /* setup and enter selected standby mode */ mov.l 5f, r4 mov.l r1, @r4 +again: sleep + bra again + nop + +restore_jump_vbr: + /* setup spc with return address to c code */ + mov.l saved_spc, k0 + ldc k0, spc + + /* restore vbr */ + mov.l saved_vbr, k0 + ldc k0, vbr + + /* setup ssr with saved sr */ + mov.l saved_sr, k0 + ldc k0, ssr + + /* get mode flags */ + mov.l saved_mode, k0 done_sleep: /* reset standby mode to sleep mode */ - mov.l 5f, r4 - mov #0x00, r1 - mov.l r1, @r4 + mov.l 5f, k4 + mov #0x00, k1 + mov.l k1, @k4 - tst #SUSP_SH_SF, r0 + tst #SUSP_SH_SF, k0 bt skip_restore_sf #ifdef CONFIG_CPU_SUBTYPE_SH7724 /* DBSC: put memory in auto-refresh mode */ + mov.l dbrfpdn0_reg, k4 + mov.l dbrfpdn0_data0, k1 + mov.l k1, @k4 - mov.l dbrfpdn0_reg, r4 - mov.l dbrfpdn0_data0, r1 - mov.l r1, @r4 - - /* sleep 140 ns */ - nop + nop /* sleep 140 ns */ nop nop nop - mov.l dbcmdcnt_reg, r4 - mov.l dbcmdcnt_data0, r1 - mov.l r1, @r4 + mov.l dbcmdcnt_reg, k4 + mov.l dbcmdcnt_data0, k1 + mov.l k1, @k4 - mov.l dbcmdcnt_reg, r4 - mov.l dbcmdcnt_data1, r1 - mov.l r1, @r4 + mov.l dbcmdcnt_reg, k4 + mov.l dbcmdcnt_data1, k1 + mov.l k1, @k4 - mov.l dben_reg, r4 - mov.l dben_data1, r1 - mov.l r1, @r4 + mov.l dben_reg, k4 + mov.l dben_data1, k1 + mov.l k1, @k4 - mov.l dbrfpdn0_reg, r4 - mov.l dbrfpdn0_data2, r1 - mov.l r1, @r4 + mov.l dbrfpdn0_reg, k4 + mov.l dbrfpdn0_data2, k1 + mov.l k1, @k4 #else /* SBSC: set auto-refresh mode */ - mov.l 1f, r4 - mov.l @r4, r2 - mov.l 4f, r3 - and r3, r2 - mov.l r2, @r4 - mov.l 6f, r4 - mov.l 7f, r1 - mov.l 8f, r2 - mov.l @r4, r3 - mov #-1, r4 - add r4, r3 - or r2, r3 - mov.l r3, @r1 + mov.l 1f, k4 + mov.l @k4, k0 + mov.l 4f, k1 + and k1, k0 + mov.l k0, @k4 + mov.l 6f, k4 + mov.l 8f, k0 + mov.l @k4, k1 + mov #-1, k4 + add k4, k1 + or k1, k0 + mov.l 7f, k1 + mov.l k0, @k1 #endif skip_restore_sf: - rts + /* jump to vbr vector */ + mov.l saved_vbr, k0 + mov.l offset_vbr, k4 + add k4, k0 + jmp @k0 nop .balign 4 +saved_mode: .long 0 +saved_spc: .long 0 +saved_sr: .long 0 +saved_vbr: .long 0 +offset_vbr: .long 0x600 #ifdef CONFIG_CPU_SUBTYPE_SH7724 dben_reg: .long 0xfd000010 /* DBEN */ dben_data0: .long 0 @@ -178,12 +233,12 @@ dbcmdcnt_data1: .long 4 7: .long 0xfe400018 /* RTCNT */ 8: .long 0xa55a0000 + /* interrupt vector @ 0x600 */ .balign 0x400,0,0x400 .long 0xdeadbeef .balign 0x200,0,0x200 - /* sh7722 will end up here in sleep mode */ - rte + bra restore_jump_vbr 
nop sh_mobile_standby_end: -- cgit v1.1 From 63cd91dfd4976cb9687b07e9a9864912631d2e08 Mon Sep 17 00:00:00 2001 From: Magnus Damm Date: Mon, 17 Aug 2009 09:41:40 +0000 Subject: sh: SuperH Mobile Software Standby support for cpuidle This patch adds "SuperH Mobile Standby Mode [SF]" to the list of cpuidle sleep modes. If the software latency requirements from cpuidle are met together with fulfilled hardware requirements then deep sleep modes can be entered. Tested on sh7722 and sh7724 with "Sleep Mode", "Sleep Mode + SF" and "Software Standby Mode + SF" together with a multimedia work load and flood ping without packet drop. Signed-off-by: Magnus Damm Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/shmobile/cpuidle.c | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/shmobile/cpuidle.c b/arch/sh/kernel/cpu/shmobile/cpuidle.c index 4afdd97..1c504bd 100644 --- a/arch/sh/kernel/cpu/shmobile/cpuidle.c +++ b/arch/sh/kernel/cpu/shmobile/cpuidle.c @@ -21,6 +21,7 @@ static unsigned long cpuidle_mode[] = { SUSP_SH_SLEEP, /* regular sleep mode */ SUSP_SH_SLEEP | SUSP_SH_SF, /* sleep mode + self refresh */ + SUSP_SH_STANDBY | SUSP_SH_SF, /* software standby mode + self refresh */ }; static int cpuidle_sleep_enter(struct cpuidle_device *dev, @@ -96,6 +97,16 @@ void sh_mobile_setup_cpuidle(void) state->flags |= CPUIDLE_FLAG_TIME_VALID; state->enter = cpuidle_sleep_enter; + state = &dev->states[i++]; + snprintf(state->name, CPUIDLE_NAME_LEN, "C2"); + strncpy(state->desc, "SuperH Mobile Standby Mode [SF]", CPUIDLE_DESC_LEN); + state->exit_latency = 2300; + state->target_residency = 1 * 2; + state->power_usage = 1; + state->flags = 0; + state->flags |= CPUIDLE_FLAG_TIME_VALID; + state->enter = cpuidle_sleep_enter; + dev->state_count = i; cpuidle_register_device(dev); -- cgit v1.1 From f3a8308864f920d2babe76921733d76ec4a11fb8 Mon Sep 17 00:00:00 2001 From: Matt Fleming Date: Tue, 18 Aug 2009 11:35:09 +0900 Subject: sh: Add a few missing irqflags tracing markers. save_regs contains an SR modification without an irqflags annotation, which resulted in a missing TRACE_IRQS_OFF in the interrupt exception path on SH-3/SH4. I've also moved the TRACE_IRQS_OFF/ON annotation when returning from the interrupt to just before we call __restore_all. This seems like the most logical place to put this because the annotation is for when we restore the SR register so we should delay the annotation until as last as possible. We were also missing a TRACE_IRQS_OFF in resume_kernel when CONFIG_PREEMPT is enabled. The end result is that this fixes up the lockdep engine debugging support with CONFIG_PREEMPT enabled on all SH-3/4 parts. Signed-off-by: Matt Fleming Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh3/entry.S | 8 ++++++++ arch/sh/kernel/entry-common.S | 27 ++++++++++++++++----------- 2 files changed, 24 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh3/entry.S b/arch/sh/kernel/cpu/sh3/entry.S index 67ad646..272636e 100644 --- a/arch/sh/kernel/cpu/sh3/entry.S +++ b/arch/sh/kernel/cpu/sh3/entry.S @@ -508,6 +508,14 @@ ENTRY(handle_interrupt) bsr save_regs ! needs original pr value in k3 mov #-1, k2 ! default vector kept in k2 + stc sr, r0 ! get status register + shlr2 r0 + and #0x3c, r0 + cmp/eq #0x3c, r0 + bf 9f + TRACE_IRQS_OFF +9: + ! Setup return address and jump to do_IRQ mov.l 4f, r9 ! fetch return address lds r9, pr ! 
put return address in pr diff --git a/arch/sh/kernel/entry-common.S b/arch/sh/kernel/entry-common.S index e63178f..7004776 100644 --- a/arch/sh/kernel/entry-common.S +++ b/arch/sh/kernel/entry-common.S @@ -77,15 +77,6 @@ ENTRY(ret_from_irq) ! mov #OFF_SR, r0 mov.l @(r0,r15), r0 ! get status register - - shlr2 r0 - and #0x3c, r0 - cmp/eq #0x3c, r0 - bt 9f - TRACE_IRQS_ON -9: - mov #OFF_SR, r0 - mov.l @(r0,r15), r0 ! get status register shll r0 shll r0 ! kernel space? get_current_thread_info r8, r0 @@ -96,6 +87,7 @@ ENTRY(ret_from_irq) nop ENTRY(resume_kernel) cli + TRACE_IRQS_OFF mov.l @(TI_PRE_COUNT,r8), r0 ! current_thread_info->preempt_count tst r0, r0 bf noresched @@ -213,12 +205,25 @@ syscall_trace_entry: mov.l r0, @(OFF_R0,r15) ! Return value __restore_all: - mov.l 1f, r0 + mov #OFF_SR, r0 + mov.l @(r0,r15), r0 ! get status register + + shlr2 r0 + and #0x3c, r0 + cmp/eq #0x3c, r0 + bt 1f + TRACE_IRQS_ON + bra 2f + nop +1: + TRACE_IRQS_OFF +2: + mov.l 3f, r0 jmp @r0 nop .align 2 -1: .long restore_all +3: .long restore_all .align 2 syscall_badsys: ! Bad syscall number -- cgit v1.1 From 7a01be28bac860086dbf659665d70928ae11c192 Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 18 Aug 2009 06:51:28 +0000 Subject: sh: mach-kfr2r09: add FLLFRQ value for PLL correction. The FLLFRQ setting is needed to use the correct PLL clock for kfr2r09. Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/include/mach-kfr2r09/mach/partner-jet-setup.txt | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/sh/include/mach-kfr2r09/mach/partner-jet-setup.txt b/arch/sh/include/mach-kfr2r09/mach/partner-jet-setup.txt index 9c85088..25801d4 100644 --- a/arch/sh/include/mach-kfr2r09/mach/partner-jet-setup.txt +++ b/arch/sh/include/mach-kfr2r09/mach/partner-jet-setup.txt @@ -24,6 +24,7 @@ LIST "setup clocks" ED 0xa4150004, 0x00000050 ED 0xa4150000, 0x91053508 WAIT 1 +ED 0xa4150050, 0x00000340 ED 0xa4150024, 0x00005000 LIST "setup pins" -- cgit v1.1 From b2ea8b421515ddd692c88fc5afb0e7f93e96e6cb Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 18 Aug 2009 06:51:47 +0000 Subject: sh: clkfwk: Support additional IFC divisor on SH7724. This patch updates the FRQCRA.IFC divisor values for SH7724. Despite not being initially documented, the / 3 mode is also supported for the IFC division. Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/kernel/cpu/sh4a/clock-sh7724.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c index 34611d9..627588d 100644 --- a/arch/sh/kernel/cpu/sh4a/clock-sh7724.c +++ b/arch/sh/kernel/cpu/sh4a/clock-sh7724.c @@ -127,7 +127,7 @@ struct clk *main_clks[] = { &div3_clk, }; -static int divisors[] = { 2, 0, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 }; +static int divisors[] = { 2, 3, 4, 6, 8, 12, 16, 0, 24, 32, 36, 48, 0, 72 }; static struct clk_div_mult_table div4_table = { .divisors = divisors, -- cgit v1.1 From e174d13010a6bd52045466bc35ca5a86e3f3ba9b Mon Sep 17 00:00:00 2001 From: Kuninori Morimoto Date: Tue, 18 Aug 2009 07:00:20 +0000 Subject: sh: Prevent heartbeat from scribbling over non-LED bits. While most platforms implement LED banks in sets of 8/16/32, some use different configurations. This adds a LED mask to the heartbeat platform data to allow platforms to constrain the bitmap, which is otherwise derived from the register size.
Signed-off-by: Kuninori Morimoto Signed-off-by: Paul Mundt --- arch/sh/drivers/heartbeat.c | 10 ++++++++++ arch/sh/include/asm/heartbeat.h | 1 + 2 files changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/sh/drivers/heartbeat.c b/arch/sh/drivers/heartbeat.c index 938817e..a9339a6 100644 --- a/arch/sh/drivers/heartbeat.c +++ b/arch/sh/drivers/heartbeat.c @@ -40,14 +40,19 @@ static inline void heartbeat_toggle_bit(struct heartbeat_data *hd, if (inverted) new = ~new; + new &= hd->mask; + switch (hd->regsize) { case 32: + new |= ioread32(hd->base) & ~hd->mask; iowrite32(new, hd->base); break; case 16: + new |= ioread16(hd->base) & ~hd->mask; iowrite16(new, hd->base); break; default: + new |= ioread8(hd->base) & ~hd->mask; iowrite8(new, hd->base); break; } @@ -72,6 +77,7 @@ static int heartbeat_drv_probe(struct platform_device *pdev) { struct resource *res; struct heartbeat_data *hd; + int i; if (unlikely(pdev->num_resources != 1)) { dev_err(&pdev->dev, "invalid number of resources\n"); @@ -107,6 +113,10 @@ static int heartbeat_drv_probe(struct platform_device *pdev) hd->nr_bits = ARRAY_SIZE(default_bit_pos); } + hd->mask = 0; + for (i = 0; i < hd->nr_bits; i++) + hd->mask |= (1 << hd->bit_pos[i]); + if (!hd->regsize) hd->regsize = 8; /* default access size */ diff --git a/arch/sh/include/asm/heartbeat.h b/arch/sh/include/asm/heartbeat.h index 724a43e..caaafe5 100644 --- a/arch/sh/include/asm/heartbeat.h +++ b/arch/sh/include/asm/heartbeat.h @@ -11,6 +11,7 @@ struct heartbeat_data { unsigned int nr_bits; struct timer_list timer; unsigned int regsize; + unsigned int mask; unsigned long flags; }; -- cgit v1.1
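A usage note on the heartbeat change above, as an illustration rather than part of the patches: a board whose LEDs occupy only a few bits of a wider register simply describes its LED bit positions in the heartbeat platform data, and the probe code in the diff derives the mask from them, so the read-modify-write in heartbeat_toggle_bit() leaves all other register bits untouched. The sketch below is hypothetical: the register address, access size and bit positions are placeholder values, only the structure and field names (bit_pos, nr_bits, regsize) come from the driver code shown above, and it assumes the driver picks the data up via the device's platform_data pointer, as SuperH board files normally pass it.

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/ioport.h>
#include <asm/heartbeat.h>

/* Hypothetical board: four LEDs wired to bits 4-7 of a 16-bit register. */
static unsigned char example_led_bit_pos[] = { 4, 5, 6, 7 };

static struct heartbeat_data example_heartbeat_data = {
	.bit_pos	= example_led_bit_pos,
	.nr_bits	= ARRAY_SIZE(example_led_bit_pos),
	.regsize	= 16,			/* driver will use ioread16()/iowrite16() */
};

static struct resource example_heartbeat_resource = {
	.start	= 0xb0000000,			/* placeholder LED register address */
	.end	= 0xb0000001,
	.flags	= IORESOURCE_MEM,
};

static struct platform_device example_heartbeat_device = {
	.name		= "heartbeat",
	.id		= -1,
	.dev		= {
		.platform_data	= &example_heartbeat_data,
	},
	.num_resources	= 1,
	.resource	= &example_heartbeat_resource,
};

With these values the probe code above computes a mask of 0xf0, so each toggle reads the register back and preserves the remaining twelve bits instead of overwriting them.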