Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/cpu/common.c         | 13
-rw-r--r--  arch/i386/kernel/nmi.c                |  8
-rw-r--r--  arch/i386/kernel/paravirt.c           |  9
-rw-r--r--  arch/i386/kernel/smpboot.c            |  9
-rw-r--r--  arch/i386/mach-voyager/voyager_smp.c  |  6
-rw-r--r--  arch/mips/Kconfig                     | 14
-rw-r--r--  arch/mips/kernel/smtc.c               | 54
-rw-r--r--  arch/mips/vr41xx/common/irq.c         | 12
-rw-r--r--  arch/ppc/platforms/ev64360.c          |  3
-rw-r--r--  arch/x86_64/kernel/nmi.c              |  2
10 files changed, 87 insertions, 43 deletions
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index 8689d62..8a8bbda 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -710,11 +710,8 @@ __cpuinit int init_gdt(int cpu, struct task_struct *idle)
 	return 1;
 }
 
-/* Common CPU init for both boot and secondary CPUs */
-static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+void __cpuinit cpu_set_gdt(int cpu)
 {
-	struct tss_struct * t = &per_cpu(init_tss, cpu);
-	struct thread_struct *thread = &curr->thread;
 	struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
 
 	/* Reinit these anyway, even if they've already been done (on
@@ -722,6 +719,13 @@ static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
 	   the real ones). */
 	load_gdt(cpu_gdt_descr);
 	set_kernel_gs();
+}
+
+/* Common CPU init for both boot and secondary CPUs */
+static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+{
+	struct tss_struct * t = &per_cpu(init_tss, cpu);
+	struct thread_struct *thread = &curr->thread;
 
 	if (cpu_test_and_set(cpu, cpu_initialized)) {
 		printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
@@ -807,6 +811,7 @@ void __cpuinit cpu_init(void)
 		local_irq_enable();
 	}
 
+	cpu_set_gdt(cpu);
 	_cpu_init(cpu, curr);
 }
 
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index a5e34d6..1a6f8bb 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -310,13 +310,7 @@ static int __init setup_nmi_watchdog(char *str)
 	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
 		return 0;
 
-	/*
-	 * If any other x86 CPU has a local APIC, then
-	 * please test the NMI stuff there and send me the
-	 * missing bits. Right now Intel P6/P4 and AMD K7 only.
-	 */
-	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
-		return 0;  /* no lapic support */
+
 	nmi_watchdog = nmi;
 	return 1;
 }
diff --git a/arch/i386/kernel/paravirt.c b/arch/i386/kernel/paravirt.c
index 3dceab5..e55fd05 100644
--- a/arch/i386/kernel/paravirt.c
+++ b/arch/i386/kernel/paravirt.c
@@ -566,4 +566,11 @@ struct paravirt_ops paravirt_ops = {
 	.irq_enable_sysexit = native_irq_enable_sysexit,
 	.iret = native_iret,
 };
-EXPORT_SYMBOL(paravirt_ops);
+
+/*
+ * NOTE: CONFIG_PARAVIRT is experimental and the paravirt_ops
+ * semantics are subject to change. Hence we only do this
+ * internal-only export of this, until it gets sorted out and
+ * all lowlevel CPU ops used by modules are separately exported.
+ */
+EXPORT_SYMBOL_GPL(paravirt_ops);
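For background on the EXPORT_SYMBOL_GPL() change above: a GPL-only export can
be resolved only by modules that declare a GPL-compatible MODULE_LICENSE(), so
non-GPL modules lose direct access to paravirt_ops. A minimal sketch of a
module that still loads against such an export (hypothetical example code, not
part of this patch):

#include <linux/module.h>
#include <linux/init.h>

/* Hypothetical module. Because it declares a GPL-compatible
 * license below, the module loader will resolve EXPORT_SYMBOL_GPL()
 * symbols such as paravirt_ops for it; with a proprietary license
 * string, loading would fail with unresolved symbols. */
static int __init pv_example_init(void)
{
	return 0;	/* nothing to do; this only demonstrates licensing */
}

static void __exit pv_example_exit(void)
{
}

module_init(pv_example_init);
module_exit(pv_example_exit);
MODULE_LICENSE("GPL");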
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index dea7ef9..8c6c8c5 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -596,6 +596,12 @@ static void __cpuinit start_secondary(void *unused)
 void __devinit initialize_secondary(void)
 {
 	/*
+	 * switch to the per CPU GDT we already set up
+	 * in do_boot_cpu()
+	 */
+	cpu_set_gdt(current_thread_info()->cpu);
+
+	/*
 	 * We don't actually need to load the full TSS,
 	 * basically just the stack pointer and the eip.
 	 */
@@ -972,9 +978,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
 	/* Stack for startup_32 can be just as for start_secondary onwards */
 	stack_start.esp = (void *) idle->thread.esp;
 
-	start_pda = cpu_pda(cpu);
-	cpu_gdt_descr = per_cpu(cpu_gdt_descr, cpu);
-
 	irq_ctx_init(cpu);
 
 	x86_cpu_to_apicid[cpu] = apicid;
diff --git a/arch/i386/mach-voyager/voyager_smp.c b/arch/i386/mach-voyager/voyager_smp.c
index 55428e6..74aeedf 100644
--- a/arch/i386/mach-voyager/voyager_smp.c
+++ b/arch/i386/mach-voyager/voyager_smp.c
@@ -773,6 +773,12 @@ initialize_secondary(void)
 #endif
 
 	/*
+	 * switch to the per CPU GDT we already set up
+	 * in do_boot_cpu()
+	 */
+	cpu_set_gdt(current_thread_info()->cpu);
+
+	/*
 	 * We don't actually need to load the full TSS,
 	 * basically just the stack pointer and the eip.
 	 */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index fd2ff06..bbd386f 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1568,6 +1568,20 @@ config MIPS_MT_FPAFF
 	depends on MIPS_MT
 	default y
 
+config MIPS_MT_SMTC_INSTANT_REPLAY
+	bool "Low-latency Dispatch of Deferred SMTC IPIs"
+	depends on MIPS_MT_SMTC
+	default y
+	help
+	  SMTC pseudo-interrupts between TCs are deferred and queued
+	  if the target TC is interrupt-inhibited (IXMT). In the first
+	  SMTC prototypes, these queued IPIs were serviced on return
+	  to user mode, or on entry into the kernel idle loop. The
+	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
+	  processing, which adds runtime overhead (hence the option to turn
+	  it off), but ensures that IPIs are handled promptly even under
+	  heavy I/O interrupt load.
+
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
 	depends on MIPS_VPE_LOADER
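The INSTANT_REPLAY help text above describes dispatching deferred IPIs from
local_irq_restore(). A sketch of that mechanism, assuming a hypothetical
hw_restore_flags() stand-in for the real MIPS CP0 Status restore (the actual
irqflags hook is not part of this hunk):

extern void smtc_ipi_replay(void);
extern void hw_restore_flags(unsigned long flags);	/* hypothetical */

/* Sketch only: replay deferred SMTC IPIs as soon as a restore
 * re-enables interrupts, rather than waiting for the idle loop
 * or a return to user mode. */
static inline void smtc_local_irq_restore(unsigned long flags)
{
	hw_restore_flags(flags);
#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
	/* Bit 0 of the saved CP0 Status word is the IE bit; if this
	 * restore turned interrupts back on, drain any IPIs queued
	 * while this TC had them masked. */
	if (flags & 0x01)
		smtc_ipi_replay();
#endif
}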
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a8b3871..44238ab 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1017,6 +1017,33 @@ void setup_cross_vpe_interrupts(void)
  * SMTC-specific hacks invoked from elsewhere in the kernel.
  */
 
+void smtc_ipi_replay(void)
+{
+	/*
+	 * To the extent that we've ever turned interrupts off,
+	 * we may have accumulated deferred IPIs. This is subtle.
+	 * If we use the smtc_ipi_qdepth() macro, we'll get an
+	 * exact number - but we'll also disable interrupts
+	 * and create a window of failure where a new IPI gets
+	 * queued after we test the depth but before we re-enable
+	 * interrupts. So long as IXMT never gets set, however,
+	 * we should be OK: If we pick up something and dispatch
+	 * it here, that's great. If we see nothing, but concurrent
+	 * with this operation, another TC sends us an IPI, IXMT
+	 * is clear, and we'll handle it as a real pseudo-interrupt
+	 * and not a pseudo-pseudo interrupt.
+	 */
+	if (IPIQ[smp_processor_id()].depth > 0) {
+		struct smtc_ipi *pipi;
+		extern void self_ipi(struct smtc_ipi *);
+
+		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()]))) {
+			self_ipi(pipi);
+			smtc_cpu_stats[smp_processor_id()].selfipis++;
+		}
+	}
+}
+
 void smtc_idle_loop_hook(void)
 {
 #ifdef SMTC_IDLE_HOOK_DEBUG
@@ -1113,29 +1140,14 @@ void smtc_idle_loop_hook(void)
 	if (pdb_msg != &id_ho_db_msg[0])
 		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
 #endif /* SMTC_IDLE_HOOK_DEBUG */
+
 	/*
-	 * To the extent that we've ever turned interrupts off,
-	 * we may have accumulated deferred IPIs. This is subtle.
-	 * If we use the smtc_ipi_qdepth() macro, we'll get an
-	 * exact number - but we'll also disable interrupts
-	 * and create a window of failure where a new IPI gets
-	 * queued after we test the depth but before we re-enable
-	 * interrupts. So long as IXMT never gets set, however,
-	 * we should be OK: If we pick up something and dispatch
-	 * it here, that's great. If we see nothing, but concurrent
-	 * with this operation, another TC sends us an IPI, IXMT
-	 * is clear, and we'll handle it as a real pseudo-interrupt
-	 * and not a pseudo-pseudo interrupt.
+	 * Replay any accumulated deferred IPIs. If "Instant Replay"
+	 * is in use, there should never be any.
 	 */
-	if (IPIQ[smp_processor_id()].depth > 0) {
-		struct smtc_ipi *pipi;
-		extern void self_ipi(struct smtc_ipi *);
-
-		if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
-			self_ipi(pipi);
-			smtc_cpu_stats[smp_processor_id()].selfipis++;
-		}
-	}
+#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
+	smtc_ipi_replay();
+#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
 }
 
 void smtc_soft_dump(void)
diff --git a/arch/mips/vr41xx/common/irq.c b/arch/mips/vr41xx/common/irq.c
index 397ba94c..16decf4 100644
--- a/arch/mips/vr41xx/common/irq.c
+++ b/arch/mips/vr41xx/common/irq.c
@@ -1,7 +1,7 @@
 /*
  * Interrupt handing routines for NEC VR4100 series.
  *
- * Copyright (C) 2005 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
+ * Copyright (C) 2005-2007 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
@@ -73,13 +73,19 @@ static void irq_dispatch(unsigned int irq)
 	if (cascade->get_irq != NULL) {
 		unsigned int source_irq = irq;
 		desc = irq_desc + source_irq;
-		desc->chip->ack(source_irq);
+		if (desc->chip->mask_ack)
+			desc->chip->mask_ack(source_irq);
+		else {
+			desc->chip->mask(source_irq);
+			desc->chip->ack(source_irq);
+		}
 		irq = cascade->get_irq(irq);
 		if (irq < 0)
 			atomic_inc(&irq_err_count);
 		else
 			irq_dispatch(irq);
-		desc->chip->end(source_irq);
+		if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
+			desc->chip->unmask(source_irq);
 	} else
 		do_IRQ(irq);
 }
diff --git a/arch/ppc/platforms/ev64360.c b/arch/ppc/platforms/ev64360.c
index 90ed375..f87e06f 100644
--- a/arch/ppc/platforms/ev64360.c
+++ b/arch/ppc/platforms/ev64360.c
@@ -358,13 +358,12 @@ ev64360_setup_mtd(void)
 
 	ptbl_entries = 3;
 
-	if ((ptbl = kmalloc(ptbl_entries * sizeof(struct mtd_partition),
+	if ((ptbl = kzalloc(ptbl_entries * sizeof(struct mtd_partition),
 		GFP_KERNEL)) == NULL) {
 		printk(KERN_WARNING "Can't alloc MTD partition table\n");
 		return -ENOMEM;
 	}
-	memset(ptbl, 0, ptbl_entries * sizeof(struct mtd_partition));
 
 	ptbl[0].name = "reserved";
 	ptbl[0].offset = 0;
 
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 186aebb..9cb42ec 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -302,8 +302,6 @@ int __init setup_nmi_watchdog(char *str)
 	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
 		return 0;
 
-	if ((nmi == NMI_LOCAL_APIC) && (nmi_known_cpu() == 0))
-		return 0;  /* no lapic support */
 	nmi_watchdog = nmi;
 	return 1;
 }
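The ev64360.c hunk above folds a kmalloc() plus memset() pair into a single
kzalloc(), which returns already-zeroed memory. A sketch of the equivalence,
using hypothetical helper names:

#include <linux/slab.h>
#include <linux/string.h>
#include <linux/mtd/partitions.h>

/* Hypothetical helpers; n is the partition count (3 in the patch). */
static struct mtd_partition *alloc_ptbl_old(size_t n)
{
	struct mtd_partition *p;

	/* two steps: allocate, then zero by hand */
	p = kmalloc(n * sizeof(struct mtd_partition), GFP_KERNEL);
	if (p != NULL)
		memset(p, 0, n * sizeof(struct mtd_partition));
	return p;
}

static struct mtd_partition *alloc_ptbl_new(size_t n)
{
	/* one step: kzalloc() allocates and zeroes */
	return kzalloc(n * sizeof(struct mtd_partition), GFP_KERNEL);
}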