Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--  arch/powerpc/kernel/irq.c | 140
1 file changed, 93 insertions(+), 47 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9040330..64f6f20 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -73,8 +73,10 @@
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
int __irq_offset_value;
-static int ppc_spurious_interrupts;
#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
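The hunk above replaces the old global ppc_spurious_interrupts counter with a shared-aligned per-CPU irq_cpustat_t, so each CPU updates its own interrupt statistics without locking or cacheline bouncing. Below is a minimal sketch of the pattern using an invented struct and helper names (only the per-CPU macros and the spurious_irqs field name come from this diff):

#include <linux/percpu.h>

/* Invented stand-in for irq_cpustat_t; the real layout lives elsewhere. */
struct demo_irq_stat {
	unsigned int spurious_irqs;
};

DEFINE_PER_CPU_SHARED_ALIGNED(struct demo_irq_stat, demo_irq_stat);

static void demo_count_spurious(void)
{
	/* Each CPU only writes its own copy, so no lock is needed. */
	__get_cpu_var(demo_irq_stat).spurious_irqs++;
}

static unsigned int demo_read_spurious(int cpu)
{
	/* Readers fetch any CPU's copy, as show_other_interrupts() does below. */
	return per_cpu(demo_irq_stat, cpu).spurious_irqs;
}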
@@ -180,30 +182,64 @@ notrace void raw_local_irq_restore(unsigned long en)
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */
+static int show_other_interrupts(struct seq_file *p, int prec)
+{
+ int j;
+
+#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
+ if (tau_initialized) {
+ seq_printf(p, "%*s: ", prec, "TAU");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", tau_interrupts(j));
+ seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
+ }
+#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */
+
+ seq_printf(p, "%*s: ", prec, "LOC");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
+ seq_printf(p, " Local timer interrupts\n");
+
+ seq_printf(p, "%*s: ", prec, "SPU");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
+ seq_printf(p, " Spurious interrupts\n");
+
+ seq_printf(p, "%*s: ", prec, "CNT");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
+ seq_printf(p, " Performance monitoring interrupts\n");
+
+ seq_printf(p, "%*s: ", prec, "MCE");
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
+ seq_printf(p, " Machine check exceptions\n");
+
+ return 0;
+}
+
int show_interrupts(struct seq_file *p, void *v)
{
- int i = *(loff_t *)v, j;
+ unsigned long flags, any_count = 0;
+ int i = *(loff_t *) v, j, prec;
struct irqaction *action;
struct irq_desc *desc;
- unsigned long flags;
+ if (i > nr_irqs)
+ return 0;
+
+ for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
+ j *= 10;
+
+ if (i == nr_irqs)
+ return show_other_interrupts(p, prec);
+
+ /* print header */
if (i == 0) {
- seq_puts(p, " ");
+ seq_printf(p, "%*s", prec + 8, "");
for_each_online_cpu(j)
- seq_printf(p, "CPU%d ", j);
+ seq_printf(p, "CPU%-8d", j);
seq_putc(p, '\n');
- } else if (i == nr_irqs) {
-#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
- if (tau_initialized){
- seq_puts(p, "TAU: ");
- for_each_online_cpu(j)
- seq_printf(p, "%10u ", tau_interrupts(j));
- seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
- }
-#endif /* CONFIG_PPC32 && CONFIG_TAU_INT*/
- seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
-
- return 0;
}
desc = irq_to_desc(i);
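In the rewritten show_interrupts() above, prec is the field width used for the row labels: it starts at three digits and grows by one for each additional decimal digit in nr_irqs, capped at ten. A standalone illustration of that loop follows (the nr_irqs values passed in main() are arbitrary examples, not taken from real hardware):

#include <stdio.h>

/* Same loop as in show_interrupts(): width of the IRQ label column. */
static int irq_label_width(int nr_irqs)
{
	int prec, j;

	for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
		j *= 10;
	return prec;
}

int main(void)
{
	printf("%d\n", irq_label_width(512));    /* 3: fewer than 1000 IRQs   */
	printf("%d\n", irq_label_width(1000));   /* 4: 1000 needs four digits */
	printf("%d\n", irq_label_width(100000)); /* 6: six-digit IRQ numbers  */
	return 0;
}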
@@ -211,37 +247,48 @@ int show_interrupts(struct seq_file *p, void *v)
return 0;
raw_spin_lock_irqsave(&desc->lock, flags);
-
+ for_each_online_cpu(j)
+ any_count |= kstat_irqs_cpu(i, j);
action = desc->action;
- if (!action || !action->handler)
- goto skip;
+ if (!action && !any_count)
+ goto out;
- seq_printf(p, "%3d: ", i);
-#ifdef CONFIG_SMP
+ seq_printf(p, "%*d: ", prec, i);
for_each_online_cpu(j)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#else
- seq_printf(p, "%10u ", kstat_irqs(i));
-#endif /* CONFIG_SMP */
if (desc->chip)
- seq_printf(p, " %s ", desc->chip->name);
+ seq_printf(p, " %-16s", desc->chip->name);
else
- seq_puts(p, " None ");
+ seq_printf(p, " %-16s", "None");
+ seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");
- seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
- seq_printf(p, " %s", action->name);
+ if (action) {
+ seq_printf(p, " %s", action->name);
+ while ((action = action->next) != NULL)
+ seq_printf(p, ", %s", action->name);
+ }
- for (action = action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
-
-skip:
+out:
raw_spin_unlock_irqrestore(&desc->lock, flags);
-
return 0;
}
+/*
+ * /proc/stat helpers
+ */
+u64 arch_irq_stat_cpu(unsigned int cpu)
+{
+ u64 sum = per_cpu(irq_stat, cpu).timer_irqs;
+
+ sum += per_cpu(irq_stat, cpu).pmu_irqs;
+ sum += per_cpu(irq_stat, cpu).mce_exceptions;
+ sum += per_cpu(irq_stat, cpu).spurious_irqs;
+
+ return sum;
+}
+
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
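The arch_irq_stat_cpu() helper added above lets the generic /proc/stat code fold these per-CPU counts into its interrupt totals; that only happens if the architecture also declares the hook in its hardirq.h. The sketch below shows what the companion declarations in arch/powerpc/include/asm/hardirq.h would plausibly look like; it is not part of this hunk, the field names are taken from the counters used in this file, and the rest is an assumption:

/* Assumed companion header content, not shown in this diff. */
typedef struct {
	unsigned int __softirq_pending;	/* conventional first field */
	unsigned int timer_irqs;
	unsigned int pmu_irqs;
	unsigned int mce_exceptions;
	unsigned int spurious_irqs;
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* Makes fs/proc/stat.c call this helper instead of its default no-op. */
extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu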
@@ -353,8 +400,7 @@ void do_IRQ(struct pt_regs *regs)
if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
handle_one_irq(irq);
else if (irq != NO_IRQ_IGNORE)
- /* That's not SMP safe ... but who cares ? */
- ppc_spurious_interrupts++;
+ __get_cpu_var(irq_stat).spurious_irqs++;
irq_exit();
set_irq_regs(old_regs);
@@ -474,7 +520,7 @@ void do_softirq(void)
*/
static LIST_HEAD(irq_hosts);
-static DEFINE_SPINLOCK(irq_big_lock);
+static DEFINE_RAW_SPINLOCK(irq_big_lock);
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
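This and the following hunks convert irq_big_lock from a spinlock_t to a raw_spinlock_t and switch every lock and unlock call in lockstep. Raw spinlocks always busy-wait, which is what this low-level interrupt remapping path needs even on kernels where ordinary spinlocks can become sleeping locks (as with the RT preemption work). A minimal sketch of the same mechanical conversion on an invented lock:

#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

static void demo_critical_section(void)
{
	unsigned long flags;

	/* Same calling convention as the spin_lock_irqsave() it replaces;
	 * only the lock type and the raw_ prefix change. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	/* ... short, non-sleeping work only ... */
	raw_spin_unlock_irqrestore(&demo_lock, flags);
}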
@@ -520,14 +566,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
if (host->ops->match == NULL)
host->ops->match = default_irq_host_match;
- spin_lock_irqsave(&irq_big_lock, flags);
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
/* If it's a legacy controller, check for duplicates and
* mark it as allocated (we use irq 0 host pointer for that
*/
if (revmap_type == IRQ_HOST_MAP_LEGACY) {
if (irq_map[0].host != NULL) {
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
/* If we are early boot, we can't free the structure,
* too bad...
* this will be fixed once slab is made available early
@@ -541,7 +587,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node,
}
list_add(&host->link, &irq_hosts);
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
/* Additional setups per revmap type */
switch(revmap_type) {
@@ -592,13 +638,13 @@ struct irq_host *irq_find_host(struct device_node *node)
* the absence of a device node. This isn't a problem so far
* yet though...
*/
- spin_lock_irqsave(&irq_big_lock, flags);
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
list_for_each_entry(h, &irq_hosts, link)
if (h->ops->match(h, node)) {
found = h;
break;
}
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);
@@ -967,7 +1013,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
return NO_IRQ;
- spin_lock_irqsave(&irq_big_lock, flags);
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
/* Use hint for 1 interrupt if any */
if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
@@ -991,7 +1037,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
}
}
if (found == NO_IRQ) {
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return NO_IRQ;
}
hint_found:
@@ -1000,7 +1046,7 @@ unsigned int irq_alloc_virt(struct irq_host *host,
smp_wmb();
irq_map[i].host = host;
}
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
return found;
}
@@ -1012,7 +1058,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
WARN_ON (virq < NUM_ISA_INTERRUPTS);
WARN_ON (count == 0 || (virq + count) > irq_virq_count);
- spin_lock_irqsave(&irq_big_lock, flags);
+ raw_spin_lock_irqsave(&irq_big_lock, flags);
for (i = virq; i < (virq + count); i++) {
struct irq_host *host;
@@ -1025,7 +1071,7 @@ void irq_free_virt(unsigned int virq, unsigned int count)
smp_wmb();
irq_map[i].host = NULL;
}
- spin_unlock_irqrestore(&irq_big_lock, flags);
+ raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}
int arch_early_irq_init(void)