| field | value | date |
|---|---|---|
| author | Thomas Gleixner <tglx@linutronix.de> | 2010-10-01 16:03:45 +0200 |
| committer | Thomas Gleixner <tglx@linutronix.de> | 2010-10-12 16:39:04 +0200 |
| commit | e144710b302525de5b90b9c3ba43562458d8957f | |
| tree | 0a6ef61ccb4957512ebf4a1887ba3bc54e78f99e /kernel/irq/internals.h | |
| parent | fe21221386e46b8e0f2cbd83559a29680c28473b | |
genirq: Disentangle irq.h
Move irq_desc and internal functions out of irq.h
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/irq/internals.h')
-rw-r--r-- | kernel/irq/internals.h | 100 |
1 file changed, 100 insertions(+), 0 deletions(-)
```diff
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index b905f0a..e281e45 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -1,6 +1,7 @@
 /*
  * IRQ subsystem internal functions and variables:
  */
+#include <linux/irqdesc.h>
 
 extern int noirqdebug;
 
@@ -22,6 +23,9 @@ extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
 extern void clear_kstat_irqs(struct irq_desc *desc);
 extern raw_spinlock_t sparse_irq_lock;
 
+/* Resending of interrupts :*/
+void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+
 #ifdef CONFIG_SPARSE_IRQ
 void replace_irq_desc(unsigned int irq, struct irq_desc *desc);
 #endif
@@ -105,3 +109,99 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 
 #undef P
 
+/* Stuff below will be cleaned up after the sparse allocator is done */
+
+#ifdef CONFIG_SMP
+/**
+ * alloc_desc_masks - allocate cpumasks for irq_desc
+ * @desc:	pointer to irq_desc struct
+ * @node:	node which will be handling the cpumasks
+ * @boot:	true if need bootmem
+ *
+ * Allocates affinity and pending_mask cpumask if required.
+ * Returns true if successful (or not required).
+ */
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+							bool boot)
+{
+	gfp_t gfp = GFP_ATOMIC;
+
+	if (boot)
+		gfp = GFP_NOWAIT;
+
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	if (!alloc_cpumask_var_node(&desc->irq_data.affinity, gfp, node))
+		return false;
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
+		free_cpumask_var(desc->irq_data.affinity);
+		return false;
+	}
+#endif
+#endif
+	return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+	cpumask_setall(desc->irq_data.affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_clear(desc->pending_mask);
+#endif
+}
+
+/**
+ * init_copy_desc_masks - copy cpumasks for irq_desc
+ * @old_desc:	pointer to old irq_desc struct
+ * @new_desc:	pointer to new irq_desc struct
+ *
+ * Insures affinity and pending_masks are copied to new irq_desc.
+ * If !CONFIG_CPUMASKS_OFFSTACK the cpumasks are embedded in the
+ * irq_desc struct so the copy is redundant.
+ */
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+#ifdef CONFIG_CPUMASK_OFFSTACK
+	cpumask_copy(new_desc->irq_data.affinity, old_desc->irq_data.affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	cpumask_copy(new_desc->pending_mask, old_desc->pending_mask);
+#endif
+#endif
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+	free_cpumask_var(old_desc->irq_data.affinity);
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+	free_cpumask_var(old_desc->pending_mask);
+#endif
+}
+
+#else /* !CONFIG_SMP */
+
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+								bool boot)
+{
+	return true;
+}
+
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+}
+
+static inline void init_copy_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+
+static inline void free_desc_masks(struct irq_desc *old_desc,
+					struct irq_desc *new_desc)
+{
+}
+#endif	/* CONFIG_SMP */
```
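The cpumask helpers added at the end of internals.h are meant to be paired by descriptor setup and migration code. The sketch below is an illustration only, not part of this patch: the function names `example_setup_desc` and `example_move_desc` and their call pattern are hypothetical, showing roughly how the new helpers would be combined by a caller.

```c
/*
 * Illustrative sketch only -- not part of this patch. The two callers
 * below are hypothetical; they just show how the new helpers are
 * intended to be paired.
 */
#include <linux/irq.h>
#include "internals.h"

/* Hypothetical first-time setup of a descriptor on a given node. */
static bool example_setup_desc(struct irq_desc *desc, int node)
{
	/* May allocate with CONFIG_CPUMASK_OFFSTACK; otherwise this is
	 * effectively a no-op that always succeeds. */
	if (!alloc_desc_masks(desc, node, false))
		return false;

	/* Default to "any CPU" affinity and no pending migration. */
	init_desc_masks(desc);
	return true;
}

/* Hypothetical migration of a descriptor to a new node. */
static bool example_move_desc(struct irq_desc *old_desc,
			      struct irq_desc *new_desc, int node)
{
	if (!alloc_desc_masks(new_desc, node, false))
		return false;

	/* Only copies when the cpumasks are off-stack; embedded masks
	 * were already copied along with the struct itself. */
	init_copy_desc_masks(old_desc, new_desc);

	/* Release the old descriptor's off-stack masks, if any. */
	free_desc_masks(old_desc, new_desc);
	return true;
}
```

Note that `alloc_desc_masks()` cleans up after a partial failure itself (it frees the affinity mask if the pending_mask allocation fails), so a caller only has to act on the overall return value.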