author | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-15 09:02:01 -0800
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-12-15 09:02:01 -0800
commit | 8f0ddf91f2aeb09602373e400cf8b403e9017210 (patch)
tree | b907c35c79caadafff6ad46a91614e30afd2f967 /arch
parent | 050cbb09dac0402672edeaeac06094ef8ff1749a (diff)
parent | b5f91da0a6973bb6f9ff3b91b0e92c0773a458f3 (diff)
Merge branch 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (26 commits)
clockevents: Convert to raw_spinlock
clockevents: Make tick_device_lock static
debugobjects: Convert to raw_spinlocks
perf_event: Convert to raw_spinlock
hrtimers: Convert to raw_spinlocks
genirq: Convert irq_desc.lock to raw_spinlock
smp: Convert smplocks to raw_spinlocks
rtmutes: Convert rtmutex.lock to raw_spinlock
sched: Convert pi_lock to raw_spinlock
sched: Convert cpupri lock to raw_spinlock
sched: Convert rt_runtime_lock to raw_spinlock
sched: Convert rq->lock to raw_spinlock
plist: Make plist debugging raw_spinlock aware
bkl: Fixup core_lock fallout
locking: Cleanup the name space completely
locking: Further name space cleanups
alpha: Fix fallout from locking changes
locking: Implement new raw_spinlock
locking: Convert raw_rwlock functions to arch_rwlock
locking: Convert raw_rwlock to arch_rwlock
...
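The pull is almost entirely a mechanical type split and rename: locks that must keep busy-waiting even where spinlock_t may become preemptible (as in the preempt-rt tree) are switched to raw_spinlock_t, and the architecture back-end moves from the __raw_spin_*/raw_spinlock_t namespace to arch_spin_*/arch_spinlock_t. A minimal caller-side sketch of the pattern the diff applies to t2_hae_lock and irq_desc[].lock follows; DEFINE_RAW_SPINLOCK and the raw_spin_* API are taken from the diff, while the lock and function names are hypothetical:

static DEFINE_RAW_SPINLOCK(example_hw_lock);          /* hypothetical lock */

static void example_program_hw_window(void)           /* hypothetical helper */
{
        unsigned long flags;

        /* Disable local interrupts and take the low-level lock; a
         * raw_spinlock_t is never converted into a sleeping lock, so it
         * is safe in contexts that must not schedule. */
        raw_spin_lock_irqsave(&example_hw_lock, flags);
        /* ... program a hardware window register, as the alpha T2 HAE
         *     accessors in this merge do under t2_hae_lock ... */
        raw_spin_unlock_irqrestore(&example_hw_lock, flags);
}

Ordinary spinlock_t users keep calling spin_lock_irqsave() and friends unchanged; only core infrastructure that must never sleep (irq descriptors, runqueues, hrtimers, clockevents and so on, per the list above) is moved to the raw_ API.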
Diffstat (limited to 'arch')
74 files changed, 701 insertions(+), 701 deletions(-)
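The diff below is the arch/ side of the rename. At the lowest layer, raw_spinlock_t and raw_rwlock_t become arch_spinlock_t and arch_rwlock_t, __RAW_SPIN_LOCK_UNLOCKED/__RAW_RW_LOCK_UNLOCKED become __ARCH_SPIN_LOCK_UNLOCKED/__ARCH_RW_LOCK_UNLOCKED, and the __raw_spin_*/__raw_read_*/__raw_write_* helpers become arch_spin_*/arch_read_*/arch_write_*. As a rough sketch of the contract each port provides, here is a simplified version assembled from the alpha code in this diff; the lock loop is reduced to a trylock-based busy wait, whereas the real alpha, ARM and MIPS versions below use ll/sc or ticket-lock sequences:

typedef struct {
        volatile unsigned int lock;          /* 0 = unlocked */
} arch_spinlock_t;

#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        /* as in the alpha header: atomically set bit 0 and succeed
         * only if it was previously clear */
        return !test_and_set_bit(0, &lock->lock);
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        /* simplified: spin on trylock (real ports open-code an
         * ll/sc or ticket fast path instead) */
        while (!arch_spin_trylock(lock))
                cpu_relax();
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        mb();                /* order the critical section before release */
        lock->lock = 0;
}

The generic raw_spin_lock()/spin_lock() wrappers (preemption accounting, lockdep) layer on top of these arch_* primitives; the headers changed below only supply the per-architecture back-end.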
diff --git a/arch/alpha/include/asm/core_t2.h b/arch/alpha/include/asm/core_t2.h index 46bfff5..471c072 100644 --- a/arch/alpha/include/asm/core_t2.h +++ b/arch/alpha/include/asm/core_t2.h @@ -435,7 +435,7 @@ extern inline void t2_outl(u32 b, unsigned long addr) set_hae(msb); \ } -extern spinlock_t t2_hae_lock; +extern raw_spinlock_t t2_hae_lock; /* * NOTE: take T2_DENSE_MEM off in each readX/writeX routine, since @@ -448,12 +448,12 @@ __EXTERN_INLINE u8 t2_readb(const volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long result, msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; result = *(vip) ((addr << 5) + T2_SPARSE_MEM + 0x00); - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); return __kernel_extbl(result, addr & 3); } @@ -462,12 +462,12 @@ __EXTERN_INLINE u16 t2_readw(const volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long result, msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08); - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); return __kernel_extwl(result, addr & 3); } @@ -480,12 +480,12 @@ __EXTERN_INLINE u32 t2_readl(const volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long result, msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; result = *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18); - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); return result & 0xffffffffUL; } @@ -494,14 +494,14 @@ __EXTERN_INLINE u64 t2_readq(const volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long r0, r1, work, msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; work = (addr << 5) + T2_SPARSE_MEM + 0x18; r0 = *(vuip)(work); r1 = *(vuip)(work + (4 << 5)); - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); return r1 << 32 | r0; } @@ -510,13 +510,13 @@ __EXTERN_INLINE void t2_writeb(u8 b, volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long msb, w; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; w = __kernel_insbl(b, addr & 3); *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x00) = w; - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); } __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) @@ -524,13 +524,13 @@ __EXTERN_INLINE void t2_writew(u16 b, volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long msb, w; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; w = __kernel_inswl(b, addr & 3); *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x08) = w; - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); } /* @@ -542,12 +542,12 @@ __EXTERN_INLINE void t2_writel(u32 b, volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - 
T2_DENSE_MEM; unsigned long msb; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; *(vuip) ((addr << 5) + T2_SPARSE_MEM + 0x18) = b; - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); } __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) @@ -555,14 +555,14 @@ __EXTERN_INLINE void t2_writeq(u64 b, volatile void __iomem *xaddr) unsigned long addr = (unsigned long) xaddr - T2_DENSE_MEM; unsigned long msb, work; unsigned long flags; - spin_lock_irqsave(&t2_hae_lock, flags); + raw_spin_lock_irqsave(&t2_hae_lock, flags); t2_set_hae; work = (addr << 5) + T2_SPARSE_MEM + 0x18; *(vuip)work = b; *(vuip)(work + (4 << 5)) = b >> 32; - spin_unlock_irqrestore(&t2_hae_lock, flags); + raw_spin_unlock_irqrestore(&t2_hae_lock, flags); } __EXTERN_INLINE void __iomem *t2_ioportmap(unsigned long addr) diff --git a/arch/alpha/include/asm/spinlock.h b/arch/alpha/include/asm/spinlock.h index e38fb95..d0faca1 100644 --- a/arch/alpha/include/asm/spinlock.h +++ b/arch/alpha/include/asm/spinlock.h @@ -12,18 +12,18 @@ * We make no fairness assumptions. They have a cost. */ -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(x) \ +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(x) \ do { cpu_relax(); } while ((x)->lock) -static inline void __raw_spin_unlock(raw_spinlock_t * lock) +static inline void arch_spin_unlock(arch_spinlock_t * lock) { mb(); lock->lock = 0; } -static inline void __raw_spin_lock(raw_spinlock_t * lock) +static inline void arch_spin_lock(arch_spinlock_t * lock) { long tmp; @@ -43,24 +43,24 @@ static inline void __raw_spin_lock(raw_spinlock_t * lock) : "m"(lock->lock) : "memory"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return !test_and_set_bit(0, &lock->lock); } /***********************************************************/ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int arch_read_can_lock(arch_rwlock_t *lock) { return (lock->lock & 1) == 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int arch_write_can_lock(arch_rwlock_t *lock) { return lock->lock == 0; } -static inline void __raw_read_lock(raw_rwlock_t *lock) +static inline void arch_read_lock(arch_rwlock_t *lock) { long regx; @@ -80,7 +80,7 @@ static inline void __raw_read_lock(raw_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *lock) +static inline void arch_write_lock(arch_rwlock_t *lock) { long regx; @@ -100,7 +100,7 @@ static inline void __raw_write_lock(raw_rwlock_t *lock) : "m" (*lock) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t * lock) +static inline int arch_read_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -122,7 +122,7 @@ static inline int __raw_read_trylock(raw_rwlock_t * lock) return success; } -static inline int __raw_write_trylock(raw_rwlock_t * lock) +static inline int arch_write_trylock(arch_rwlock_t * lock) { long regx; int success; @@ -144,7 +144,7 @@ static inline int __raw_write_trylock(raw_rwlock_t * lock) return success; } -static inline void __raw_read_unlock(raw_rwlock_t * lock) +static inline void arch_read_unlock(arch_rwlock_t * lock) { long regx; __asm__ __volatile__( @@ 
-160,17 +160,17 @@ static inline void __raw_read_unlock(raw_rwlock_t * lock) : "m" (*lock) : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t * lock) +static inline void arch_write_unlock(arch_rwlock_t * lock) { mb(); lock->lock = 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ALPHA_SPINLOCK_H */ diff --git a/arch/alpha/include/asm/spinlock_types.h b/arch/alpha/include/asm/spinlock_types.h index 8141eb5..54c2afc 100644 --- a/arch/alpha/include/asm/spinlock_types.h +++ b/arch/alpha/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c index d9980d4..e6d9056 100644 --- a/arch/alpha/kernel/core_t2.c +++ b/arch/alpha/kernel/core_t2.c @@ -74,7 +74,7 @@ # define DBG(args) #endif -DEFINE_SPINLOCK(t2_hae_lock); +DEFINE_RAW_SPINLOCK(t2_hae_lock); static volatile unsigned int t2_mcheck_any_expected; static volatile unsigned int t2_mcheck_last_taken; diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c index c0de072..5f2cf23 100644 --- a/arch/alpha/kernel/irq.c +++ b/arch/alpha/kernel/irq.c @@ -81,7 +81,7 @@ show_interrupts(struct seq_file *p, void *v) #endif if (irq < ACTUAL_NR_IRQS) { - spin_lock_irqsave(&irq_desc[irq].lock, flags); + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); action = irq_desc[irq].action; if (!action) goto unlock; @@ -105,7 +105,7 @@ show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } else if (irq == ACTUAL_NR_IRQS) { #ifdef CONFIG_SMP seq_puts(p, "IPI: "); diff --git a/arch/arm/include/asm/mach/irq.h b/arch/arm/include/asm/mach/irq.h index acac530..8920b2d6 100644 --- a/arch/arm/include/asm/mach/irq.h +++ b/arch/arm/include/asm/mach/irq.h @@ -26,9 +26,9 @@ extern int show_fiq_list(struct seq_file *, void *); */ #define do_bad_IRQ(irq,desc) \ do { \ - spin_lock(&desc->lock); \ + raw_spin_lock(&desc->lock); \ handle_bad_irq(irq, desc); \ - spin_unlock(&desc->lock); \ + raw_spin_unlock(&desc->lock); \ } while(0) #endif diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h index c13681a..c91c64c 100644 --- a/arch/arm/include/asm/spinlock.h +++ b/arch/arm/include/asm/spinlock.h @@ -17,13 +17,13 @@ * Locked value: 1 */ -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, 
flags) arch_spin_lock(lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -43,7 +43,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp; @@ -63,7 +63,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { smp_mb(); @@ -86,7 +86,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * just write zero since the lock is exclusively held. */ -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; @@ -106,7 +106,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_mb(); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp; @@ -126,7 +126,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) } } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); @@ -142,7 +142,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) } /* write_can_lock - would write_trylock() succeed? */ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) /* * Read locks are a bit more hairy: @@ -156,7 +156,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) * currently active. However, we know we won't have any write * locks. */ -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -176,7 +176,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) smp_mb(); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp, tmp2; @@ -198,7 +198,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "cc"); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, tmp2 = 1; @@ -215,13 +215,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) } /* read_can_lock - would read_trylock() succeed? 
*/ -#define __raw_read_can_lock(x) ((x)->lock < 0x80000000) +#define arch_read_can_lock(x) ((x)->lock < 0x80000000) -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/arm/include/asm/spinlock_types.h b/arch/arm/include/asm/spinlock_types.h index 43e83f6..d14d197 100644 --- a/arch/arm/include/asm/spinlock_types.h +++ b/arch/arm/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index c9a8619..b7cb45b 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c @@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -84,7 +84,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { #ifdef CONFIG_FIQ show_fiq_list(p, v); @@ -139,7 +139,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) } desc = irq_desc + irq; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; if (iflags & IRQF_VALID) desc->status &= ~IRQ_NOREQUEST; @@ -147,7 +147,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags) desc->status &= ~IRQ_NOPROBE; if (!(iflags & IRQF_NOAUTOEN)) desc->status &= ~IRQ_NOAUTOEN; - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } void __init init_IRQ(void) @@ -166,9 +166,9 @@ static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) { pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); desc->chip->set_affinity(irq, cpumask_of(cpu)); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); } /* diff --git a/arch/arm/mach-ns9xxx/irq.c b/arch/arm/mach-ns9xxx/irq.c index feb0e54..038f24d 100644 --- a/arch/arm/mach-ns9xxx/irq.c +++ b/arch/arm/mach-ns9xxx/irq.c @@ -66,7 +66,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) struct irqaction *action; irqreturn_t action_ret; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); BUG_ON(desc->status & IRQ_INPROGRESS); @@ -78,7 +78,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc *desc) goto out_mask; desc->status |= IRQ_INPROGRESS; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); @@ -87,7 +87,7 @@ static void handle_prio_irq(unsigned int irq, struct irq_desc 
*desc) * Maybe this function should go to kernel/irq/chip.c? */ note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~IRQ_INPROGRESS; if (desc->status & IRQ_DISABLED) @@ -97,7 +97,7 @@ out_mask: /* ack unconditionally to unmask lower prio irqs */ desc->chip->ack(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } #define handle_irq handle_prio_irq #endif diff --git a/arch/avr32/kernel/irq.c b/arch/avr32/kernel/irq.c index 09904d2..9604f77 100644 --- a/arch/avr32/kernel/irq.c +++ b/arch/avr32/kernel/irq.c @@ -42,7 +42,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -57,7 +57,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; diff --git a/arch/blackfin/include/asm/spinlock.h b/arch/blackfin/include/asm/spinlock.h index b0c7f0e..1942ccf 100644 --- a/arch/blackfin/include/asm/spinlock.h +++ b/arch/blackfin/include/asm/spinlock.h @@ -17,84 +17,84 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr); asmlinkage void __raw_spin_lock_asm(volatile int *ptr); asmlinkage int __raw_spin_trylock_asm(volatile int *ptr); asmlinkage void __raw_spin_unlock_asm(volatile int *ptr); -asmlinkage void __raw_read_lock_asm(volatile int *ptr); -asmlinkage int __raw_read_trylock_asm(volatile int *ptr); -asmlinkage void __raw_read_unlock_asm(volatile int *ptr); -asmlinkage void __raw_write_lock_asm(volatile int *ptr); -asmlinkage int __raw_write_trylock_asm(volatile int *ptr); -asmlinkage void __raw_write_unlock_asm(volatile int *ptr); - -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +asmlinkage void arch_read_lock_asm(volatile int *ptr); +asmlinkage int arch_read_trylock_asm(volatile int *ptr); +asmlinkage void arch_read_unlock_asm(volatile int *ptr); +asmlinkage void arch_write_lock_asm(volatile int *ptr); +asmlinkage int arch_write_trylock_asm(volatile int *ptr); +asmlinkage void arch_write_unlock_asm(volatile int *ptr); + +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __raw_spin_is_locked_asm(&lock->lock); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { __raw_spin_lock_asm(&lock->lock); } -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return __raw_spin_trylock_asm(&lock->lock); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __raw_spin_unlock_asm(&lock->lock); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } -static inline int __raw_read_can_lock(raw_rwlock_t *rw) +static inline int arch_read_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) > 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *rw) +static inline int arch_write_can_lock(arch_rwlock_t *rw) { return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS; } 
-static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { - __raw_read_lock_asm(&rw->lock); + arch_read_lock_asm(&rw->lock); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - return __raw_read_trylock_asm(&rw->lock); + return arch_read_trylock_asm(&rw->lock); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { - __raw_read_unlock_asm(&rw->lock); + arch_read_unlock_asm(&rw->lock); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { - __raw_write_lock_asm(&rw->lock); + arch_write_lock_asm(&rw->lock); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { - return __raw_write_trylock_asm(&rw->lock); + return arch_write_trylock_asm(&rw->lock); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { - __raw_write_unlock_asm(&rw->lock); + arch_write_unlock_asm(&rw->lock); } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif diff --git a/arch/blackfin/include/asm/spinlock_types.h b/arch/blackfin/include/asm/spinlock_types.h index be75762..1a33608 100644 --- a/arch/blackfin/include/asm/spinlock_types.h +++ b/arch/blackfin/include/asm/spinlock_types.h @@ -15,14 +15,14 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/blackfin/kernel/irqchip.c b/arch/blackfin/kernel/irqchip.c index db9f9c9..64cff54 100644 --- a/arch/blackfin/kernel/irqchip.c +++ b/arch/blackfin/kernel/irqchip.c @@ -23,7 +23,7 @@ void ack_bad_irq(unsigned int irq) static struct irq_desc bad_irq_desc = { .handle_irq = handle_bad_irq, - .lock = __SPIN_LOCK_UNLOCKED(irq_desc->lock), + .lock = __RAW_SPIN_LOCK_UNLOCKED(bad_irq_desc.lock), }; #ifdef CONFIG_CPUMASK_OFFSTACK @@ -39,7 +39,7 @@ int show_interrupts(struct seq_file *p, void *v) unsigned long flags; if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) diff --git a/arch/blackfin/kernel/traps.c b/arch/blackfin/kernel/traps.c index 78cb3d3..9636bac 100644 --- a/arch/blackfin/kernel/traps.c +++ b/arch/blackfin/kernel/traps.c @@ -1140,7 +1140,7 @@ void show_regs(struct pt_regs *fp) if (fp->ipend & ~0x3F) { for (i = 0; i < (NR_IRQS - 1); i++) { if (!in_atomic) - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) @@ -1155,7 +1155,7 @@ void show_regs(struct pt_regs *fp) verbose_printk("\n"); unlock: if (!in_atomic) - 
spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } } diff --git a/arch/cris/include/arch-v32/arch/spinlock.h b/arch/cris/include/arch-v32/arch/spinlock.h index 367a53e..f171a66 100644 --- a/arch/cris/include/arch-v32/arch/spinlock.h +++ b/arch/cris/include/arch-v32/arch/spinlock.h @@ -9,12 +9,12 @@ extern void cris_spin_unlock(void *l, int val); extern void cris_spin_lock(void *l); extern int cris_spin_trylock(void *l); -static inline int __raw_spin_is_locked(raw_spinlock_t *x) +static inline int arch_spin_is_locked(arch_spinlock_t *x) { return *(volatile signed char *)(&(x)->slock) <= 0; } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ volatile ("move.d %1,%0" \ : "=m" (lock->slock) \ @@ -22,26 +22,26 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { return cris_spin_trylock((void *)&lock->slock); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { cris_spin_lock((void *)&lock->slock); } static inline void -__raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } /* @@ -56,76 +56,76 @@ __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) * */ -static inline int __raw_read_can_lock(raw_rwlock_t *x) +static inline int arch_read_can_lock(arch_rwlock_t *x) { return (int)(x)->lock > 0; } -static inline int __raw_write_can_lock(raw_rwlock_t *x) +static inline int arch_write_can_lock(arch_rwlock_t *x) { return (x)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock == 0); rw->lock--; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); rw->lock = 0; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); rw->lock++; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); while (rw->lock != RW_LOCK_BIAS); rw->lock = RW_LOCK_BIAS; - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { int ret = 0; - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); if (rw->lock != 0) { rw->lock--; ret = 1; } - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); return ret; } -static inline int __raw_write_trylock(raw_rwlock_t 
*rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { int ret = 0; - __raw_spin_lock(&rw->slock); + arch_spin_lock(&rw->slock); if (rw->lock == RW_LOCK_BIAS) { rw->lock = 0; ret = 1; } - __raw_spin_unlock(&rw->slock); + arch_spin_unlock(&rw->slock); return 1; } #define _raw_read_lock_flags(lock, flags) _raw_read_lock(lock) #define _raw_write_lock_flags(lock, flags) _raw_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_ARCH_SPINLOCK_H */ diff --git a/arch/cris/kernel/irq.c b/arch/cris/kernel/irq.c index 0ca7d98..b5ce072 100644 --- a/arch/cris/kernel/irq.c +++ b/arch/cris/kernel/irq.c @@ -52,7 +52,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c index af3e824..62d1aba 100644 --- a/arch/frv/kernel/irq.c +++ b/arch/frv/kernel/irq.c @@ -69,7 +69,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (action) { seq_printf(p, "%3d: ", i); @@ -85,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "Err: %10u\n", atomic_read(&irq_err_count)); } diff --git a/arch/h8300/kernel/irq.c b/arch/h8300/kernel/irq.c index 5c913d4..c25dc2c 100644 --- a/arch/h8300/kernel/irq.c +++ b/arch/h8300/kernel/irq.c @@ -186,7 +186,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_puts(p, " CPU0"); if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto unlock; @@ -200,7 +200,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_printf(p, ", %s", action->name); seq_putc(p, '\n'); unlock: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h index 57a2787..6ebc229 100644 --- a/arch/ia64/include/asm/bitops.h +++ b/arch/ia64/include/asm/bitops.h @@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr) * @addr: Address to start counting from * * Similarly to clear_bit_unlock, the implementation uses a store - * with release semantics. See also __raw_spin_unlock(). + * with release semantics. See also arch_spin_unlock(). 
*/ static __inline__ void __clear_bit_unlock(int nr, void *addr) diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 239ecdc..1a91c91 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -17,7 +17,7 @@ #include <asm/intrinsics.h> #include <asm/system.h> -#define __raw_spin_lock_init(x) ((x)->lock = 0) +#define arch_spin_lock_init(x) ((x)->lock = 0) /* * Ticket locks are conceptually two parts, one indicating the current head of @@ -38,7 +38,7 @@ #define TICKET_BITS 15 #define TICKET_MASK ((1 << TICKET_BITS) - 1) -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket, serve; @@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) } } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->lock); @@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return 0; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { unsigned short *p = (unsigned short *)&lock->lock + 1, tmp; @@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) ACCESS_ONCE(*p) = (tmp + 2) & ~1; } -static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock) { int *p = (int *)&lock->lock, ticket; @@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock) } } -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) { long tmp = ACCESS_ONCE(lock->lock); return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1; } -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { 
__ticket_spin_unlock_wait(lock); } -#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) -#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) +#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0) +#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) +arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1,%2\n" @@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags) : "p6", "p7", "r2", "memory"); } -#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) +#define arch_read_lock(lock) arch_read_lock_flags(lock, 0) #else /* !ASM_SUPPORTED */ -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) -#define __raw_read_lock(rw) \ +#define arch_read_lock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ \ while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ @@ -188,16 +188,16 @@ do { \ #endif /* !ASM_SUPPORTED */ -#define __raw_read_unlock(rw) \ +#define arch_read_unlock(rw) \ do { \ - raw_rwlock_t *__read_lock_ptr = (rw); \ + arch_rwlock_t *__read_lock_ptr = (rw); \ ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ } while (0) #ifdef ASM_SUPPORTED static __always_inline void -__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) +arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags) { __asm__ __volatile__ ( "tbit.nz p6, p0 = %1, %2\n" @@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); } -#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) +#define arch_write_lock(rw) arch_write_lock_flags(rw, 0) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ register long result; \ \ @@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) (result == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void arch_write_unlock(arch_rwlock_t *x) { u8 *y = (u8 *)x; barrier(); @@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) #else /* !ASM_SUPPORTED */ -#define __raw_write_lock_flags(l, flags) __raw_write_lock(l) +#define arch_write_lock_flags(l, flags) arch_write_lock(l) -#define __raw_write_lock(l) \ +#define arch_write_lock(l) \ ({ \ __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ @@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) } while (ia64_val); \ }) -#define __raw_write_trylock(rw) \ +#define arch_write_trylock(rw) \ ({ \ __u64 ia64_val; \ __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ @@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) (ia64_val == 0); \ }) -static inline void __raw_write_unlock(raw_rwlock_t *x) +static inline void arch_write_unlock(arch_rwlock_t *x) { barrier(); x->write_lock = 0; @@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x) #endif /* !ASM_SUPPORTED */ -static inline int __raw_read_trylock(raw_rwlock_t *x) +static inline int arch_read_trylock(arch_rwlock_t *x) { union { - raw_rwlock_t lock; + arch_rwlock_t lock; __u32 word; } old, new; old.lock = new.lock = *x; @@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t 
*x) return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; } -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_IA64_SPINLOCK_H */ diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 474e46f..e2b42a5 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -7,15 +7,15 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int read_counter : 31; volatile unsigned int write_lock : 1; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 } #endif diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index dab4d39..95ac77a 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi, goto unlock_iosapic_lock; } - spin_lock(&irq_desc[irq].lock); + raw_spin_lock(&irq_desc[irq].lock); dest = get_target_cpu(gsi, irq); dmode = choose_dmode(); err = register_intr(gsi, irq, dmode, polarity, trigger); if (err < 0) { - spin_unlock(&irq_desc[irq].lock); + raw_spin_unlock(&irq_desc[irq].lock); irq = err; goto unlock_iosapic_lock; } @@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi, (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), cpu_logical_id(dest), dest, irq_to_vector(irq)); - spin_unlock(&irq_desc[irq].lock); + raw_spin_unlock(&irq_desc[irq].lock); unlock_iosapic_lock: spin_unlock_irqrestore(&iosapic_lock, flags); return irq; diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index 7d89512..94ee9d0 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); return 0; diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c index dd9d7b5..70e4bad 100644 --- a/arch/ia64/kernel/irq_ia64.c +++ b/arch/ia64/kernel/irq_ia64.c @@ -345,7 +345,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) desc = irq_desc + irq; cfg = irq_cfg + irq; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (!cfg->move_cleanup_count) goto unlock; @@ -358,7 +358,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id) spin_unlock_irqrestore(&vector_lock, flags); cfg->move_cleanup_count--; unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } return IRQ_HANDLED; } diff --git a/arch/m32r/include/asm/spinlock.h b/arch/m32r/include/asm/spinlock.h index dded923..179a064 100644 --- a/arch/m32r/include/asm/spinlock.h +++ b/arch/m32r/include/asm/spinlock.h @@ -24,19 +24,19 @@ * We make no fairness assumptions. They have a cost. 
*/ -#define __raw_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { cpu_relax(); } while (__raw_spin_is_locked(x)) +#define arch_spin_is_locked(x) (*(volatile int *)(&(x)->slock) <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { cpu_relax(); } while (arch_spin_is_locked(x)) /** - * __raw_spin_trylock - Try spin lock and return a result + * arch_spin_trylock - Try spin lock and return a result * @lock: Pointer to the lock variable * - * __raw_spin_trylock() tries to get the lock and returns a result. + * arch_spin_trylock() tries to get the lock and returns a result. * On the m32r, the result value is 1 (= Success) or 0 (= Failure). */ -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { int oldval; unsigned long tmp1, tmp2; @@ -50,7 +50,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) * } */ __asm__ __volatile__ ( - "# __raw_spin_trylock \n\t" + "# arch_spin_trylock \n\t" "ldi %1, #0; \n\t" "mvfc %2, psw; \n\t" "clrpsw #0x40 -> nop; \n\t" @@ -69,7 +69,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (oldval > 0); } -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp0, tmp1; @@ -84,7 +84,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) * } */ __asm__ __volatile__ ( - "# __raw_spin_lock \n\t" + "# arch_spin_lock \n\t" ".fillinsn \n" "1: \n\t" "mvfc %1, psw; \n\t" @@ -111,7 +111,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { mb(); lock->slock = 1; @@ -140,15 +140,15 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock > 0) +#define arch_read_can_lock(x) ((int)(x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -199,7 +199,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -252,7 +252,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1; @@ -274,7 +274,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { unsigned long tmp0, tmp1, tmp2; @@ -298,7 +298,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *rw) ); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t*)lock; if (atomic_dec_return(count) >= 0) @@ -307,7 +307,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; if (atomic_sub_and_test(RW_LOCK_BIAS, count)) @@ -316,11 +316,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) return 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_M32R_SPINLOCK_H */ diff --git a/arch/m32r/include/asm/spinlock_types.h b/arch/m32r/include/asm/spinlock_types.h index 83f5210..92e2767 100644 --- a/arch/m32r/include/asm/spinlock_types.h +++ b/arch/m32r/include/asm/spinlock_types.h @@ -7,17 +7,17 @@ typedef struct { volatile int slock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 #define RW_LOCK_BIAS_STR "0x01000000" -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif /* _ASM_M32R_SPINLOCK_TYPES_H */ diff --git a/arch/m32r/kernel/irq.c b/arch/m32r/kernel/irq.c index 8dfd31e..3c71f77 100644 --- a/arch/m32r/kernel/irq.c +++ b/arch/m32r/kernel/irq.c @@ -40,7 +40,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -59,7 +59,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c index 7d5ddd6..0f06034 100644 --- a/arch/microblaze/kernel/irq.c +++ 
b/arch/microblaze/kernel/irq.c @@ -68,7 +68,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < nr_irq) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -89,7 +89,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; } diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h index 5b60a09..21ef9ef 100644 --- a/arch/mips/include/asm/spinlock.h +++ b/arch/mips/include/asm/spinlock.h @@ -34,33 +34,33 @@ * becomes equal to the the initial value of the tail. */ -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); return ((counters >> 14) ^ counters) & 0x1fff; } -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - while (__raw_spin_is_locked(x)) { cpu_relax(); } +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + while (arch_spin_is_locked(x)) { cpu_relax(); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { unsigned int counters = ACCESS_ONCE(lock->lock); return (((counters >> 14) - counters) & 0x1fff) > 1; } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { int my_ticket; int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " .set push # __raw_spin_lock \n" + " .set push # arch_spin_lock \n" " .set noreorder \n" " \n" "1: ll %[ticket], %[ticket_ptr] \n" @@ -94,7 +94,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) [my_ticket] "=&r" (my_ticket)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_lock \n" + " .set push # arch_spin_lock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -134,7 +134,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) smp_llsc_mb(); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { int tmp; @@ -142,7 +142,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " # __raw_spin_unlock \n" + " # arch_spin_unlock \n" "1: ll %[ticket], %[ticket_ptr] \n" " addiu %[ticket], %[ticket], 1 \n" " ori %[ticket], %[ticket], 0x2000 \n" @@ -153,7 +153,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) [ticket] "=&r" (tmp)); } else { __asm__ __volatile__ ( - " .set push # __raw_spin_unlock \n" + " .set push # arch_spin_unlock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -174,13 +174,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) } } -static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) +static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock) { int tmp, tmp2, tmp3; if (R10000_LLSC_WAR) { __asm__ __volatile__ ( - " .set push # __raw_spin_trylock \n" + " .set push # arch_spin_trylock \n" " .set noreorder \n" " \n" "1: ll %[ticket], %[ticket_ptr] \n" @@ -204,7 +204,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) [now_serving] "=&r" (tmp3)); } 
else { __asm__ __volatile__ ( - " .set push # __raw_spin_trylock \n" + " .set push # arch_spin_trylock \n" " .set noreorder \n" " \n" " ll %[ticket], %[ticket_ptr] \n" @@ -248,21 +248,21 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(rw) ((rw)->lock >= 0) +#define arch_read_can_lock(rw) ((rw)->lock >= 0) /* * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_write_can_lock(rw) (!(rw)->lock) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_read_lock \n" + " .set noreorder # arch_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 1b \n" " addu %1, 1 \n" @@ -275,7 +275,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_lock \n" + " .set noreorder # arch_read_lock \n" "1: ll %1, %2 \n" " bltz %1, 2f \n" " addu %1, 1 \n" @@ -301,7 +301,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) /* Note the use of sub, not subu which will make the kernel die with an overflow exception if we ever try to unlock an rwlock that is already unlocked or is being held by a writer. */ -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int tmp; @@ -309,7 +309,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) if (R10000_LLSC_WAR) { __asm__ __volatile__( - "1: ll %1, %2 # __raw_read_unlock \n" + "1: ll %1, %2 # arch_read_unlock \n" " sub %1, 1 \n" " sc %1, %0 \n" " beqzl %1, 1b \n" @@ -318,7 +318,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_read_unlock \n" + " .set noreorder # arch_read_unlock \n" "1: ll %1, %2 \n" " sub %1, 1 \n" " sc %1, %0 \n" @@ -335,13 +335,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) } } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned int tmp; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_write_lock \n" + " .set noreorder # arch_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 1b \n" " lui %1, 0x8000 \n" @@ -354,7 +354,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_write_lock \n" + " .set noreorder # arch_write_lock \n" "1: ll %1, %2 \n" " bnez %1, 2f \n" " lui %1, 0x8000 \n" @@ -377,26 +377,26 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) smp_llsc_mb(); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { smp_mb(); __asm__ __volatile__( - " # __raw_write_unlock \n" + " # arch_write_unlock \n" " sw $0, %0 \n" : "=m" (rw->lock) : "m" (rw->lock) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_read_trylock \n" + " .set noreorder # arch_read_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bltz %1, 2f \n" @@ -413,7 +413,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set 
noreorder # __raw_read_trylock \n" + " .set noreorder # arch_read_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bltz %1, 2f \n" @@ -433,14 +433,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return ret; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int tmp; int ret; if (R10000_LLSC_WAR) { __asm__ __volatile__( - " .set noreorder # __raw_write_trylock \n" + " .set noreorder # arch_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bnez %1, 2f \n" @@ -457,7 +457,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) : "memory"); } else { __asm__ __volatile__( - " .set noreorder # __raw_write_trylock \n" + " .set noreorder # arch_write_trylock \n" " li %2, 0 \n" "1: ll %1, %3 \n" " bnez %1, 2f \n" @@ -480,11 +480,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return ret; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* _ASM_SPINLOCK_H */ diff --git a/arch/mips/include/asm/spinlock_types.h b/arch/mips/include/asm/spinlock_types.h index adeedaa..ee197c2 100644 --- a/arch/mips/include/asm/spinlock_types.h +++ b/arch/mips/include/asm/spinlock_types.h @@ -12,14 +12,14 @@ typedef struct { * bits 15..28: ticket */ unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c index 7b845ba..8b0b418 100644 --- a/arch/mips/kernel/irq.c +++ b/arch/mips/kernel/irq.c @@ -99,7 +99,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -118,7 +118,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_putc(p, '\n'); seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); diff --git a/arch/mips/vr41xx/common/icu.c b/arch/mips/vr41xx/common/icu.c index 6d39e22..6153b6a 100644 --- a/arch/mips/vr41xx/common/icu.c +++ b/arch/mips/vr41xx/common/icu.c @@ -159,9 +159,9 @@ void vr41xx_enable_piuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MPIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -174,9 +174,9 @@ void vr41xx_disable_piuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MPIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); 
+ raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -189,9 +189,9 @@ void vr41xx_enable_aiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MAIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -204,9 +204,9 @@ void vr41xx_disable_aiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MAIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -219,9 +219,9 @@ void vr41xx_enable_kiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MKIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -234,9 +234,9 @@ void vr41xx_disable_kiuint(uint16_t mask) if (current_cpu_type() == CPU_VR4111 || current_cpu_type() == CPU_VR4121) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MKIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -247,9 +247,9 @@ void vr41xx_enable_macint(uint16_t mask) struct irq_desc *desc = irq_desc + ETHERNET_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MMACINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_macint); @@ -259,9 +259,9 @@ void vr41xx_disable_macint(uint16_t mask) struct irq_desc *desc = irq_desc + ETHERNET_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MMACINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_macint); @@ -271,9 +271,9 @@ void vr41xx_enable_dsiuint(uint16_t mask) struct irq_desc *desc = irq_desc + DSIU_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_set(MDSIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_dsiuint); @@ -283,9 +283,9 @@ void vr41xx_disable_dsiuint(uint16_t mask) struct irq_desc *desc = irq_desc + DSIU_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu1_clear(MDSIUINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_dsiuint); @@ -295,9 +295,9 @@ void vr41xx_enable_firint(uint16_t mask) struct irq_desc *desc = irq_desc + FIR_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_set(MFIRINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_enable_firint); @@ -307,9 +307,9 @@ void vr41xx_disable_firint(uint16_t mask) struct irq_desc *desc = irq_desc + FIR_IRQ; unsigned long flags; - spin_lock_irqsave(&desc->lock, flags); + 
raw_spin_lock_irqsave(&desc->lock, flags); icu2_clear(MFIRINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } EXPORT_SYMBOL(vr41xx_disable_firint); @@ -322,9 +322,9 @@ void vr41xx_enable_pciint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MPCIINTREG, PCIINT0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -338,9 +338,9 @@ void vr41xx_disable_pciint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MPCIINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -354,9 +354,9 @@ void vr41xx_enable_scuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MSCUINTREG, SCUINT0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -370,9 +370,9 @@ void vr41xx_disable_scuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MSCUINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -386,9 +386,9 @@ void vr41xx_enable_csiint(uint16_t mask) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_set(MCSIINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -402,9 +402,9 @@ void vr41xx_disable_csiint(uint16_t mask) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_clear(MCSIINTREG, mask); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -418,9 +418,9 @@ void vr41xx_enable_bcuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MBCUINTREG, BCUINTR); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -434,9 +434,9 @@ void vr41xx_disable_bcuint(void) if (current_cpu_type() == CPU_VR4122 || current_cpu_type() == CPU_VR4131 || current_cpu_type() == CPU_VR4133) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); icu2_write(MBCUINTREG, 0); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } @@ -486,7 +486,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) pin = SYSINT1_IRQ_TO_PIN(irq); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); intassign0 = icu1_read(INTASSIGN0); intassign1 = icu1_read(INTASSIGN1); @@ -525,7 +525,7 @@ 
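The VR41xx ICU conversion above, and the set_sysint1_assign()/set_sysint2_assign() hunks that follow, show the two usual calling patterns once irq_desc.lock is a raw_spinlock_t. For reference, a minimal sketch of both forms (not taken from the patch; the helper names and the register update are illustrative):

#include <linux/errno.h>
#include <linux/irq.h>
#include <linux/spinlock.h>

/* Interrupt or unknown context: save and restore the IRQ state. */
static void example_icu_set_mask(struct irq_desc *desc, u16 mask)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	/* write the (hypothetical) interrupt-mask register here */
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

/* Process context with interrupts known to be enabled: the _irq form. */
static int example_icu_assign(struct irq_desc *desc, unsigned char assign)
{
	raw_spin_lock_irq(&desc->lock);
	if (assign > 3) {		/* illustrative bounds check only */
		raw_spin_unlock_irq(&desc->lock);
		return -EINVAL;
	}
	/* reroute the interrupt pin here */
	raw_spin_unlock_irq(&desc->lock);
	return 0;
}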
static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) intassign1 |= (uint16_t)assign << 9; break; default: - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return -EINVAL; } @@ -533,7 +533,7 @@ static inline int set_sysint1_assign(unsigned int irq, unsigned char assign) icu1_write(INTASSIGN0, intassign0); icu1_write(INTASSIGN1, intassign1); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return 0; } @@ -546,7 +546,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) pin = SYSINT2_IRQ_TO_PIN(irq); - spin_lock_irq(&desc->lock); + raw_spin_lock_irq(&desc->lock); intassign2 = icu1_read(INTASSIGN2); intassign3 = icu1_read(INTASSIGN3); @@ -593,7 +593,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) intassign3 |= (uint16_t)assign << 12; break; default: - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return -EINVAL; } @@ -601,7 +601,7 @@ static inline int set_sysint2_assign(unsigned int irq, unsigned char assign) icu1_write(INTASSIGN2, intassign2); icu1_write(INTASSIGN3, intassign3); - spin_unlock_irq(&desc->lock); + raw_spin_unlock_irq(&desc->lock); return 0; } diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 4c3c58e..e2d5ed8 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c @@ -215,7 +215,7 @@ int show_interrupts(struct seq_file *p, void *v) /* display information rows, one per active CPU */ case 1 ... NR_IRQS - 1: - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (action) { @@ -235,7 +235,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); } - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); break; /* polish off with NMI and error counters */ diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h index 8bc9e96..716634d 100644 --- a/arch/parisc/include/asm/atomic.h +++ b/arch/parisc/include/asm/atomic.h @@ -27,19 +27,19 @@ # define ATOMIC_HASH_SIZE 4 # define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ])) -extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; +extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; /* Can't use raw_spin_lock_irq because of #include problems, so * this is the substitute */ #define _atomic_spin_lock_irqsave(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ local_irq_save(f); \ - __raw_spin_lock(s); \ + arch_spin_lock(s); \ } while(0) #define _atomic_spin_unlock_irqrestore(l,f) do { \ - raw_spinlock_t *s = ATOMIC_HASH(l); \ - __raw_spin_unlock(s); \ + arch_spinlock_t *s = ATOMIC_HASH(l); \ + arch_spin_unlock(s); \ local_irq_restore(f); \ } while(0) diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h index fae03e1..74036f4 100644 --- a/arch/parisc/include/asm/spinlock.h +++ b/arch/parisc/include/asm/spinlock.h @@ -5,17 +5,17 @@ #include <asm/processor.h> #include <asm/spinlock_types.h> -static inline int __raw_spin_is_locked(raw_spinlock_t *x) +static inline int arch_spin_is_locked(arch_spinlock_t *x) { volatile unsigned int *a = __ldcw_align(x); return *a == 0; } -#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) -#define __raw_spin_unlock_wait(x) \ - do { cpu_relax(); } while (__raw_spin_is_locked(x)) +#define arch_spin_lock(lock) 
arch_spin_lock_flags(lock, 0) +#define arch_spin_unlock_wait(x) \ + do { cpu_relax(); } while (arch_spin_is_locked(x)) -static inline void __raw_spin_lock_flags(raw_spinlock_t *x, +static inline void arch_spin_lock_flags(arch_spinlock_t *x, unsigned long flags) { volatile unsigned int *a; @@ -33,7 +33,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *x, mb(); } -static inline void __raw_spin_unlock(raw_spinlock_t *x) +static inline void arch_spin_unlock(arch_spinlock_t *x) { volatile unsigned int *a; mb(); @@ -42,7 +42,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *x) mb(); } -static inline int __raw_spin_trylock(raw_spinlock_t *x) +static inline int arch_spin_trylock(arch_spinlock_t *x) { volatile unsigned int *a; int ret; @@ -69,38 +69,38 @@ static inline int __raw_spin_trylock(raw_spinlock_t *x) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_lock(raw_rwlock_t *rw) +static __inline__ void arch_read_lock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); rw->counter++; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); } /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ void __raw_read_unlock(raw_rwlock_t *rw) +static __inline__ void arch_read_unlock(arch_rwlock_t *rw) { unsigned long flags; local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); rw->counter--; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); } /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to grab the same read lock */ -static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) +static __inline__ int arch_read_trylock(arch_rwlock_t *rw) { unsigned long flags; retry: local_irq_save(flags); - if (__raw_spin_trylock(&rw->lock)) { + if (arch_spin_trylock(&rw->lock)) { rw->counter++; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); return 1; } @@ -111,7 +111,7 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) return 0; /* Wait until we have a realistic chance at the lock */ - while (__raw_spin_is_locked(&rw->lock) && rw->counter >= 0) + while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0) cpu_relax(); goto retry; @@ -119,15 +119,15 @@ static __inline__ int __raw_read_trylock(raw_rwlock_t *rw) /* Note that we have to ensure interrupts are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ void __raw_write_lock(raw_rwlock_t *rw) +static __inline__ void arch_write_lock(arch_rwlock_t *rw) { unsigned long flags; retry: local_irq_save(flags); - __raw_spin_lock_flags(&rw->lock, flags); + arch_spin_lock_flags(&rw->lock, flags); if (rw->counter != 0) { - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); local_irq_restore(flags); while (rw->counter != 0) @@ -141,27 +141,27 @@ retry: local_irq_restore(flags); } -static __inline__ void __raw_write_unlock(raw_rwlock_t *rw) +static __inline__ void arch_write_unlock(arch_rwlock_t *rw) { rw->counter = 0; - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); } /* Note that we have to ensure interrupts 
are disabled in case we're * interrupted by some other code that wants to read_trylock() this lock */ -static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) +static __inline__ int arch_write_trylock(arch_rwlock_t *rw) { unsigned long flags; int result = 0; local_irq_save(flags); - if (__raw_spin_trylock(&rw->lock)) { + if (arch_spin_trylock(&rw->lock)) { if (rw->counter == 0) { rw->counter = -1; result = 1; } else { /* Read-locked. Oh well. */ - __raw_spin_unlock(&rw->lock); + arch_spin_unlock(&rw->lock); } } local_irq_restore(flags); @@ -173,7 +173,7 @@ static __inline__ int __raw_write_trylock(raw_rwlock_t *rw) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) +static __inline__ int arch_read_can_lock(arch_rwlock_t *rw) { return rw->counter >= 0; } @@ -182,16 +182,16 @@ static __inline__ int __raw_read_can_lock(raw_rwlock_t *rw) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -static __inline__ int __raw_write_can_lock(raw_rwlock_t *rw) +static __inline__ int arch_write_can_lock(arch_rwlock_t *rw) { return !rw->counter; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/parisc/include/asm/spinlock_types.h b/arch/parisc/include/asm/spinlock_types.h index 3f72f47..8c373aa 100644 --- a/arch/parisc/include/asm/spinlock_types.h +++ b/arch/parisc/include/asm/spinlock_types.h @@ -4,18 +4,18 @@ typedef struct { #ifdef CONFIG_PA20 volatile unsigned int slock; -# define __RAW_SPIN_LOCK_UNLOCKED { 1 } +# define __ARCH_SPIN_LOCK_UNLOCKED { 1 } #else volatile unsigned int lock[4]; -# define __RAW_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } +# define __ARCH_SPIN_LOCK_UNLOCKED { { 1, 1, 1, 1 } } #endif -} raw_spinlock_t; +} arch_spinlock_t; typedef struct { - raw_spinlock_t lock; + arch_spinlock_t lock; volatile int counter; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { __RAW_SPIN_LOCK_UNLOCKED, 0 } +#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 } #endif diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c index 2e7610c..f47465e 100644 --- a/arch/parisc/kernel/irq.c +++ b/arch/parisc/kernel/irq.c @@ -180,7 +180,7 @@ int show_interrupts(struct seq_file *p, void *v) if (i < NR_IRQS) { struct irqaction *action; - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -224,7 +224,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } return 0; diff --git a/arch/parisc/lib/bitops.c b/arch/parisc/lib/bitops.c index e3eb739..353963d 100644 --- a/arch/parisc/lib/bitops.c +++ b/arch/parisc/lib/bitops.c @@ -12,8 +12,8 @@ #include <asm/atomic.h> #ifdef CONFIG_SMP -raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { - [0 ... 
(ATOMIC_HASH_SIZE-1)] = __RAW_SPIN_LOCK_UNLOCKED +arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { + [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED }; #endif diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 168fce7..20de73c 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h @@ -58,7 +58,7 @@ struct rtas_t { unsigned long entry; /* physical address pointer */ unsigned long base; /* physical address pointer */ unsigned long size; - raw_spinlock_t lock; + arch_spinlock_t lock; struct rtas_args args; struct device_node *dev; /* virtual address pointer */ }; diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h index 198266c..764094c 100644 --- a/arch/powerpc/include/asm/spinlock.h +++ b/arch/powerpc/include/asm/spinlock.h @@ -28,7 +28,7 @@ #include <asm/asm-compat.h> #include <asm/synch.h> -#define __raw_spin_is_locked(x) ((x)->slock != 0) +#define arch_spin_is_locked(x) ((x)->slock != 0) #ifdef CONFIG_PPC64 /* use 0x800000yy when locked, where yy == CPU number */ @@ -54,7 +54,7 @@ * This returns the old value in the lock, so we succeeded * in getting the lock if the return value is 0. */ -static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) +static inline unsigned long __arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, token; @@ -73,10 +73,10 @@ static inline unsigned long arch_spin_trylock(raw_spinlock_t *lock) return tmp; } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; - return arch_spin_trylock(lock) == 0; + return __arch_spin_trylock(lock) == 0; } /* @@ -96,19 +96,19 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) #if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) /* We only yield to the hypervisor if we are in shared processor mode */ #define SHARED_PROCESSOR (get_lppaca()->shared_proc) -extern void __spin_yield(raw_spinlock_t *lock); -extern void __rw_yield(raw_rwlock_t *lock); +extern void __spin_yield(arch_spinlock_t *lock); +extern void __rw_yield(arch_rwlock_t *lock); #else /* SPLPAR || ISERIES */ #define __spin_yield(x) barrier() #define __rw_yield(x) barrier() #define SHARED_PROCESSOR 0 #endif -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { CLEAR_IO_SYNC; while (1) { - if (likely(arch_spin_trylock(lock) == 0)) + if (likely(__arch_spin_trylock(lock) == 0)) break; do { HMT_low(); @@ -120,13 +120,13 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) } static inline -void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long flags_dis; CLEAR_IO_SYNC; while (1) { - if (likely(arch_spin_trylock(lock) == 0)) + if (likely(__arch_spin_trylock(lock) == 0)) break; local_save_flags(flags_dis); local_irq_restore(flags); @@ -140,19 +140,19 @@ void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) } } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { SYNC_IO; - __asm__ __volatile__("# __raw_spin_unlock\n\t" + __asm__ __volatile__("# arch_spin_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); lock->slock = 0; } #ifdef CONFIG_PPC64 -extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); +extern void arch_spin_unlock_wait(arch_spinlock_t *lock); #else -#define 
__raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) #endif /* @@ -166,8 +166,8 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); * read-locks. */ -#define __raw_read_can_lock(rw) ((rw)->lock >= 0) -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_read_can_lock(rw) ((rw)->lock >= 0) +#define arch_write_can_lock(rw) (!(rw)->lock) #ifdef CONFIG_PPC64 #define __DO_SIGN_EXTEND "extsw %0,%0\n" @@ -181,7 +181,7 @@ extern void __raw_spin_unlock_wait(raw_spinlock_t *lock); * This returns the old value in the lock + 1, * so we got a read lock if the return value is > 0. */ -static inline long arch_read_trylock(raw_rwlock_t *rw) +static inline long __arch_read_trylock(arch_rwlock_t *rw) { long tmp; @@ -205,7 +205,7 @@ static inline long arch_read_trylock(raw_rwlock_t *rw) * This returns the old value in the lock, * so we got the write lock if the return value is 0. */ -static inline long arch_write_trylock(raw_rwlock_t *rw) +static inline long __arch_write_trylock(arch_rwlock_t *rw) { long tmp, token; @@ -225,10 +225,10 @@ static inline long arch_write_trylock(raw_rwlock_t *rw) return tmp; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { while (1) { - if (likely(arch_read_trylock(rw) > 0)) + if (likely(__arch_read_trylock(rw) > 0)) break; do { HMT_low(); @@ -239,10 +239,10 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) } } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { while (1) { - if (likely(arch_write_trylock(rw) == 0)) + if (likely(__arch_write_trylock(rw) == 0)) break; do { HMT_low(); @@ -253,17 +253,17 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) } } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { - return arch_read_trylock(rw) > 0; + return __arch_read_trylock(rw) > 0; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { - return arch_write_trylock(rw) == 0; + return __arch_write_trylock(rw) == 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { long tmp; @@ -280,19 +280,19 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) : "cr0", "xer", "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__("# write_unlock\n\t" LWSYNC_ON_SMP: : :"memory"); rw->lock = 0; } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) __spin_yield(lock) -#define _raw_read_relax(lock) __rw_yield(lock) -#define _raw_write_relax(lock) __rw_yield(lock) +#define arch_spin_relax(lock) __spin_yield(lock) +#define arch_read_relax(lock) __rw_yield(lock) +#define arch_write_relax(lock) __rw_yield(lock) #endif /* __KERNEL__ */ #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/powerpc/include/asm/spinlock_types.h b/arch/powerpc/include/asm/spinlock_types.h index 74236c9..2351adc 100644 --- a/arch/powerpc/include/asm/spinlock_types.h +++ b/arch/powerpc/include/asm/spinlock_types.h @@ 
-7,14 +7,14 @@ typedef struct { volatile unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile signed int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index f6dca4f..9040330 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c @@ -210,7 +210,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); action = desc->action; if (!action || !action->handler) @@ -237,7 +237,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -1112,7 +1112,7 @@ static int virq_debug_show(struct seq_file *m, void *private) if (!desc) continue; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); if (desc->action && desc->action->handler) { seq_printf(m, "%5d ", i); @@ -1131,7 +1131,7 @@ static int virq_debug_show(struct seq_file *m, void *private) seq_printf(m, "%s\n", p); } - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } return 0; diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index bf90361..fd0d294 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -42,7 +42,7 @@ #include <asm/mmu.h> struct rtas_t rtas = { - .lock = __RAW_SPIN_LOCK_UNLOCKED + .lock = __ARCH_SPIN_LOCK_UNLOCKED }; EXPORT_SYMBOL(rtas); @@ -80,13 +80,13 @@ static unsigned long lock_rtas(void) local_irq_save(flags); preempt_disable(); - __raw_spin_lock_flags(&rtas.lock, flags); + arch_spin_lock_flags(&rtas.lock, flags); return flags; } static void unlock_rtas(unsigned long flags) { - __raw_spin_unlock(&rtas.lock); + arch_spin_unlock(&rtas.lock); local_irq_restore(flags); preempt_enable(); } @@ -978,7 +978,7 @@ int __init early_init_dt_scan_rtas(unsigned long node, return 1; } -static raw_spinlock_t timebase_lock; +static arch_spinlock_t timebase_lock; static u64 timebase = 0; void __cpuinit rtas_give_timebase(void) @@ -987,10 +987,10 @@ void __cpuinit rtas_give_timebase(void) local_irq_save(flags); hard_irq_disable(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL); timebase = get_tb(); - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); while (timebase) barrier(); @@ -1002,8 +1002,8 @@ void __cpuinit rtas_take_timebase(void) { while (!timebase) barrier(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); set_tb(timebase >> 32, timebase & 0xffffffff); timebase = 0; - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); } diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 79d0fa3..58e14fb 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c @@ -25,7 +25,7 @@ #include <asm/smp.h> #include <asm/firmware.h> -void __spin_yield(raw_spinlock_t *lock) +void __spin_yield(arch_spinlock_t *lock) { unsigned int lock_value, holder_cpu, yield_count; @@ -55,7 +55,7 @@ void __spin_yield(raw_spinlock_t *lock) * This turns out to be the same for read and write locks, since * we only know the holder if it is write-locked. 
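The __spin_yield()/__rw_yield() helpers in this file back the arch_*_relax() hooks defined at the end of the powerpc spinlock.h hunk above. Roughly, a caller spinning on a contended arch_spinlock_t ends up in a loop of the following shape (a simplified sketch, not kernel code):

/* Sketch: spin until the trylock succeeds, yielding to the holder in between. */
static inline void example_spin_acquire(arch_spinlock_t *lock)
{
	while (!arch_spin_trylock(lock))
		arch_spin_relax(lock);	/* __spin_yield() on shared-processor LPARs */
}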
*/ -void __rw_yield(raw_rwlock_t *rw) +void __rw_yield(arch_rwlock_t *rw) { int lock_value; unsigned int holder_cpu, yield_count; @@ -82,7 +82,7 @@ void __rw_yield(raw_rwlock_t *rw) } #endif -void __raw_spin_unlock_wait(raw_spinlock_t *lock) +void arch_spin_unlock_wait(arch_spinlock_t *lock) { while (lock->slock) { HMT_low(); @@ -92,4 +92,4 @@ void __raw_spin_unlock_wait(raw_spinlock_t *lock) HMT_medium(); } -EXPORT_SYMBOL(__raw_spin_unlock_wait); +EXPORT_SYMBOL(arch_spin_unlock_wait); diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index cc0c854..0bac3a3 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c @@ -86,9 +86,9 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) u32 status, enable; /* Mask off the cascaded IRQ */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->chip->mask(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs * are pending. 'ffs()' is 1 based */ @@ -104,11 +104,11 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) } /* Processing done; can reenable the cascade now */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->chip->ack(virq); if (!(desc->status & IRQ_DISABLED)) desc->chip->unmask(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static int media5200_irq_map(struct irq_host *h, unsigned int virq, diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 7267eff..6829cf7 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c @@ -237,7 +237,7 @@ extern int noirqdebug; static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) { - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); @@ -265,18 +265,18 @@ static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) goto out_eoi; desc->status &= ~IRQ_PENDING; - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); action_ret = handle_IRQ_event(irq, action); if (!noirqdebug) note_interrupt(irq, desc, action_ret); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); desc->status &= ~IRQ_INPROGRESS; out_eoi: desc->chip->eoi(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static int iic_host_map(struct irq_host *h, unsigned int virq, diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index 0776225..86c4b29 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c @@ -217,9 +217,9 @@ void __init iSeries_activate_IRQs() struct irq_desc *desc = irq_to_desc(irq); if (desc && desc->chip && desc->chip->startup) { - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); desc->chip->startup(irq); - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } } diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index a461934..242f809 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c @@ -71,7 +71,7 @@ static void pas_restart(char *cmd) } #ifdef CONFIG_SMP -static raw_spinlock_t timebase_lock; +static arch_spinlock_t timebase_lock; static unsigned long timebase; static void __devinit pas_give_timebase(void) @@ -80,11 
+80,11 @@ static void __devinit pas_give_timebase(void) local_irq_save(flags); hard_irq_disable(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); mtspr(SPRN_TBCTL, TBCTL_FREEZE); isync(); timebase = get_tb(); - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); while (timebase) barrier(); @@ -97,10 +97,10 @@ static void __devinit pas_take_timebase(void) while (!timebase) smp_rmb(); - __raw_spin_lock(&timebase_lock); + arch_spin_lock(&timebase_lock); set_tb(timebase >> 32, timebase & 0xffffffff); timebase = 0; - __raw_spin_unlock(&timebase_lock); + arch_spin_unlock(&timebase_lock); } struct smp_ops_t pas_smp_ops = { diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 7d01b58..b9b9e11 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c @@ -906,7 +906,7 @@ void xics_migrate_irqs_away(void) || desc->chip->set_affinity == NULL) continue; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); if (status) { @@ -930,7 +930,7 @@ void xics_migrate_irqs_away(void) cpumask_setall(irq_to_desc(virq)->affinity); desc->chip->set_affinity(virq, cpu_all_mask); unlock: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } } #endif diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 62e5025..c6e11b0 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c @@ -173,7 +173,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) u32 intr_index; u32 have_shift = 0; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { if (desc->chip->mask_ack) desc->chip->mask_ack(irq); @@ -225,7 +225,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) break; } unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static int __devinit fsl_of_msi_probe(struct of_device *dev, diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 7d10074..6f220a9 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c @@ -225,12 +225,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) int src; int subvirq; - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->mask(virq); else desc->chip->mask_ack(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); msr = mfdcr(uic->dcrbase + UIC_MSR); if (!msr) /* spurious interrupt */ @@ -242,12 +242,12 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) generic_handle_irq(subvirq); uic_irq_ret: - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->status & IRQ_LEVEL) desc->chip->ack(virq); if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) desc->chip->unmask(virq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } static struct uic * __init uic_init_one(struct device_node *node) diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h index c9af0d19..a587907 100644 --- a/arch/s390/include/asm/spinlock.h +++ b/arch/s390/include/asm/spinlock.h @@ -52,27 +52,27 @@ _raw_compare_and_swap(volatile unsigned int *lock, * (the type definitions are in asm/spinlock_types.h) */ -#define __raw_spin_is_locked(x) ((x)->owner_cpu != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) \ - 
_raw_spin_relax(lock); } while (0) +#define arch_spin_is_locked(x) ((x)->owner_cpu != 0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) \ + arch_spin_relax(lock); } while (0) -extern void _raw_spin_lock_wait(raw_spinlock_t *); -extern void _raw_spin_lock_wait_flags(raw_spinlock_t *, unsigned long flags); -extern int _raw_spin_trylock_retry(raw_spinlock_t *); -extern void _raw_spin_relax(raw_spinlock_t *lock); +extern void arch_spin_lock_wait(arch_spinlock_t *); +extern void arch_spin_lock_wait_flags(arch_spinlock_t *, unsigned long flags); +extern int arch_spin_trylock_retry(arch_spinlock_t *); +extern void arch_spin_relax(arch_spinlock_t *lock); -static inline void __raw_spin_lock(raw_spinlock_t *lp) +static inline void arch_spin_lock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - _raw_spin_lock_wait(lp); + arch_spin_lock_wait(lp); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, +static inline void arch_spin_lock_flags(arch_spinlock_t *lp, unsigned long flags) { int old; @@ -80,20 +80,20 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lp, old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return; - _raw_spin_lock_wait_flags(lp, flags); + arch_spin_lock_wait_flags(lp, flags); } -static inline int __raw_spin_trylock(raw_spinlock_t *lp) +static inline int arch_spin_trylock(arch_spinlock_t *lp) { int old; old = _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()); if (likely(old == 0)) return 1; - return _raw_spin_trylock_retry(lp); + return arch_spin_trylock_retry(lp); } -static inline void __raw_spin_unlock(raw_spinlock_t *lp) +static inline void arch_spin_unlock(arch_spinlock_t *lp) { _raw_compare_and_swap(&lp->owner_cpu, lp->owner_cpu, 0); } @@ -113,22 +113,22 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lp) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((int)(x)->lock >= 0) +#define arch_read_can_lock(x) ((int)(x)->lock >= 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
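For orientation on the s390 hunks above: the spinlock word holds the bitwise-inverted owner CPU number, so both the fast path and the retry loops reduce to a single compare-and-swap from 0. A stripped-down sketch of that fast path, mirroring arch_spin_trylock() above (illustrative only, not an addition to the patch):

/* Sketch: 0 means unlocked; ~cpu is never 0, so the CAS result is unambiguous. */
static inline int example_s390_trylock(arch_spinlock_t *lp)
{
	return _raw_compare_and_swap(&lp->owner_cpu, 0, ~smp_processor_id()) == 0;
}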
*/ -#define __raw_write_can_lock(x) ((x)->lock == 0) +#define arch_write_can_lock(x) ((x)->lock == 0) -extern void _raw_read_lock_wait(raw_rwlock_t *lp); -extern void _raw_read_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_read_trylock_retry(raw_rwlock_t *lp); -extern void _raw_write_lock_wait(raw_rwlock_t *lp); -extern void _raw_write_lock_wait_flags(raw_rwlock_t *lp, unsigned long flags); -extern int _raw_write_trylock_retry(raw_rwlock_t *lp); +extern void _raw_read_lock_wait(arch_rwlock_t *lp); +extern void _raw_read_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_read_trylock_retry(arch_rwlock_t *lp); +extern void _raw_write_lock_wait(arch_rwlock_t *lp); +extern void _raw_write_lock_wait_flags(arch_rwlock_t *lp, unsigned long flags); +extern int _raw_write_trylock_retry(arch_rwlock_t *lp); -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -136,7 +136,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) _raw_read_lock_wait(rw); } -static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void arch_read_lock_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -144,7 +144,7 @@ static inline void __raw_read_lock_flags(raw_rwlock_t *rw, unsigned long flags) _raw_read_lock_wait_flags(rw, flags); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned int old, cmp; @@ -155,24 +155,24 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) } while (cmp != old); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait(rw); } -static inline void __raw_write_lock_flags(raw_rwlock_t *rw, unsigned long flags) +static inline void arch_write_lock_flags(arch_rwlock_t *rw, unsigned long flags) { if (unlikely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) != 0)) _raw_write_lock_wait_flags(rw, flags); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { _raw_compare_and_swap(&rw->lock, 0x80000000, 0); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned int old; old = rw->lock & 0x7fffffffU; @@ -181,14 +181,14 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return _raw_read_trylock_retry(rw); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { if (likely(_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0)) return 1; return _raw_write_trylock_retry(rw); } -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SPINLOCK_H */ diff --git a/arch/s390/include/asm/spinlock_types.h b/arch/s390/include/asm/spinlock_types.h index 654abc4..9c76656 100644 --- a/arch/s390/include/asm/spinlock_types.h +++ b/arch/s390/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned int owner_cpu; -} __attribute__ ((aligned (4))) raw_spinlock_t; +} __attribute__ ((aligned (4))) arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile 
unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/s390/lib/spinlock.c b/arch/s390/lib/spinlock.c index f7e0d30..10754a3 100644 --- a/arch/s390/lib/spinlock.c +++ b/arch/s390/lib/spinlock.c @@ -39,7 +39,7 @@ static inline void _raw_yield_cpu(int cpu) _raw_yield(); } -void _raw_spin_lock_wait(raw_spinlock_t *lp) +void arch_spin_lock_wait(arch_spinlock_t *lp) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -51,15 +51,15 @@ void _raw_spin_lock_wait(raw_spinlock_t *lp) _raw_yield_cpu(~owner); count = spin_retry; } - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return; } } -EXPORT_SYMBOL(_raw_spin_lock_wait); +EXPORT_SYMBOL(arch_spin_lock_wait); -void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) +void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags) { int count = spin_retry; unsigned int cpu = ~smp_processor_id(); @@ -72,7 +72,7 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) _raw_yield_cpu(~owner); count = spin_retry; } - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; local_irq_disable(); if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) @@ -80,32 +80,32 @@ void _raw_spin_lock_wait_flags(raw_spinlock_t *lp, unsigned long flags) local_irq_restore(flags); } } -EXPORT_SYMBOL(_raw_spin_lock_wait_flags); +EXPORT_SYMBOL(arch_spin_lock_wait_flags); -int _raw_spin_trylock_retry(raw_spinlock_t *lp) +int arch_spin_trylock_retry(arch_spinlock_t *lp) { unsigned int cpu = ~smp_processor_id(); int count; for (count = spin_retry; count > 0; count--) { - if (__raw_spin_is_locked(lp)) + if (arch_spin_is_locked(lp)) continue; if (_raw_compare_and_swap(&lp->owner_cpu, 0, cpu) == 0) return 1; } return 0; } -EXPORT_SYMBOL(_raw_spin_trylock_retry); +EXPORT_SYMBOL(arch_spin_trylock_retry); -void _raw_spin_relax(raw_spinlock_t *lock) +void arch_spin_relax(arch_spinlock_t *lock) { unsigned int cpu = lock->owner_cpu; if (cpu != 0) _raw_yield_cpu(~cpu); } -EXPORT_SYMBOL(_raw_spin_relax); +EXPORT_SYMBOL(arch_spin_relax); -void _raw_read_lock_wait(raw_rwlock_t *rw) +void _raw_read_lock_wait(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; @@ -115,7 +115,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -124,7 +124,7 @@ void _raw_read_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_lock_wait); -void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_read_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { unsigned int old; int count = spin_retry; @@ -135,7 +135,7 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) _raw_yield(); count = spin_retry; } - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 0x7fffffffU; local_irq_disable(); @@ -145,13 +145,13 @@ void _raw_read_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_read_lock_wait_flags); -int _raw_read_trylock_retry(raw_rwlock_t *rw) +int _raw_read_trylock_retry(arch_rwlock_t *rw) { unsigned int old; int count = spin_retry; while (count-- > 0) { - if (!__raw_read_can_lock(rw)) + if (!arch_read_can_lock(rw)) continue; old = rw->lock & 
0x7fffffffU; if (_raw_compare_and_swap(&rw->lock, old, old + 1) == old) @@ -161,7 +161,7 @@ int _raw_read_trylock_retry(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_read_trylock_retry); -void _raw_write_lock_wait(raw_rwlock_t *rw) +void _raw_write_lock_wait(arch_rwlock_t *rw) { int count = spin_retry; @@ -170,7 +170,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return; @@ -178,7 +178,7 @@ void _raw_write_lock_wait(raw_rwlock_t *rw) } EXPORT_SYMBOL(_raw_write_lock_wait); -void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) +void _raw_write_lock_wait_flags(arch_rwlock_t *rw, unsigned long flags) { int count = spin_retry; @@ -188,7 +188,7 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) _raw_yield(); count = spin_retry; } - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; local_irq_disable(); if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) @@ -197,12 +197,12 @@ void _raw_write_lock_wait_flags(raw_rwlock_t *rw, unsigned long flags) } EXPORT_SYMBOL(_raw_write_lock_wait_flags); -int _raw_write_trylock_retry(raw_rwlock_t *rw) +int _raw_write_trylock_retry(arch_rwlock_t *rw) { int count = spin_retry; while (count-- > 0) { - if (!__raw_write_can_lock(rw)) + if (!arch_write_can_lock(rw)) continue; if (_raw_compare_and_swap(&rw->lock, 0, 0x80000000) == 0) return 1; diff --git a/arch/sh/include/asm/spinlock.h b/arch/sh/include/asm/spinlock.h index a28c9f0..bdc0f3b 100644 --- a/arch/sh/include/asm/spinlock.h +++ b/arch/sh/include/asm/spinlock.h @@ -23,10 +23,10 @@ * Your basic SMP spinlocks, allowing only a single CPU anywhere */ -#define __raw_spin_is_locked(x) ((x)->lock <= 0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_spin_unlock_wait(x) \ - do { while (__raw_spin_is_locked(x)) cpu_relax(); } while (0) +#define arch_spin_is_locked(x) ((x)->lock <= 0) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_spin_unlock_wait(x) \ + do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) /* * Simple spin lock operations. There are two variants, one clears IRQ's @@ -34,14 +34,14 @@ * * We make no fairness assumptions. They have a cost. */ -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; unsigned long oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_lock \n\t" + "movli.l @%2, %0 ! arch_spin_lock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -54,12 +54,12 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) ); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { unsigned long tmp; __asm__ __volatile__ ( - "mov #1, %0 ! __raw_spin_unlock \n\t" + "mov #1, %0 ! arch_spin_unlock \n\t" "mov.l %0, @%1 \n\t" : "=&z" (tmp) : "r" (&lock->lock) @@ -67,13 +67,13 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) ); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_spin_trylock \n\t" + "movli.l @%2, %0 ! 
arch_spin_trylock \n\t" "mov %0, %1 \n\t" "mov #0, %0 \n\t" "movco.l %0, @%2 \n\t" @@ -100,21 +100,21 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_read_can_lock(x) ((x)->lock > 0) +#define arch_read_can_lock(x) ((x)->lock > 0) /** * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. */ -#define __raw_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) +#define arch_write_can_lock(x) ((x)->lock == RW_LOCK_BIAS) -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_lock \n\t" + "movli.l @%1, %0 ! arch_read_lock \n\t" "cmp/pl %0 \n\t" "bf 1b \n\t" "add #-1, %0 \n\t" @@ -126,13 +126,13 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ); } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_read_unlock \n\t" + "movli.l @%1, %0 ! arch_read_unlock \n\t" "add #1, %0 \n\t" "movco.l %0, @%1 \n\t" "bf 1b \n\t" @@ -142,13 +142,13 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw) ); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { unsigned long tmp; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%1, %0 ! __raw_write_lock \n\t" + "movli.l @%1, %0 ! arch_write_lock \n\t" "cmp/hs %2, %0 \n\t" "bf 1b \n\t" "sub %2, %0 \n\t" @@ -160,23 +160,23 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { __asm__ __volatile__ ( - "mov.l %1, @%0 ! __raw_write_unlock \n\t" + "mov.l %1, @%0 ! arch_write_unlock \n\t" : : "r" (&rw->lock), "r" (RW_LOCK_BIAS) : "t", "memory" ); } -static inline int __raw_read_trylock(raw_rwlock_t *rw) +static inline int arch_read_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_read_trylock \n\t" + "movli.l @%2, %0 ! arch_read_trylock \n\t" "mov %0, %1 \n\t" "cmp/pl %0 \n\t" "bf 2f \n\t" @@ -193,13 +193,13 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw) return (oldval > 0); } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned long tmp, oldval; __asm__ __volatile__ ( "1: \n\t" - "movli.l @%2, %0 ! __raw_write_trylock \n\t" + "movli.l @%2, %0 ! 
arch_write_trylock \n\t" "mov %0, %1 \n\t" "cmp/hs %3, %0 \n\t" "bf 2f \n\t" @@ -216,11 +216,11 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (oldval > (RW_LOCK_BIAS - 1)); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* __ASM_SH_SPINLOCK_H */ diff --git a/arch/sh/include/asm/spinlock_types.h b/arch/sh/include/asm/spinlock_types.h index b4d244e..9b7560d 100644 --- a/arch/sh/include/asm/spinlock_types.h +++ b/arch/sh/include/asm/spinlock_types.h @@ -7,15 +7,15 @@ typedef struct { volatile unsigned int lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 1 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 1 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; #define RW_LOCK_BIAS 0x01000000 -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif diff --git a/arch/sh/kernel/irq.c b/arch/sh/kernel/irq.c index e1913f2..d2d41d0 100644 --- a/arch/sh/kernel/irq.c +++ b/arch/sh/kernel/irq.c @@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; @@ -97,7 +97,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); out: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } #endif diff --git a/arch/sparc/include/asm/spinlock_32.h b/arch/sparc/include/asm/spinlock_32.h index 857630c..7f9b9db 100644 --- a/arch/sparc/include/asm/spinlock_32.h +++ b/arch/sparc/include/asm/spinlock_32.h @@ -10,12 +10,12 @@ #include <asm/psr.h> -#define __raw_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) +#define arch_spin_is_locked(lock) (*((volatile unsigned char *)(lock)) != 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) +#define arch_spin_unlock_wait(lock) \ + do { while (arch_spin_is_locked(lock)) cpu_relax(); } while (0) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { __asm__ __volatile__( "\n1:\n\t" @@ -35,7 +35,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "g2", "memory", "cc"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned int result; __asm__ __volatile__("ldstub [%1], %0" @@ -45,7 +45,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory"); } @@ -65,7 +65,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * Sort of like atomic_t's on Sparc, but even more clever. 
* * ------------------------------------ - * | 24-bit counter | wlock | raw_rwlock_t + * | 24-bit counter | wlock | arch_rwlock_t * ------------------------------------ * 31 8 7 0 * @@ -76,9 +76,9 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) * * Unfortunately this scheme limits us to ~16,000,000 cpus. */ -static inline void arch_read_lock(raw_rwlock_t *rw) +static inline void __arch_read_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -89,16 +89,16 @@ static inline void arch_read_lock(raw_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_lock(lock) \ +#define arch_read_lock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_lock(lock); \ + __arch_read_lock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void arch_read_unlock(raw_rwlock_t *rw) +static inline void __arch_read_unlock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -109,16 +109,16 @@ static inline void arch_read_unlock(raw_rwlock_t *rw) : "g2", "g4", "memory", "cc"); } -#define __raw_read_unlock(lock) \ +#define arch_read_unlock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - arch_read_unlock(lock); \ + __arch_read_unlock(lock); \ local_irq_restore(flags); \ } while(0) -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); lp = rw; __asm__ __volatile__( "mov %%o7, %%g4\n\t" @@ -130,7 +130,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) *(volatile __u32 *)&lp->lock = ~0U; } -static inline int __raw_write_trylock(raw_rwlock_t *rw) +static inline int arch_write_trylock(arch_rwlock_t *rw) { unsigned int val; @@ -150,9 +150,9 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw) return (val == 0); } -static inline int arch_read_trylock(raw_rwlock_t *rw) +static inline int __arch_read_trylock(arch_rwlock_t *rw) { - register raw_rwlock_t *lp asm("g1"); + register arch_rwlock_t *lp asm("g1"); register int res asm("o0"); lp = rw; __asm__ __volatile__( @@ -165,27 +165,27 @@ static inline int arch_read_trylock(raw_rwlock_t *rw) return res; } -#define __raw_read_trylock(lock) \ +#define arch_read_trylock(lock) \ ({ unsigned long flags; \ int res; \ local_irq_save(flags); \ - res = arch_read_trylock(lock); \ + res = __arch_read_trylock(lock); \ local_irq_restore(flags); \ res; \ }) -#define __raw_write_unlock(rw) do { (rw)->lock = 0; } while(0) +#define arch_write_unlock(rw) do { (rw)->lock = 0; } while(0) -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) -#define __raw_write_lock_flags(rw, flags) __raw_write_lock(rw) +#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock) +#define arch_read_lock_flags(rw, flags) arch_read_lock(rw) +#define arch_write_lock_flags(rw, flags) arch_write_lock(rw) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() -#define __raw_read_can_lock(rw) (!((rw)->lock & 0xff)) -#define __raw_write_can_lock(rw) (!(rw)->lock) +#define arch_read_can_lock(rw) (!((rw)->lock & 0xff)) 
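The 32-bit sparc rwlock word described in the layout comment above keeps the reader count in bits 31..8 and the writer-lock byte in bits 7..0, which is what arch_read_can_lock() (low byte clear) and arch_write_can_lock() (whole word clear) check. A small decoding sketch (illustrative helpers, not part of the patch):

/* Sketch: decode the sparc32 rwlock word (24-bit counter | 8-bit wlock). */
static inline unsigned int example_reader_count(const arch_rwlock_t *rw)
{
	return rw->lock >> 8;		/* reader count while not write-locked */
}

static inline unsigned int example_writer_byte(const arch_rwlock_t *rw)
{
	return rw->lock & 0xff;		/* non-zero while write-locked */
}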
+#define arch_write_can_lock(rw) (!(rw)->lock) #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h index 43e5147..073936a 100644 --- a/arch/sparc/include/asm/spinlock_64.h +++ b/arch/sparc/include/asm/spinlock_64.h @@ -21,13 +21,13 @@ * the spinner sections must be pre-V9 branches. */ -#define __raw_spin_is_locked(lp) ((lp)->lock != 0) +#define arch_spin_is_locked(lp) ((lp)->lock != 0) -#define __raw_spin_unlock_wait(lp) \ +#define arch_spin_unlock_wait(lp) \ do { rmb(); \ } while((lp)->lock) -static inline void __raw_spin_lock(raw_spinlock_t *lock) +static inline void arch_spin_lock(arch_spinlock_t *lock) { unsigned long tmp; @@ -46,7 +46,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock) : "memory"); } -static inline int __raw_spin_trylock(raw_spinlock_t *lock) +static inline int arch_spin_trylock(arch_spinlock_t *lock) { unsigned long result; @@ -59,7 +59,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock) return (result == 0UL); } -static inline void __raw_spin_unlock(raw_spinlock_t *lock) +static inline void arch_spin_unlock(arch_spinlock_t *lock) { __asm__ __volatile__( " stb %%g0, [%0]" @@ -68,7 +68,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock) : "memory"); } -static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { unsigned long tmp1, tmp2; @@ -92,7 +92,7 @@ static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long fla /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */ -static void inline arch_read_lock(raw_rwlock_t *lock) +static void inline arch_read_lock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -115,7 +115,7 @@ static void inline arch_read_lock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_read_trylock(raw_rwlock_t *lock) +static int inline arch_read_trylock(arch_rwlock_t *lock) { int tmp1, tmp2; @@ -136,7 +136,7 @@ static int inline arch_read_trylock(raw_rwlock_t *lock) return tmp1; } -static void inline arch_read_unlock(raw_rwlock_t *lock) +static void inline arch_read_unlock(arch_rwlock_t *lock) { unsigned long tmp1, tmp2; @@ -152,7 +152,7 @@ static void inline arch_read_unlock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_lock(raw_rwlock_t *lock) +static void inline arch_write_lock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2; @@ -177,7 +177,7 @@ static void inline arch_write_lock(raw_rwlock_t *lock) : "memory"); } -static void inline arch_write_unlock(raw_rwlock_t *lock) +static void inline arch_write_unlock(arch_rwlock_t *lock) { __asm__ __volatile__( " stw %%g0, [%0]" @@ -186,7 +186,7 @@ static void inline arch_write_unlock(raw_rwlock_t *lock) : "memory"); } -static int inline arch_write_trylock(raw_rwlock_t *lock) +static int inline arch_write_trylock(arch_rwlock_t *lock) { unsigned long mask, tmp1, tmp2, result; @@ -210,21 +210,21 @@ static int inline arch_write_trylock(raw_rwlock_t *lock) return result; } -#define __raw_read_lock(p) arch_read_lock(p) -#define __raw_read_lock_flags(p, f) arch_read_lock(p) -#define __raw_read_trylock(p) arch_read_trylock(p) -#define __raw_read_unlock(p) arch_read_unlock(p) -#define __raw_write_lock(p) arch_write_lock(p) -#define __raw_write_lock_flags(p, f) arch_write_lock(p) -#define __raw_write_unlock(p) arch_write_unlock(p) -#define __raw_write_trylock(p) arch_write_trylock(p) - -#define __raw_read_can_lock(rw) 
(!((rw)->lock & 0x80000000UL)) -#define __raw_write_can_lock(rw) (!(rw)->lock) - -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_read_lock(p) arch_read_lock(p) +#define arch_read_lock_flags(p, f) arch_read_lock(p) +#define arch_read_trylock(p) arch_read_trylock(p) +#define arch_read_unlock(p) arch_read_unlock(p) +#define arch_write_lock(p) arch_write_lock(p) +#define arch_write_lock_flags(p, f) arch_write_lock(p) +#define arch_write_unlock(p) arch_write_unlock(p) +#define arch_write_trylock(p) arch_write_trylock(p) + +#define arch_read_can_lock(rw) (!((rw)->lock & 0x80000000UL)) +#define arch_write_can_lock(rw) (!(rw)->lock) + +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() #endif /* !(__ASSEMBLY__) */ diff --git a/arch/sparc/include/asm/spinlock_types.h b/arch/sparc/include/asm/spinlock_types.h index 37cbe01..9c454fd 100644 --- a/arch/sparc/include/asm/spinlock_types.h +++ b/arch/sparc/include/asm/spinlock_types.h @@ -7,14 +7,14 @@ typedef struct { volatile unsigned char lock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { volatile unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { 0 } +#define __ARCH_RW_LOCK_UNLOCKED { 0 } #endif diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index ce996f9..8d6882b 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -176,7 +176,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -195,7 +195,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j) @@ -785,14 +785,14 @@ void fixup_irqs(void) for (irq = 0; irq < NR_IRQS; irq++) { unsigned long flags; - spin_lock_irqsave(&irq_desc[irq].lock, flags); + raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); if (irq_desc[irq].action && !(irq_desc[irq].status & IRQ_PER_CPU)) { if (irq_desc[irq].chip->set_affinity) irq_desc[irq].chip->set_affinity(irq, irq_desc[irq].affinity); } - spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); } tick_ops->disable_irq(); diff --git a/arch/um/kernel/irq.c b/arch/um/kernel/irq.c index 039270b..89474ba 100644 --- a/arch/um/kernel/irq.c +++ b/arch/um/kernel/irq.c @@ -34,7 +34,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -53,7 +53,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) seq_putc(p, '\n'); diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index efb3899..dd59a85 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -731,34 +731,34 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ 
idx, #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) -static inline int __raw_spin_is_locked(struct raw_spinlock *lock) +static inline int arch_spin_is_locked(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_locked, lock); } -static inline int __raw_spin_is_contended(struct raw_spinlock *lock) +static inline int arch_spin_is_contended(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_is_contended, lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(struct raw_spinlock *lock) +static __always_inline void arch_spin_lock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_lock, lock); } -static __always_inline void __raw_spin_lock_flags(struct raw_spinlock *lock, +static __always_inline void arch_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { PVOP_VCALL2(pv_lock_ops.spin_lock_flags, lock, flags); } -static __always_inline int __raw_spin_trylock(struct raw_spinlock *lock) +static __always_inline int arch_spin_trylock(struct arch_spinlock *lock) { return PVOP_CALL1(int, pv_lock_ops.spin_trylock, lock); } -static __always_inline void __raw_spin_unlock(struct raw_spinlock *lock) +static __always_inline void arch_spin_unlock(struct arch_spinlock *lock) { PVOP_VCALL1(pv_lock_ops.spin_unlock, lock); } diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 9357473..b1e70d5 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -318,14 +318,14 @@ struct pv_mmu_ops { phys_addr_t phys, pgprot_t flags); }; -struct raw_spinlock; +struct arch_spinlock; struct pv_lock_ops { - int (*spin_is_locked)(struct raw_spinlock *lock); - int (*spin_is_contended)(struct raw_spinlock *lock); - void (*spin_lock)(struct raw_spinlock *lock); - void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags); - int (*spin_trylock)(struct raw_spinlock *lock); - void (*spin_unlock)(struct raw_spinlock *lock); + int (*spin_is_locked)(struct arch_spinlock *lock); + int (*spin_is_contended)(struct arch_spinlock *lock); + void (*spin_lock)(struct arch_spinlock *lock); + void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags); + int (*spin_trylock)(struct arch_spinlock *lock); + void (*spin_unlock)(struct arch_spinlock *lock); }; /* This contains all the paravirt structures: we get a convenient diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h index 4e77853..3089f70 100644 --- a/arch/x86/include/asm/spinlock.h +++ b/arch/x86/include/asm/spinlock.h @@ -58,7 +58,7 @@ #if (NR_CPUS < 256) #define TICKET_SHIFT 8 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { short inc = 0x0100; @@ -77,7 +77,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp, new; @@ -96,7 +96,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incb %0" : "+m" (lock->slock) @@ -106,7 +106,7 @@ static __always_inline void 
__ticket_spin_unlock(raw_spinlock_t *lock) #else #define TICKET_SHIFT 16 -static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock) { int inc = 0x00010000; int tmp; @@ -127,7 +127,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) : "memory", "cc"); } -static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock) { int tmp; int new; @@ -149,7 +149,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) return tmp; } -static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock) { asm volatile(UNLOCK_LOCK_PREFIX "incw %0" : "+m" (lock->slock) @@ -158,14 +158,14 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) } #endif -static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +static inline int __ticket_spin_is_locked(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1)); } -static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +static inline int __ticket_spin_is_contended(arch_spinlock_t *lock) { int tmp = ACCESS_ONCE(lock->slock); @@ -174,43 +174,43 @@ static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) #ifndef CONFIG_PARAVIRT_SPINLOCKS -static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +static inline int arch_spin_is_locked(arch_spinlock_t *lock) { return __ticket_spin_is_locked(lock); } -static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +static inline int arch_spin_is_contended(arch_spinlock_t *lock) { return __ticket_spin_is_contended(lock); } -#define __raw_spin_is_contended __raw_spin_is_contended +#define arch_spin_is_contended arch_spin_is_contended -static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +static __always_inline void arch_spin_lock(arch_spinlock_t *lock) { __ticket_spin_lock(lock); } -static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +static __always_inline int arch_spin_trylock(arch_spinlock_t *lock) { return __ticket_spin_trylock(lock); } -static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +static __always_inline void arch_spin_unlock(arch_spinlock_t *lock) { __ticket_spin_unlock(lock); } -static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, +static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } #endif /* CONFIG_PARAVIRT_SPINLOCKS */ -static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +static inline void arch_spin_unlock_wait(arch_spinlock_t *lock) { - while (__raw_spin_is_locked(lock)) + while (arch_spin_is_locked(lock)) cpu_relax(); } @@ -232,7 +232,7 @@ static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) * read_can_lock - would read_trylock() succeed? * @lock: the rwlock in question. */ -static inline int __raw_read_can_lock(raw_rwlock_t *lock) +static inline int arch_read_can_lock(arch_rwlock_t *lock) { return (int)(lock)->lock > 0; } @@ -241,12 +241,12 @@ static inline int __raw_read_can_lock(raw_rwlock_t *lock) * write_can_lock - would write_trylock() succeed? * @lock: the rwlock in question. 
*/ -static inline int __raw_write_can_lock(raw_rwlock_t *lock) +static inline int arch_write_can_lock(arch_rwlock_t *lock) { return (lock)->lock == RW_LOCK_BIAS; } -static inline void __raw_read_lock(raw_rwlock_t *rw) +static inline void arch_read_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" "jns 1f\n" @@ -255,7 +255,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw) ::LOCK_PTR_REG (rw) : "memory"); } -static inline void __raw_write_lock(raw_rwlock_t *rw) +static inline void arch_write_lock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" "jz 1f\n" @@ -264,7 +264,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw) ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); } -static inline int __raw_read_trylock(raw_rwlock_t *lock) +static inline int arch_read_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -274,7 +274,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *lock) return 0; } -static inline int __raw_write_trylock(raw_rwlock_t *lock) +static inline int arch_write_trylock(arch_rwlock_t *lock) { atomic_t *count = (atomic_t *)lock; @@ -284,23 +284,23 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock) return 0; } -static inline void __raw_read_unlock(raw_rwlock_t *rw) +static inline void arch_read_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); } -static inline void __raw_write_unlock(raw_rwlock_t *rw) +static inline void arch_write_unlock(arch_rwlock_t *rw) { asm volatile(LOCK_PREFIX "addl %1, %0" : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); } -#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock) -#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock) +#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) +#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) -#define _raw_spin_relax(lock) cpu_relax() -#define _raw_read_relax(lock) cpu_relax() -#define _raw_write_relax(lock) cpu_relax() +#define arch_spin_relax(lock) cpu_relax() +#define arch_read_relax(lock) cpu_relax() +#define arch_write_relax(lock) cpu_relax() /* The {read|write|spin}_lock() on x86 are full memory barriers. 
*/ static inline void smp_mb__after_lock(void) { } diff --git a/arch/x86/include/asm/spinlock_types.h b/arch/x86/include/asm/spinlock_types.h index 845f81c..dcb48b2 100644 --- a/arch/x86/include/asm/spinlock_types.h +++ b/arch/x86/include/asm/spinlock_types.h @@ -5,16 +5,16 @@ # error "please don't include this file directly" #endif -typedef struct raw_spinlock { +typedef struct arch_spinlock { unsigned int slock; -} raw_spinlock_t; +} arch_spinlock_t; -#define __RAW_SPIN_LOCK_UNLOCKED { 0 } +#define __ARCH_SPIN_LOCK_UNLOCKED { 0 } typedef struct { unsigned int lock; -} raw_rwlock_t; +} arch_rwlock_t; -#define __RAW_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } +#define __ARCH_RW_LOCK_UNLOCKED { RW_LOCK_BIAS } #endif /* _ASM_X86_SPINLOCK_TYPES_H */ diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index d5d498f..11a5851 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2431,7 +2431,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) continue; cfg = irq_cfg(irq); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain)) goto unlock; @@ -2450,7 +2450,7 @@ asmlinkage void smp_irq_move_cleanup_interrupt(void) } __get_cpu_var(vector_irq)[vector] = -1; unlock: - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } irq_exit(); diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c index b8ce165..0a0aa1c 100644 --- a/arch/x86/kernel/dumpstack.c +++ b/arch/x86/kernel/dumpstack.c @@ -188,7 +188,7 @@ void dump_stack(void) } EXPORT_SYMBOL(dump_stack); -static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED; +static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED; static int die_owner = -1; static unsigned int die_nest_count; @@ -207,11 +207,11 @@ unsigned __kprobes long oops_begin(void) /* racy, but better than risking deadlock. */ raw_local_irq_save(flags); cpu = smp_processor_id(); - if (!__raw_spin_trylock(&die_lock)) { + if (!arch_spin_trylock(&die_lock)) { if (cpu == die_owner) /* nested oops. should stop eventually */; else - __raw_spin_lock(&die_lock); + arch_spin_lock(&die_lock); } die_nest_count++; die_owner = cpu; @@ -231,7 +231,7 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr) die_nest_count--; if (!die_nest_count) /* Nest count reaches zero, release the lock. 
*/ - __raw_spin_unlock(&die_lock); + arch_spin_unlock(&die_lock); raw_local_irq_restore(flags); oops_exit(); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 664bcb7..91fd0c7 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -149,7 +149,7 @@ int show_interrupts(struct seq_file *p, void *v) if (!desc) return 0; - spin_lock_irqsave(&desc->lock, flags); + raw_spin_lock_irqsave(&desc->lock, flags); for_each_online_cpu(j) any_count |= kstat_irqs_cpu(i, j); action = desc->action; @@ -170,7 +170,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); out: - spin_unlock_irqrestore(&desc->lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); return 0; } @@ -294,12 +294,12 @@ void fixup_irqs(void) continue; /* interrupt's are disabled at this point */ - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); affinity = desc->affinity; if (!irq_has_action(irq) || cpumask_equal(affinity, cpu_online_mask)) { - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); continue; } @@ -326,7 +326,7 @@ void fixup_irqs(void) if (!(desc->status & IRQ_MOVE_PCNTXT) && desc->chip->unmask) desc->chip->unmask(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); if (break_affinity && set_affinity) printk("Broke affinity for irq %i\n", irq); @@ -356,10 +356,10 @@ void fixup_irqs(void) irq = __get_cpu_var(vector_irq)[vector]; desc = irq_to_desc(irq); - spin_lock(&desc->lock); + raw_spin_lock(&desc->lock); if (desc->chip->retrigger) desc->chip->retrigger(irq); - spin_unlock(&desc->lock); + raw_spin_unlock(&desc->lock); } } } diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c index 3a7c5a4..676b8c7 100644 --- a/arch/x86/kernel/paravirt-spinlocks.c +++ b/arch/x86/kernel/paravirt-spinlocks.c @@ -8,9 +8,9 @@ #include <asm/paravirt.h> static inline void -default_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags) +default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { - __raw_spin_lock(lock); + arch_spin_lock(lock); } struct pv_lock_ops pv_lock_ops = { diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c index eed1568..0aa5fed8 100644 --- a/arch/x86/kernel/tsc_sync.c +++ b/arch/x86/kernel/tsc_sync.c @@ -33,7 +33,7 @@ static __cpuinitdata atomic_t stop_count; * we want to have the fastest, inlined, non-debug version * of a critical section, to be able to prove TSC time-warps: */ -static __cpuinitdata raw_spinlock_t sync_lock = __RAW_SPIN_LOCK_UNLOCKED; +static __cpuinitdata arch_spinlock_t sync_lock = __ARCH_SPIN_LOCK_UNLOCKED; static __cpuinitdata cycles_t last_tsc; static __cpuinitdata cycles_t max_warp; @@ -62,13 +62,13 @@ static __cpuinit void check_tsc_warp(void) * previous TSC that was measured (possibly on * another CPU) and update the previous TSC timestamp. 
*/ - __raw_spin_lock(&sync_lock); + arch_spin_lock(&sync_lock); prev = last_tsc; rdtsc_barrier(); now = get_cycles(); rdtsc_barrier(); last_tsc = now; - __raw_spin_unlock(&sync_lock); + arch_spin_unlock(&sync_lock); /* * Be nice every now and then (and also check whether @@ -87,10 +87,10 @@ static __cpuinit void check_tsc_warp(void) * we saw a time-warp of the TSC going backwards: */ if (unlikely(prev > now)) { - __raw_spin_lock(&sync_lock); + arch_spin_lock(&sync_lock); max_warp = max(max_warp, prev - now); nr_warps++; - __raw_spin_unlock(&sync_lock); + arch_spin_unlock(&sync_lock); } } WARN(!(now-start), diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c index 36a5141..24ded31 100644 --- a/arch/x86/xen/spinlock.c +++ b/arch/x86/xen/spinlock.c @@ -120,14 +120,14 @@ struct xen_spinlock { unsigned short spinners; /* count of waiting cpus */ }; -static int xen_spin_is_locked(struct raw_spinlock *lock) +static int xen_spin_is_locked(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; return xl->lock != 0; } -static int xen_spin_is_contended(struct raw_spinlock *lock) +static int xen_spin_is_contended(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; @@ -136,7 +136,7 @@ static int xen_spin_is_contended(struct raw_spinlock *lock) return xl->spinners != 0; } -static int xen_spin_trylock(struct raw_spinlock *lock) +static int xen_spin_trylock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; u8 old = 1; @@ -181,7 +181,7 @@ static inline void unspinning_lock(struct xen_spinlock *xl, struct xen_spinlock __get_cpu_var(lock_spinners) = prev; } -static noinline int xen_spin_lock_slow(struct raw_spinlock *lock, bool irq_enable) +static noinline int xen_spin_lock_slow(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; struct xen_spinlock *prev; @@ -254,7 +254,7 @@ out: return ret; } -static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) +static inline void __xen_spin_lock(struct arch_spinlock *lock, bool irq_enable) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; unsigned timeout; @@ -291,12 +291,12 @@ static inline void __xen_spin_lock(struct raw_spinlock *lock, bool irq_enable) spin_time_accum_total(start_spin); } -static void xen_spin_lock(struct raw_spinlock *lock) +static void xen_spin_lock(struct arch_spinlock *lock) { __xen_spin_lock(lock, false); } -static void xen_spin_lock_flags(struct raw_spinlock *lock, unsigned long flags) +static void xen_spin_lock_flags(struct arch_spinlock *lock, unsigned long flags) { __xen_spin_lock(lock, !raw_irqs_disabled_flags(flags)); } @@ -317,7 +317,7 @@ static noinline void xen_spin_unlock_slow(struct xen_spinlock *xl) } } -static void xen_spin_unlock(struct raw_spinlock *lock) +static void xen_spin_unlock(struct arch_spinlock *lock) { struct xen_spinlock *xl = (struct xen_spinlock *)lock; diff --git a/arch/xtensa/kernel/irq.c b/arch/xtensa/kernel/irq.c index a1badb3..8cd3848 100644 --- a/arch/xtensa/kernel/irq.c +++ b/arch/xtensa/kernel/irq.c @@ -90,7 +90,7 @@ int show_interrupts(struct seq_file *p, void *v) } if (i < NR_IRQS) { - spin_lock_irqsave(&irq_desc[i].lock, flags); + raw_spin_lock_irqsave(&irq_desc[i].lock, flags); action = irq_desc[i].action; if (!action) goto skip; @@ -109,7 +109,7 @@ int show_interrupts(struct seq_file *p, void *v) seq_putc(p, '\n'); skip: - spin_unlock_irqrestore(&irq_desc[i].lock, flags); + 
raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); } else if (i == NR_IRQS) { seq_printf(p, "NMI: "); for_each_online_cpu(j)
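
The common thread of the hunks above is a mechanical rename of the arch-level lock layer: raw_spinlock_t/raw_rwlock_t become arch_spinlock_t/arch_rwlock_t, the __raw_spin_*/__raw_read_*/__raw_write_* entry points become arch_spin_*/arch_read_*/arch_write_*, and callers that need a lock which is never preemptible (irq_desc->lock, die_lock, the TSC sync_lock) move to the raw_spin_*/arch_spin_* names directly. As a rough orientation aid, here is a minimal user-space sketch of the renamed per-arch interface, using GCC __atomic builtins as a stand-in for the per-architecture assembly (ldstub on sparc, ticketed xadd on x86); it is illustrative only, not the kernel implementation.

        /* Illustrative analogue of the renamed arch-level API; not kernel code. */
        #include <stdbool.h>

        typedef struct arch_spinlock {
                volatile unsigned int lock;     /* 0 = unlocked, 1 = held */
        } arch_spinlock_t;

        #define __ARCH_SPIN_LOCK_UNLOCKED       { 0 }

        static inline bool arch_spin_trylock(arch_spinlock_t *lock)
        {
                /* test-and-set, as the sparc32 ldstub version does */
                return __atomic_exchange_n(&lock->lock, 1, __ATOMIC_ACQUIRE) == 0;
        }

        static inline void arch_spin_lock(arch_spinlock_t *lock)
        {
                while (!arch_spin_trylock(lock))
                        ;       /* arch_spin_relax() would cpu_relax() here */
        }

        static inline void arch_spin_unlock(arch_spinlock_t *lock)
        {
                __atomic_store_n(&lock->lock, 0, __ATOMIC_RELEASE);
        }

        static inline bool arch_spin_is_locked(arch_spinlock_t *lock)
        {
                return __atomic_load_n(&lock->lock, __ATOMIC_RELAXED) != 0;
        }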
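The x86 hunks also carry the ticket spinlock over to the new names: __ticket_spin_lock() takes a ticket with a locked xadd and spins until the owner byte matches it, __ticket_spin_unlock() advances the owner byte, and __ticket_spin_is_locked() reports the lock as held while head and tail differ. The sketch below models the same FIFO idea in portable C, but with two separate counters instead of the packed slock word, so the byte-wrap handling of the real code is not needed; treat it as a teaching aid under those assumptions, not as the kernel code.

        /* Illustrative two-counter ticket lock; not the kernel's packed-word version. */
        #include <stdbool.h>

        typedef struct {
                volatile unsigned int head;     /* ticket currently being served */
                volatile unsigned int tail;     /* next ticket to hand out */
        } ticket_lock_t;

        static inline void ticket_lock(ticket_lock_t *lock)
        {
                /* equivalent of the locked xadd: grab a ticket, remember it */
                unsigned int ticket = __atomic_fetch_add(&lock->tail, 1, __ATOMIC_RELAXED);

                while (__atomic_load_n(&lock->head, __ATOMIC_ACQUIRE) != ticket)
                        ;       /* spin until our ticket is being served */
        }

        static inline void ticket_unlock(ticket_lock_t *lock)
        {
                /* equivalent of "incb/incw": hand the lock to the next waiter */
                __atomic_fetch_add(&lock->head, 1, __ATOMIC_RELEASE);
        }

        static inline bool ticket_is_locked(ticket_lock_t *lock)
        {
                /* locked while head and tail differ, as __ticket_spin_is_locked() checks */
                return __atomic_load_n(&lock->head, __ATOMIC_RELAXED) !=
                       __atomic_load_n(&lock->tail, __ATOMIC_RELAXED);
        }

The point of the ticket scheme over the plain test-and-set sketch above is fairness: waiters are served in FIFO order instead of racing for the cleared byte.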
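The x86 rwlock helpers (and the sh RW_LOCK_BIAS definition earlier in the diff) use a biased counter: the word starts at RW_LOCK_BIAS, each reader subtracts 1, and a writer subtracts the entire bias, which is why arch_read_can_lock() checks for a positive value and arch_write_can_lock() checks for exactly RW_LOCK_BIAS. A hedged user-space sketch of that scheme, again with compiler builtins standing in for the LOCK-prefixed assembly:

        /* Illustrative biased rwlock model; not the kernel assembly. */
        #include <stdbool.h>

        #define RW_LOCK_BIAS 0x01000000

        typedef struct {
                volatile int lock;      /* RW_LOCK_BIAS = free, >0 = readers possible, <=0 = writer */
        } rw_lock_t;

        #define RW_LOCK_UNLOCKED        { RW_LOCK_BIAS }

        static inline bool read_trylock(rw_lock_t *rw)
        {
                /* mirrors atomic_dec_return(count) >= 0 in arch_read_trylock() */
                if (__atomic_sub_fetch(&rw->lock, 1, __ATOMIC_ACQUIRE) >= 0)
                        return true;
                __atomic_add_fetch(&rw->lock, 1, __ATOMIC_RELAXED);     /* back off */
                return false;
        }

        static inline void read_unlock(rw_lock_t *rw)
        {
                __atomic_add_fetch(&rw->lock, 1, __ATOMIC_RELEASE);
        }

        static inline bool write_trylock(rw_lock_t *rw)
        {
                /* only succeeds when the word was exactly RW_LOCK_BIAS:
                 * no readers and no writer, as in arch_write_trylock() */
                if (__atomic_sub_fetch(&rw->lock, RW_LOCK_BIAS, __ATOMIC_ACQUIRE) == 0)
                        return true;
                __atomic_add_fetch(&rw->lock, RW_LOCK_BIAS, __ATOMIC_RELAXED);
                return false;
        }

        static inline void write_unlock(rw_lock_t *rw)
        {
                __atomic_add_fetch(&rw->lock, RW_LOCK_BIAS, __ATOMIC_RELEASE);
        }

The bias makes "no readers and no writer" a single comparison against one word, which keeps the fast paths to one locked arithmetic instruction each.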