author     H. Nikolaus Schaller <hns@goldelico.com>   2012-03-26 20:55:28 +0200
committer  H. Nikolaus Schaller <hns@goldelico.com>   2012-03-26 20:55:28 +0200
commit     92988a21ad4c4c9504295ccb580c9f806134471b (patch)
tree       5effc9f14170112450de05c67dafbe8d5034d595 /u-boot/arch/nios2/include/asm/bitops/atomic.h
parent     ca2b506783b676c95762c54ea24dcfdaae1947c9 (diff)
added boot script files to repository
Diffstat (limited to 'u-boot/arch/nios2/include/asm/bitops/atomic.h')
-rw-r--r--   u-boot/arch/nios2/include/asm/bitops/atomic.h   189
1 file changed, 189 insertions, 0 deletions
diff --git a/u-boot/arch/nios2/include/asm/bitops/atomic.h b/u-boot/arch/nios2/include/asm/bitops/atomic.h
new file mode 100644
index 0000000..c894646
--- /dev/null
+++ b/u-boot/arch/nios2/include/asm/bitops/atomic.h
@@ -0,0 +1,189 @@
+#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
+#define _ASM_GENERIC_BITOPS_ATOMIC_H_
+
+#include <asm/types.h>
+#include <asm/system.h>
+
+#ifdef CONFIG_SMP
+#include <asm/spinlock.h>
+#include <asm/cache.h>		/* we use L1_CACHE_BYTES */
+
+/* Use an array of spinlocks for our atomic_ts.
+ * Hash function to index into a different SPINLOCK.
+ * Since "a" is usually an address, use one spinlock per cacheline.
+ */
+# define ATOMIC_HASH_SIZE 4
+# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
+
+extern raw_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
+
+/* Can't use raw_spin_lock_irq because of #include problems, so
+ * this is the substitute */
+#define _atomic_spin_lock_irqsave(l,f) do {	\
+	raw_spinlock_t *s = ATOMIC_HASH(l);	\
+	local_irq_save(f);			\
+	__raw_spin_lock(s);			\
+} while(0)
+
+#define _atomic_spin_unlock_irqrestore(l,f) do {	\
+	raw_spinlock_t *s = ATOMIC_HASH(l);		\
+	__raw_spin_unlock(s);				\
+	local_irq_restore(f);				\
+} while(0)
+
+
+#else
+# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
+# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
+#endif
+
+/*
+ * NMI events can occur at any time, including when interrupts have been
+ * disabled by *_irqsave(). So you can get NMI events occurring while a
+ * *_bit function is holding a spin lock. If the NMI handler also wants
+ * to do bit manipulation (and they do) then you can get a deadlock
+ * between the original caller of *_bit() and the NMI handler.
+ *
+ * by Keith Owens
+ */
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This function is atomic and may not be reordered. See __set_bit()
+ * if you do not require the atomic guarantees.
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long flags;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	*p |= mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * clear_bit() is atomic and may not be reordered. However, it does
+ * not contain a memory barrier, so if it is used for locking purposes,
+ * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * in order to ensure changes are visible on other processors.
+ */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long flags;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	*p &= ~mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() is atomic and may not be reordered. It may be
+ * reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long flags;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	*p ^= mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It may be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old | mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It can be reordered on architectures other than x86.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old & ~mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+	unsigned long mask = BIT_MASK(nr);
+	unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
+	unsigned long old;
+	unsigned long flags;
+
+	_atomic_spin_lock_irqsave(p, flags);
+	old = *p;
+	*p = old ^ mask;
+	_atomic_spin_unlock_irqrestore(p, flags);
+
+	return (old & mask) != 0;
+}
+
+#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H_ */
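The header added above is the generic fallback for architectures such as Nios II that lack native atomic bit instructions: every helper brackets a plain load/modify/store with _atomic_spin_lock_irqsave()/_atomic_spin_unlock_irqrestore(), which reduces to disabling interrupts on uniprocessor builds and, under CONFIG_SMP, takes one of four spinlocks selected by hashing the target address. Because ATOMIC_HASH() divides the address by L1_CACHE_BYTES before masking, all bits within one cache line share the same lock. What follows is a minimal host-side sketch, not part of this commit or of the U-Boot build, that reproduces the BIT_MASK()/BIT_WORD() arithmetic and the test_and_set_bit() read-modify-write with the locking elided, just to show how a bit index maps onto a word offset and an in-word mask:

/*
 * demo.c - hypothetical standalone illustration; build on a host
 * with: cc -o demo demo.c
 * Mirrors test_and_set_bit() from the header above, minus the
 * spinlock/IRQ wrapper; on real hardware that wrapper is what makes
 * the load/or/store sequence atomic.
 */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)

static int test_and_set_bit(int nr, volatile unsigned long *addr)
{
	unsigned long mask = BIT_MASK(nr);
	volatile unsigned long *p = addr + BIT_WORD(nr);
	unsigned long old = *p;		/* load */

	*p = old | mask;		/* modify and store */
	return (old & mask) != 0;	/* report the previous state */
}

int main(void)
{
	unsigned long bitmap[4] = { 0 };	/* at least 128 bits */

	/* Typical use: claim a resource slot; the return value tells
	 * the caller whether the slot was already taken. */
	printf("first claim of bit 70:  old value %d\n",
	       test_and_set_bit(70, bitmap));
	printf("second claim of bit 70: old value %d\n",
	       test_and_set_bit(70, bitmap));
	printf("word %zu now %#lx\n",
	       (size_t)BIT_WORD(70), bitmap[BIT_WORD(70)]);
	return 0;
}

Run single-threaded, the first claim reports 0 (bit previously clear) and the second reports 1; in the real header, the IRQ-disable or hashed-spinlock wrapper is what keeps that same three-step sequence indivisible with respect to other CPUs and interrupt handlers.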