author    Tony Luck <tony.luck@intel.com>    2010-08-13 16:41:07 -0700
committer Tony Luck <tony.luck@intel.com>    2010-08-13 16:41:07 -0700
commit    01d69a82e1d3c938da16bf55aab000672243aa24 (patch)
tree      9d9763c1a71e29111cab48632aa8c177a54e63c9 /arch
parent    ad41a1e0cab07c5125456e8d38e5b1ab148d04aa (diff)
[IA64] Fix 64-bit atomic routines to return "long"
These have been broken (returning "int") since the dawn of time,
but no caller needed the full 64-bit value until commit
424acaaeb3a3932d64a9b4bd59df6cf72c22d8f3
("rwsem: wake queued readers when writer blocks on active read lock")
made this change:
- (rwsem_atomic_update(0, sem) & RWSEM_ACTIVE_MASK))
- /* Someone grabbed the sem already */
+ rwsem_atomic_update(0, sem) < RWSEM_WAITING_BIAS)
+ /* Someone grabbed the sem for write already */
RWSEM_ACTIVE_MASK is 0xffffffffL, so the old code only looked at the
low-order 32 bits and an "int" return value was harmless. The new code
compares against RWSEM_WAITING_BIAS and needs to see all 64 bits.
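
To make the failure concrete, here is a minimal user-space sketch (an
illustration, not kernel code: the constants mirror the 64-bit rwsem
definitions, "long" is assumed to be 64 bits as on ia64, and the two
update functions are hypothetical stand-ins for rwsem_atomic_update()):

#include <stdio.h>

#define RWSEM_ACTIVE_MASK	0xffffffffL
#define RWSEM_WAITING_BIAS	(-RWSEM_ACTIVE_MASK - 1)	/* -0x100000000 */

/* Stand-in for the broken routine: the 64-bit count is truncated to int. */
static int update_int(long count)   { return count; }

/* Stand-in for the fixed routine: all 64 bits survive. */
static long update_long(long count) { return count; }

int main(void)
{
	/* A writer holds the sem and another writer waits; the interesting
	   state lives in the high 32 bits of the count. */
	long count = 2 * RWSEM_WAITING_BIAS + 1;	/* 0xfffffffe00000001 */

	/* Old test: only the low 32 bits matter, so "int" happened to work. */
	printf("active: %ld\n", update_int(count) & RWSEM_ACTIVE_MASK);	/* 1 */

	/* New test: needs the high bits; "int" gives the wrong answer. */
	printf("int:  %d\n", update_int(count) < RWSEM_WAITING_BIAS);	/* 0 (wrong) */
	printf("long: %d\n", update_long(count) < RWSEM_WAITING_BIAS);	/* 1 (right) */
	return 0;
}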
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch')
-rw-r--r--    arch/ia64/include/asm/atomic.h    8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 4e19484..4468814 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -41,7 +41,7 @@ ia64_atomic_add (int i, atomic_t *v)
 	return new;
 }
 
-static __inline__ int
+static __inline__ long
 ia64_atomic64_add (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
@@ -69,7 +69,7 @@ ia64_atomic_sub (int i, atomic_t *v)
 	return new;
 }
 
-static __inline__ int
+static __inline__ long
 ia64_atomic64_sub (__s64 i, atomic64_t *v)
 {
 	__s64 old, new;
@@ -107,7 +107,7 @@ static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+static __inline__ long atomic64_add_unless(atomic64_t *v, long a, long u)
 {
 	long c, old;
 	c = atomic64_read(v);
@@ -158,7 +158,7 @@ atomic_add_negative (int i, atomic_t *v)
 	return atomic_add_return(i, v) < 0;
 }
 
-static __inline__ int
+static __inline__ long
 atomic64_add_negative (__s64 i, atomic64_t *v)
 {
 	return atomic64_add_return(i, v) < 0;
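
For context, the routines touched above all follow the same
compare-and-swap update loop and return the freshly computed value.
Below is a simplified user-space rendering of that pattern (a sketch
only: the kernel version uses ia64_cmpxchg(), not the C11 atomics shown
here), which shows where an "int" return type would drop the high 32
bits:

#include <stdatomic.h>

typedef struct { _Atomic long counter; } atomic64_t;	/* 64-bit on ia64 */

static long				/* was "int": truncated the 64-bit result */
atomic64_add_sketch (long i, atomic64_t *v)
{
	long old, new;

	do {
		old = atomic_load(&v->counter);
		new = old + i;
	} while (!atomic_compare_exchange_weak(&v->counter, &old, new));
	return new;			/* all 64 bits now reach the caller */
}

Callers such as the rwsem code feed this return value straight into
64-bit comparisons, which is why the widened return type matters.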