Diffstat (limited to 'include/asm-alpha')
 include/asm-alpha/bitops.h   | 55 ++++++++++++++++++++++++++++
 include/asm-alpha/tlbflush.h | 11 -----------
 2 files changed, 55 insertions(+), 11 deletions(-)
diff --git a/include/asm-alpha/bitops.h b/include/asm-alpha/bitops.h
index 9e71201..9e19a70 100644
--- a/include/asm-alpha/bitops.h
+++ b/include/asm-alpha/bitops.h
@@ -1,7 +1,12 @@
 #ifndef _ALPHA_BITOPS_H
 #define _ALPHA_BITOPS_H
 
+#ifndef _LINUX_BITOPS_H
+#error only <linux/bitops.h> can be included directly
+#endif
+
 #include <asm/compiler.h>
+#include <asm/barrier.h>
 
 /*
  * Copyright 1994, Linus Torvalds.
@@ -69,6 +74,13 @@ clear_bit(unsigned long nr, volatile void * addr)
 	:"Ir" (1UL << (nr & 31)), "m" (*m));
 }
 
+static inline void
+clear_bit_unlock(unsigned long nr, volatile void * addr)
+{
+	smp_mb();
+	clear_bit(nr, addr);
+}
+
 /*
  * WARNING: non atomic version.
  */
@@ -81,6 +93,13 @@ __clear_bit(unsigned long nr, volatile void * addr)
 }
 
 static inline void
+__clear_bit_unlock(unsigned long nr, volatile void * addr)
+{
+	smp_mb();
+	__clear_bit(nr, addr);
+}
+
+static inline void
 change_bit(unsigned long nr, volatile void * addr)
 {
 	unsigned long temp;
@@ -117,6 +136,36 @@ test_and_set_bit(unsigned long nr, volatile void *addr)
 	int *m = ((int *) addr) + (nr >> 5);
 
 	__asm__ __volatile__(
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	"1:	ldl_l %0,%4\n"
+	"	and %0,%3,%2\n"
+	"	bne %2,2f\n"
+	"	xor %0,%3,%0\n"
+	"	stl_c %0,%1\n"
+	"	beq %0,3f\n"
+	"2:\n"
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
+	".subsection 2\n"
+	"3:	br 1b\n"
+	".previous"
+	:"=&r" (temp), "=m" (*m), "=&r" (oldbit)
+	:"Ir" (1UL << (nr & 31)), "m" (*m) : "memory");
+
+	return oldbit != 0;
+}
+
+static inline int
+test_and_set_bit_lock(unsigned long nr, volatile void *addr)
+{
+	unsigned long oldbit;
+	unsigned long temp;
+	int *m = ((int *) addr) + (nr >> 5);
+
+	__asm__ __volatile__(
 	"1:	ldl_l %0,%4\n"
 	"	and %0,%3,%2\n"
 	"	bne %2,2f\n"
@@ -158,6 +207,9 @@ test_and_clear_bit(unsigned long nr, volatile void * addr)
 	int *m = ((int *) addr) + (nr >> 5);
 
 	__asm__ __volatile__(
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
 	"1:	ldl_l %0,%4\n"
 	"	and %0,%3,%2\n"
 	"	beq %2,2f\n"
@@ -199,6 +251,9 @@ test_and_change_bit(unsigned long nr, volatile void * addr)
 	int *m = ((int *) addr) + (nr >> 5);
 
 	__asm__ __volatile__(
+#ifdef CONFIG_SMP
+	"	mb\n"
+#endif
 	"1:	ldl_l %0,%4\n"
 	"	and %0,%3,%2\n"
 	"	xor %0,%3,%0\n"
diff --git a/include/asm-alpha/tlbflush.h b/include/asm-alpha/tlbflush.h
index 1ca3ed3..eefab3f 100644
--- a/include/asm-alpha/tlbflush.h
+++ b/include/asm-alpha/tlbflush.h
@@ -92,17 +92,6 @@ flush_tlb_other(struct mm_struct *mm)
 	if (*mmc) *mmc = 0;
 }
 
-/* Flush a specified range of user mapping page tables from TLB.
-   Although Alpha uses VPTE caches, this can be a nop, as Alpha does
-   not have finegrained tlb flushing, so it will flush VPTE stuff
-   during next flush_tlb_range.  */
-
-static inline void
-flush_tlb_pgtables(struct mm_struct *mm, unsigned long start,
-		   unsigned long end)
-{
-}
-
 #ifndef CONFIG_SMP
 /* Flush everything (kernel mapping may also have changed
    due to vmalloc/vfree).  */
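
Note on the new include guard: after this change, asm/bitops.h can no longer
be included on its own; users go through <linux/bitops.h>, which defines
_LINUX_BITOPS_H before pulling in the architecture header. A minimal,
hypothetical illustration (not part of the patch):

	#include <linux/bitops.h>	/* OK: defines _LINUX_BITOPS_H, then
					   includes asm/bitops.h itself */

	/* A bare #include <asm/bitops.h> now fails at preprocessing time:
	 *   error: only <linux/bitops.h> can be included directly */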
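
Note on the new lock bitops: test_and_set_bit_lock() keeps only the trailing
barrier of test_and_set_bit() (acquire semantics), while clear_bit_unlock()
and __clear_bit_unlock() issue smp_mb() before clearing the bit (release
semantics); on !CONFIG_SMP builds the barriers compile away. A minimal,
hypothetical usage sketch, not taken from the patch (the flags word and
function name are illustrative):

	#include <linux/bitops.h>	/* per the include guard above */

	static unsigned long my_flags;	/* hypothetical: bit 0 acts as a lock */

	static void my_locked_update(void)
	{
		/* Spin until bit 0 is atomically set; the barrier after a
		   successful set keeps critical-section accesses from being
		   reordered before the lock is taken.  cpu_relax() comes
		   from asm/processor.h. */
		while (test_and_set_bit_lock(0, &my_flags))
			cpu_relax();

		/* ... modify data protected by bit 0 ... */

		/* smp_mb() inside clear_bit_unlock() makes the stores above
		   visible before the lock bit is seen clear. */
		clear_bit_unlock(0, &my_flags);
	}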