Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--  include/asm-sparc64/atomic.h             |   8
-rw-r--r--  include/asm-sparc64/bitops.h             |  60
-rw-r--r--  include/asm-sparc64/cpudata.h            |   4
-rw-r--r--  include/asm-sparc64/emergency-restart.h  |   6
-rw-r--r--  include/asm-sparc64/hardirq.h            |  16
-rw-r--r--  include/asm-sparc64/io.h                 |  47
-rw-r--r--  include/asm-sparc64/page.h               |  16
-rw-r--r--  include/asm-sparc64/pci.h                |   2
-rw-r--r--  include/asm-sparc64/pgtable.h            |   3
-rw-r--r--  include/asm-sparc64/processor.h          |   1
-rw-r--r--  include/asm-sparc64/ptrace.h             |   5
-rw-r--r--  include/asm-sparc64/rwsem.h              |  48
-rw-r--r--  include/asm-sparc64/segment.h            |   6
-rw-r--r--  include/asm-sparc64/sfafsr.h             |  82
-rw-r--r--  include/asm-sparc64/socket.h             |   2
-rw-r--r--  include/asm-sparc64/spinlock.h           |  42
-rw-r--r--  include/asm-sparc64/spitfire.h           | 130
-rw-r--r--  include/asm-sparc64/system.h             |  28
-rw-r--r--  include/asm-sparc64/thread_info.h        |  17
-rw-r--r--  include/asm-sparc64/timer.h              |  41
-rw-r--r--  include/asm-sparc64/types.h              |   2
-rw-r--r--  include/asm-sparc64/unistd.h             |   6
22 files changed, 231 insertions, 341 deletions
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h index d80f3379..e175afc 100644 --- a/include/asm-sparc64/atomic.h +++ b/include/asm-sparc64/atomic.h @@ -72,10 +72,10 @@ extern int atomic64_sub_ret(int, atomic64_t *); /* Atomic operations are already serializing */ #ifdef CONFIG_SMP -#define smp_mb__before_atomic_dec() membar("#StoreLoad | #LoadLoad") -#define smp_mb__after_atomic_dec() membar("#StoreLoad | #StoreStore") -#define smp_mb__before_atomic_inc() membar("#StoreLoad | #LoadLoad") -#define smp_mb__after_atomic_inc() membar("#StoreLoad | #StoreStore") +#define smp_mb__before_atomic_dec() membar_storeload_loadload(); +#define smp_mb__after_atomic_dec() membar_storeload_storestore(); +#define smp_mb__before_atomic_inc() membar_storeload_loadload(); +#define smp_mb__after_atomic_inc() membar_storeload_storestore(); #else #define smp_mb__before_atomic_dec() barrier() #define smp_mb__after_atomic_dec() barrier() diff --git a/include/asm-sparc64/bitops.h b/include/asm-sparc64/bitops.h index 9d722dc..6388b83 100644 --- a/include/asm-sparc64/bitops.h +++ b/include/asm-sparc64/bitops.h @@ -20,72 +20,72 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr); /* "non-atomic" versions... */ -static __inline__ void __set_bit(int nr, volatile unsigned long *addr) +static inline void __set_bit(int nr, volatile unsigned long *addr) { - volatile unsigned long *m = addr + (nr >> 6); + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); *m |= (1UL << (nr & 63)); } -static __inline__ void __clear_bit(int nr, volatile unsigned long *addr) +static inline void __clear_bit(int nr, volatile unsigned long *addr) { - volatile unsigned long *m = addr + (nr >> 6); + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); *m &= ~(1UL << (nr & 63)); } -static __inline__ void __change_bit(int nr, volatile unsigned long *addr) +static inline void __change_bit(int nr, volatile unsigned long *addr) { - volatile unsigned long *m = addr + (nr >> 6); + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); *m ^= (1UL << (nr & 63)); } -static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr) +static inline int __test_and_set_bit(int nr, volatile unsigned long *addr) { - volatile unsigned long *m = addr + (nr >> 6); - long old = *m; - long mask = (1UL << (nr & 63)); + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); + unsigned long old = *m; + unsigned long mask = (1UL << (nr & 63)); *m = (old | mask); return ((old & mask) != 0); } -static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr) +static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr) { - volatile unsigned long *m = addr + (nr >> 6); - long old = *m; - long mask = (1UL << (nr & 63)); + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); + unsigned long old = *m; + unsigned long mask = (1UL << (nr & 63)); *m = (old & ~mask); return ((old & mask) != 0); } -static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr) +static inline int __test_and_change_bit(int nr, volatile unsigned long *addr) { - volatile unsigned long *m = addr + (nr >> 6); - long old = *m; - long mask = (1UL << (nr & 63)); + unsigned long *m = ((unsigned long *)addr) + (nr >> 6); + unsigned long old = *m; + unsigned long mask = (1UL << (nr & 63)); *m = (old ^ mask); return ((old & mask) != 0); } #ifdef CONFIG_SMP -#define smp_mb__before_clear_bit() membar("#StoreLoad | #LoadLoad") -#define smp_mb__after_clear_bit() membar("#StoreLoad | #StoreStore") 
+#define smp_mb__before_clear_bit() membar_storeload_loadload() +#define smp_mb__after_clear_bit() membar_storeload_storestore() #else #define smp_mb__before_clear_bit() barrier() #define smp_mb__after_clear_bit() barrier() #endif -static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr) +static inline int test_bit(int nr, __const__ volatile unsigned long *addr) { - return (1UL & ((addr)[nr >> 6] >> (nr & 63))) != 0UL; + return (1UL & (addr[nr >> 6] >> (nr & 63))) != 0UL; } /* The easy/cheese version for now. */ -static __inline__ unsigned long ffz(unsigned long word) +static inline unsigned long ffz(unsigned long word) { unsigned long result; @@ -103,7 +103,7 @@ static __inline__ unsigned long ffz(unsigned long word) * * Undefined if no bit exists, so code should check against 0 first. */ -static __inline__ unsigned long __ffs(unsigned long word) +static inline unsigned long __ffs(unsigned long word) { unsigned long result = 0; @@ -144,7 +144,7 @@ static inline int sched_find_first_bit(unsigned long *b) * the libc and compiler builtin ffs routines, therefore * differs in spirit from the above ffz (man ffs). */ -static __inline__ int ffs(int x) +static inline int ffs(int x) { if (!x) return 0; @@ -158,7 +158,7 @@ static __inline__ int ffs(int x) #ifdef ULTRA_HAS_POPULATION_COUNT -static __inline__ unsigned int hweight64(unsigned long w) +static inline unsigned int hweight64(unsigned long w) { unsigned int res; @@ -166,7 +166,7 @@ static __inline__ unsigned int hweight64(unsigned long w) return res; } -static __inline__ unsigned int hweight32(unsigned int w) +static inline unsigned int hweight32(unsigned int w) { unsigned int res; @@ -174,7 +174,7 @@ static __inline__ unsigned int hweight32(unsigned int w) return res; } -static __inline__ unsigned int hweight16(unsigned int w) +static inline unsigned int hweight16(unsigned int w) { unsigned int res; @@ -182,7 +182,7 @@ static __inline__ unsigned int hweight16(unsigned int w) return res; } -static __inline__ unsigned int hweight8(unsigned int w) +static inline unsigned int hweight8(unsigned int w) { unsigned int res; @@ -236,7 +236,7 @@ extern unsigned long find_next_zero_bit(const unsigned long *, #define test_and_clear_le_bit(nr,addr) \ test_and_clear_bit((nr) ^ 0x38, (addr)) -static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr) +static inline int test_le_bit(int nr, __const__ unsigned long * addr) { int mask; __const__ unsigned char *ADDR = (__const__ unsigned char *) addr; diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h index cc7198a..9a3a81f 100644 --- a/include/asm-sparc64/cpudata.h +++ b/include/asm-sparc64/cpudata.h @@ -1,6 +1,6 @@ /* cpudata.h: Per-cpu parameters. * - * Copyright (C) 2003 David S. Miller (davem@redhat.com) + * Copyright (C) 2003, 2005 David S. Miller (davem@redhat.com) */ #ifndef _SPARC64_CPUDATA_H @@ -10,7 +10,7 @@ typedef struct { /* Dcache line 1 */ - unsigned int __pad0; /* bh_count moved to irq_stat for consistency. 
KAO */ + unsigned int __softirq_pending; /* must be 1st, see rtrap.S */ unsigned int multiplier; unsigned int counter; unsigned int idle_volume; diff --git a/include/asm-sparc64/emergency-restart.h b/include/asm-sparc64/emergency-restart.h new file mode 100644 index 0000000..108d8c4 --- /dev/null +++ b/include/asm-sparc64/emergency-restart.h @@ -0,0 +1,6 @@ +#ifndef _ASM_EMERGENCY_RESTART_H +#define _ASM_EMERGENCY_RESTART_H + +#include <asm-generic/emergency-restart.h> + +#endif /* _ASM_EMERGENCY_RESTART_H */ diff --git a/include/asm-sparc64/hardirq.h b/include/asm-sparc64/hardirq.h index d6db1ae..f0cf713 100644 --- a/include/asm-sparc64/hardirq.h +++ b/include/asm-sparc64/hardirq.h @@ -1,22 +1,16 @@ /* hardirq.h: 64-bit Sparc hard IRQ support. * - * Copyright (C) 1997, 1998 David S. Miller (davem@caip.rutgers.edu) + * Copyright (C) 1997, 1998, 2005 David S. Miller (davem@davemloft.net) */ #ifndef __SPARC64_HARDIRQ_H #define __SPARC64_HARDIRQ_H -#include <linux/config.h> -#include <linux/threads.h> -#include <linux/spinlock.h> -#include <linux/cache.h> +#include <asm/cpudata.h> -/* rtrap.S is sensitive to the offsets of these fields */ -typedef struct { - unsigned int __softirq_pending; -} ____cacheline_aligned irq_cpustat_t; - -#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */ +#define __ARCH_IRQ_STAT +#define local_softirq_pending() \ + (local_cpu_data().__softirq_pending) #define HARDIRQ_BITS 8 diff --git a/include/asm-sparc64/io.h b/include/asm-sparc64/io.h index afdcea9..0056770 100644 --- a/include/asm-sparc64/io.h +++ b/include/asm-sparc64/io.h @@ -100,18 +100,41 @@ static __inline__ void _outl(u32 l, unsigned long addr) #define inl_p(__addr) inl(__addr) #define outl_p(__l, __addr) outl(__l, __addr) -extern void outsb(void __iomem *addr, const void *src, unsigned long count); -extern void outsw(void __iomem *addr, const void *src, unsigned long count); -extern void outsl(void __iomem *addr, const void *src, unsigned long count); -extern void insb(void __iomem *addr, void *dst, unsigned long count); -extern void insw(void __iomem *addr, void *dst, unsigned long count); -extern void insl(void __iomem *addr, void *dst, unsigned long count); -#define ioread8_rep(a,d,c) insb(a,d,c) -#define ioread16_rep(a,d,c) insw(a,d,c) -#define ioread32_rep(a,d,c) insl(a,d,c) -#define iowrite8_rep(a,s,c) outsb(a,s,c) -#define iowrite16_rep(a,s,c) outsw(a,s,c) -#define iowrite32_rep(a,s,c) outsl(a,s,c) +extern void outsb(unsigned long, const void *, unsigned long); +extern void outsw(unsigned long, const void *, unsigned long); +extern void outsl(unsigned long, const void *, unsigned long); +extern void insb(unsigned long, void *, unsigned long); +extern void insw(unsigned long, void *, unsigned long); +extern void insl(unsigned long, void *, unsigned long); + +static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count) +{ + insb((unsigned long __force)port, buf, count); +} +static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count) +{ + insw((unsigned long __force)port, buf, count); +} + +static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count) +{ + insl((unsigned long __force)port, buf, count); +} + +static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count) +{ + outsb((unsigned long __force)port, buf, count); +} + +static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count) +{ + outsw((unsigned long __force)port, buf, count); +} + 
+static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count) +{ + outsl((unsigned long __force)port, buf, count); +} /* Memory functions, same as I/O accesses on Ultra. */ static inline u8 _readb(const volatile void __iomem *addr) diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h index b87dbbd..c9f8ef2 100644 --- a/include/asm-sparc64/page.h +++ b/include/asm-sparc64/page.h @@ -150,20 +150,6 @@ struct sparc_phys_banks { extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; -/* Pure 2^n version of get_order */ -static __inline__ int get_order(unsigned long size) -{ - int order; - - size = (size-1) >> (PAGE_SHIFT-1); - order = -1; - do { - size >>= 1; - order++; - } while (size); - return order; -} - #endif /* !(__ASSEMBLY__) */ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \ @@ -171,4 +157,6 @@ static __inline__ int get_order(unsigned long size) #endif /* !(__KERNEL__) */ +#include <asm-generic/page.h> + #endif /* !(_SPARC64_PAGE_H) */ diff --git a/include/asm-sparc64/pci.h b/include/asm-sparc64/pci.h index 84e41c1..a4ab0ec 100644 --- a/include/asm-sparc64/pci.h +++ b/include/asm-sparc64/pci.h @@ -23,7 +23,7 @@ static inline void pcibios_set_master(struct pci_dev *dev) /* No special bus mastering setup handling */ } -static inline void pcibios_penalize_isa_irq(int irq) +static inline void pcibios_penalize_isa_irq(int irq, int active) { /* We don't do dynamic PCI IRQ allocation */ } diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h index 1ae00c5..a2b4f5e 100644 --- a/include/asm-sparc64/pgtable.h +++ b/include/asm-sparc64/pgtable.h @@ -410,9 +410,6 @@ extern unsigned long *sparc64_valid_addr_bitmap; #define kern_addr_valid(addr) \ (test_bit(__pa((unsigned long)(addr))>>22, sparc64_valid_addr_bitmap)) -extern int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, - unsigned long offset, - unsigned long size, pgprot_t prot, int space); extern int io_remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot); diff --git a/include/asm-sparc64/processor.h b/include/asm-sparc64/processor.h index d0bee24..3169f3e 100644 --- a/include/asm-sparc64/processor.h +++ b/include/asm-sparc64/processor.h @@ -18,7 +18,6 @@ #include <asm/a.out.h> #include <asm/pstate.h> #include <asm/ptrace.h> -#include <asm/segment.h> #include <asm/page.h> /* The sparc has no problems with write protection */ diff --git a/include/asm-sparc64/ptrace.h b/include/asm-sparc64/ptrace.h index 2d2b5a1..6194f77 100644 --- a/include/asm-sparc64/ptrace.h +++ b/include/asm-sparc64/ptrace.h @@ -94,8 +94,9 @@ struct sparc_trapf { #define STACKFRAME32_SZ sizeof(struct sparc_stackf32) #ifdef __KERNEL__ -#define force_successful_syscall_return() \ - set_thread_flag(TIF_SYSCALL_SUCCESS) +#define force_successful_syscall_return() \ +do { current_thread_info()->syscall_noerror = 1; \ +} while (0) #define user_mode(regs) (!((regs)->tstate & TSTATE_PRIV)) #define instruction_pointer(regs) ((regs)->tpc) #ifdef CONFIG_SMP diff --git a/include/asm-sparc64/rwsem.h b/include/asm-sparc64/rwsem.h index a1cc94f..4568ee4 100644 --- a/include/asm-sparc64/rwsem.h +++ b/include/asm-sparc64/rwsem.h @@ -46,54 +46,14 @@ extern void __up_read(struct rw_semaphore *sem); extern void __up_write(struct rw_semaphore *sem); extern void __downgrade_write(struct rw_semaphore *sem); -static __inline__ int rwsem_atomic_update(int delta, struct rw_semaphore *sem) +static inline int 
rwsem_atomic_update(int delta, struct rw_semaphore *sem) { - int tmp = delta; - - __asm__ __volatile__( - "1:\tlduw [%2], %%g1\n\t" - "add %%g1, %1, %%g7\n\t" - "cas [%2], %%g1, %%g7\n\t" - "cmp %%g1, %%g7\n\t" - "membar #StoreLoad | #StoreStore\n\t" - "bne,pn %%icc, 1b\n\t" - " nop\n\t" - "mov %%g7, %0\n\t" - : "=&r" (tmp) - : "0" (tmp), "r" (sem) - : "g1", "g7", "memory", "cc"); - - return tmp + delta; -} - -#define rwsem_atomic_add rwsem_atomic_update - -static __inline__ __u16 rwsem_cmpxchgw(struct rw_semaphore *sem, __u16 __old, __u16 __new) -{ - u32 old = (sem->count & 0xffff0000) | (u32) __old; - u32 new = (old & 0xffff0000) | (u32) __new; - u32 prev; - -again: - __asm__ __volatile__("cas [%2], %3, %0\n\t" - "membar #StoreLoad | #StoreStore" - : "=&r" (prev) - : "0" (new), "r" (sem), "r" (old) - : "memory"); - - /* To give the same semantics as x86 cmpxchgw, keep trying - * if only the upper 16-bits changed. - */ - if (prev != old && - ((prev & 0xffff) == (old & 0xffff))) - goto again; - - return prev & 0xffff; + return atomic_add_return(delta, (atomic_t *)(&sem->count)); } -static __inline__ signed long rwsem_cmpxchg(struct rw_semaphore *sem, signed long old, signed long new) +static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) { - return cmpxchg(&sem->count,old,new); + atomic_add(delta, (atomic_t *)(&sem->count)); } #endif /* __KERNEL__ */ diff --git a/include/asm-sparc64/segment.h b/include/asm-sparc64/segment.h deleted file mode 100644 index b03e709..0000000 --- a/include/asm-sparc64/segment.h +++ /dev/null @@ -1,6 +0,0 @@ -#ifndef __SPARC64_SEGMENT_H -#define __SPARC64_SEGMENT_H - -/* Only here because we have some old header files that expect it.. */ - -#endif diff --git a/include/asm-sparc64/sfafsr.h b/include/asm-sparc64/sfafsr.h new file mode 100644 index 0000000..2f792c2 --- /dev/null +++ b/include/asm-sparc64/sfafsr.h @@ -0,0 +1,82 @@ +#ifndef _SPARC64_SFAFSR_H +#define _SPARC64_SFAFSR_H + +#include <asm/const.h> + +/* Spitfire Asynchronous Fault Status register, ASI=0x4C VA<63:0>=0x0 */ + +#define SFAFSR_ME (_AC(1,UL) << SFAFSR_ME_SHIFT) +#define SFAFSR_ME_SHIFT 32 +#define SFAFSR_PRIV (_AC(1,UL) << SFAFSR_PRIV_SHIFT) +#define SFAFSR_PRIV_SHIFT 31 +#define SFAFSR_ISAP (_AC(1,UL) << SFAFSR_ISAP_SHIFT) +#define SFAFSR_ISAP_SHIFT 30 +#define SFAFSR_ETP (_AC(1,UL) << SFAFSR_ETP_SHIFT) +#define SFAFSR_ETP_SHIFT 29 +#define SFAFSR_IVUE (_AC(1,UL) << SFAFSR_IVUE_SHIFT) +#define SFAFSR_IVUE_SHIFT 28 +#define SFAFSR_TO (_AC(1,UL) << SFAFSR_TO_SHIFT) +#define SFAFSR_TO_SHIFT 27 +#define SFAFSR_BERR (_AC(1,UL) << SFAFSR_BERR_SHIFT) +#define SFAFSR_BERR_SHIFT 26 +#define SFAFSR_LDP (_AC(1,UL) << SFAFSR_LDP_SHIFT) +#define SFAFSR_LDP_SHIFT 25 +#define SFAFSR_CP (_AC(1,UL) << SFAFSR_CP_SHIFT) +#define SFAFSR_CP_SHIFT 24 +#define SFAFSR_WP (_AC(1,UL) << SFAFSR_WP_SHIFT) +#define SFAFSR_WP_SHIFT 23 +#define SFAFSR_EDP (_AC(1,UL) << SFAFSR_EDP_SHIFT) +#define SFAFSR_EDP_SHIFT 22 +#define SFAFSR_UE (_AC(1,UL) << SFAFSR_UE_SHIFT) +#define SFAFSR_UE_SHIFT 21 +#define SFAFSR_CE (_AC(1,UL) << SFAFSR_CE_SHIFT) +#define SFAFSR_CE_SHIFT 20 +#define SFAFSR_ETS (_AC(0xf,UL) << SFAFSR_ETS_SHIFT) +#define SFAFSR_ETS_SHIFT 16 +#define SFAFSR_PSYND (_AC(0xffff,UL) << SFAFSR_PSYND_SHIFT) +#define SFAFSR_PSYND_SHIFT 0 + +/* UDB Error Register, ASI=0x7f VA<63:0>=0x0(High),0x18(Low) for read + * ASI=0x77 VA<63:0>=0x0(High),0x18(Low) for write + */ + +#define UDBE_UE (_AC(1,UL) << 9) +#define UDBE_CE (_AC(1,UL) << 8) +#define UDBE_E_SYNDR (_AC(0xff,UL) << 0) + +/* The trap handlers for 
asynchronous errors encode the AFSR and + * other pieces of information into a 64-bit argument for C code + * encoded as follows: + * + * ----------------------------------------------- + * | UDB_H | UDB_L | TL>1 | TT | AFSR | + * ----------------------------------------------- + * 63 54 53 44 42 41 33 32 0 + * + * The AFAR is passed in unchanged. + */ +#define SFSTAT_UDBH_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT) +#define SFSTAT_UDBH_SHIFT 54 +#define SFSTAT_UDBL_MASK (_AC(0x3ff,UL) << SFSTAT_UDBH_SHIFT) +#define SFSTAT_UDBL_SHIFT 44 +#define SFSTAT_TL_GT_ONE (_AC(1,UL) << SFSTAT_TL_GT_ONE_SHIFT) +#define SFSTAT_TL_GT_ONE_SHIFT 42 +#define SFSTAT_TRAP_TYPE (_AC(0x1FF,UL) << SFSTAT_TRAP_TYPE_SHIFT) +#define SFSTAT_TRAP_TYPE_SHIFT 33 +#define SFSTAT_AFSR_MASK (_AC(0x1ffffffff,UL) << SFSTAT_AFSR_SHIFT) +#define SFSTAT_AFSR_SHIFT 0 + +/* ESTATE Error Enable Register, ASI=0x4b VA<63:0>=0x0 */ +#define ESTATE_ERR_CE 0x1 /* Correctable errors */ +#define ESTATE_ERR_NCE 0x2 /* TO, BERR, LDP, ETP, EDP, WP, UE, IVUE */ +#define ESTATE_ERR_ISAP 0x4 /* System address parity error */ +#define ESTATE_ERR_ALL (ESTATE_ERR_CE | \ + ESTATE_ERR_NCE | \ + ESTATE_ERR_ISAP) + +/* The various trap types that report using the above state. */ +#define TRAP_TYPE_IAE 0x09 /* Instruction Access Error */ +#define TRAP_TYPE_DAE 0x32 /* Data Access Error */ +#define TRAP_TYPE_CEE 0x63 /* Correctable ECC Error */ + +#endif /* _SPARC64_SFAFSR_H */ diff --git a/include/asm-sparc64/socket.h b/include/asm-sparc64/socket.h index 865547a..59987da 100644 --- a/include/asm-sparc64/socket.h +++ b/include/asm-sparc64/socket.h @@ -29,6 +29,8 @@ #define SO_SNDBUF 0x1001 #define SO_RCVBUF 0x1002 +#define SO_SNDBUFFORCE 0x100a +#define SO_RCVBUFFORCE 0x100b #define SO_ERROR 0x1007 #define SO_TYPE 0x1008 diff --git a/include/asm-sparc64/spinlock.h b/include/asm-sparc64/spinlock.h index 9cb93a5..a02c437 100644 --- a/include/asm-sparc64/spinlock.h +++ b/include/asm-sparc64/spinlock.h @@ -43,7 +43,7 @@ typedef struct { #define spin_is_locked(lp) ((lp)->lock != 0) #define spin_unlock_wait(lp) \ -do { membar("#LoadLoad"); \ +do { rmb(); \ } while((lp)->lock) static inline void _raw_spin_lock(spinlock_t *lock) @@ -129,15 +129,18 @@ typedef struct { #define spin_is_locked(__lock) ((__lock)->lock != 0) #define spin_unlock_wait(__lock) \ do { \ - membar("#LoadLoad"); \ + rmb(); \ } while((__lock)->lock) -extern void _do_spin_lock (spinlock_t *lock, char *str); -extern void _do_spin_unlock (spinlock_t *lock); -extern int _do_spin_trylock (spinlock_t *lock); +extern void _do_spin_lock(spinlock_t *lock, char *str, unsigned long caller); +extern void _do_spin_unlock(spinlock_t *lock); +extern int _do_spin_trylock(spinlock_t *lock, unsigned long caller); -#define _raw_spin_trylock(lp) _do_spin_trylock(lp) -#define _raw_spin_lock(lock) _do_spin_lock(lock, "spin_lock") +#define _raw_spin_trylock(lp) \ + _do_spin_trylock(lp, (unsigned long) __builtin_return_address(0)) +#define _raw_spin_lock(lock) \ + _do_spin_lock(lock, "spin_lock", \ + (unsigned long) __builtin_return_address(0)) #define _raw_spin_unlock(lock) _do_spin_unlock(lock) #define _raw_spin_lock_flags(lock, flags) _raw_spin_lock(lock) @@ -279,37 +282,41 @@ typedef struct { #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0, 0xff, { } } #define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while(0) -extern void _do_read_lock(rwlock_t *rw, char *str); -extern void _do_read_unlock(rwlock_t *rw, char *str); -extern void _do_write_lock(rwlock_t *rw, char *str); -extern void _do_write_unlock(rwlock_t 
*rw); -extern int _do_write_trylock(rwlock_t *rw, char *str); +extern void _do_read_lock(rwlock_t *rw, char *str, unsigned long caller); +extern void _do_read_unlock(rwlock_t *rw, char *str, unsigned long caller); +extern void _do_write_lock(rwlock_t *rw, char *str, unsigned long caller); +extern void _do_write_unlock(rwlock_t *rw, unsigned long caller); +extern int _do_write_trylock(rwlock_t *rw, char *str, unsigned long caller); #define _raw_read_lock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - _do_read_lock(lock, "read_lock"); \ + _do_read_lock(lock, "read_lock", \ + (unsigned long) __builtin_return_address(0)); \ local_irq_restore(flags); \ } while(0) #define _raw_read_unlock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - _do_read_unlock(lock, "read_unlock"); \ + _do_read_unlock(lock, "read_unlock", \ + (unsigned long) __builtin_return_address(0)); \ local_irq_restore(flags); \ } while(0) #define _raw_write_lock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - _do_write_lock(lock, "write_lock"); \ + _do_write_lock(lock, "write_lock", \ + (unsigned long) __builtin_return_address(0)); \ local_irq_restore(flags); \ } while(0) #define _raw_write_unlock(lock) \ do { unsigned long flags; \ local_irq_save(flags); \ - _do_write_unlock(lock); \ + _do_write_unlock(lock, \ + (unsigned long) __builtin_return_address(0)); \ local_irq_restore(flags); \ } while(0) @@ -317,7 +324,8 @@ do { unsigned long flags; \ ({ unsigned long flags; \ int val; \ local_irq_save(flags); \ - val = _do_write_trylock(lock, "write_trylock"); \ + val = _do_write_trylock(lock, "write_trylock", \ + (unsigned long) __builtin_return_address(0)); \ local_irq_restore(flags); \ val; \ }) diff --git a/include/asm-sparc64/spitfire.h b/include/asm-sparc64/spitfire.h index 1aa9327..962638c 100644 --- a/include/asm-sparc64/spitfire.h +++ b/include/asm-sparc64/spitfire.h @@ -56,52 +56,6 @@ extern void cheetah_enable_pcache(void); SPITFIRE_HIGHEST_LOCKED_TLBENT : \ CHEETAH_HIGHEST_LOCKED_TLBENT) -static __inline__ unsigned long spitfire_get_isfsr(void) -{ - unsigned long ret; - - __asm__ __volatile__("ldxa [%1] %2, %0" - : "=r" (ret) - : "r" (TLB_SFSR), "i" (ASI_IMMU)); - return ret; -} - -static __inline__ unsigned long spitfire_get_dsfsr(void) -{ - unsigned long ret; - - __asm__ __volatile__("ldxa [%1] %2, %0" - : "=r" (ret) - : "r" (TLB_SFSR), "i" (ASI_DMMU)); - return ret; -} - -static __inline__ unsigned long spitfire_get_sfar(void) -{ - unsigned long ret; - - __asm__ __volatile__("ldxa [%1] %2, %0" - : "=r" (ret) - : "r" (DMMU_SFAR), "i" (ASI_DMMU)); - return ret; -} - -static __inline__ void spitfire_put_isfsr(unsigned long sfsr) -{ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU)); -} - -static __inline__ void spitfire_put_dsfsr(unsigned long sfsr) -{ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU)); -} - /* The data cache is write through, so this just invalidates the * specified line. */ @@ -193,90 +147,6 @@ static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data) "i" (ASI_ITLB_DATA_ACCESS)); } -/* Spitfire hardware assisted TLB flushes. */ - -/* Context level flushes. 
*/ -static __inline__ void spitfire_flush_dtlb_primary_context(void) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (0x40), "i" (ASI_DMMU_DEMAP)); -} - -static __inline__ void spitfire_flush_itlb_primary_context(void) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (0x40), "i" (ASI_IMMU_DEMAP)); -} - -static __inline__ void spitfire_flush_dtlb_secondary_context(void) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (0x50), "i" (ASI_DMMU_DEMAP)); -} - -static __inline__ void spitfire_flush_itlb_secondary_context(void) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (0x50), "i" (ASI_IMMU_DEMAP)); -} - -static __inline__ void spitfire_flush_dtlb_nucleus_context(void) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (0x60), "i" (ASI_DMMU_DEMAP)); -} - -static __inline__ void spitfire_flush_itlb_nucleus_context(void) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (0x60), "i" (ASI_IMMU_DEMAP)); -} - -/* Page level flushes. */ -static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (page), "i" (ASI_DMMU_DEMAP)); -} - -static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (page), "i" (ASI_IMMU_DEMAP)); -} - -static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP)); -} - -static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page) -{ - __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" - "membar #Sync" - : /* No outputs */ - : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP)); -} - static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page) { __asm__ __volatile__("stxa %%g0, [%0] %1\n\t" diff --git a/include/asm-sparc64/system.h b/include/asm-sparc64/system.h index f9be2c5..5e94c05 100644 --- a/include/asm-sparc64/system.h +++ b/include/asm-sparc64/system.h @@ -28,6 +28,14 @@ enum sparc_cpu { #define ARCH_SUN4C_SUN4 0 #define ARCH_SUN4 0 +extern void mb(void); +extern void rmb(void); +extern void wmb(void); +extern void membar_storeload(void); +extern void membar_storeload_storestore(void); +extern void membar_storeload_loadload(void); +extern void membar_storestore_loadstore(void); + #endif #define setipl(__new_ipl) \ @@ -78,16 +86,11 @@ enum sparc_cpu { #define nop() __asm__ __volatile__ ("nop") -#define membar(type) __asm__ __volatile__ ("membar " type : : : "memory") -#define mb() \ - membar("#LoadLoad | #LoadStore | #StoreStore | #StoreLoad") -#define rmb() membar("#LoadLoad") -#define wmb() membar("#StoreStore") #define read_barrier_depends() do { } while(0) #define set_mb(__var, __value) \ - do { __var = __value; membar("#StoreLoad | #StoreStore"); } while(0) + do { __var = __value; membar_storeload_storestore(); } while(0) #define set_wmb(__var, __value) \ - do { __var = __value; membar("#StoreStore"); } while(0) + do { __var = __value; wmb(); } while(0) #ifdef CONFIG_SMP #define smp_mb() mb() @@ -190,24 +193,23 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ "wrpr %%g1, %%cwp\n\t" \ "ldx [%%g6 + 
%3], %%o6\n\t" \ "ldub [%%g6 + %2], %%o5\n\t" \ - "ldx [%%g6 + %4], %%o7\n\t" \ + "ldub [%%g6 + %4], %%o7\n\t" \ "mov %%g6, %%l2\n\t" \ "wrpr %%o5, 0x0, %%wstate\n\t" \ "ldx [%%sp + 2047 + 0x70], %%i6\n\t" \ "ldx [%%sp + 2047 + 0x78], %%i7\n\t" \ "wrpr %%g0, 0x94, %%pstate\n\t" \ "mov %%l2, %%g6\n\t" \ - "ldx [%%g6 + %7], %%g4\n\t" \ + "ldx [%%g6 + %6], %%g4\n\t" \ "wrpr %%g0, 0x96, %%pstate\n\t" \ - "andcc %%o7, %6, %%g0\n\t" \ - "beq,pt %%icc, 1f\n\t" \ + "brz,pt %%o7, 1f\n\t" \ " mov %%g7, %0\n\t" \ "b,a ret_from_syscall\n\t" \ "1:\n\t" \ : "=&r" (last) \ : "0" (next->thread_info), \ - "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_FLAGS), "i" (TI_CWP), \ - "i" (_TIF_NEWCHILD), "i" (TI_TASK) \ + "i" (TI_WSTATE), "i" (TI_KSP), "i" (TI_NEW_CHILD), \ + "i" (TI_CWP), "i" (TI_TASK) \ : "cc", \ "g1", "g2", "g3", "g7", \ "l2", "l3", "l4", "l5", "l6", "l7", \ diff --git a/include/asm-sparc64/thread_info.h b/include/asm-sparc64/thread_info.h index a1d25c0..c94d8b3 100644 --- a/include/asm-sparc64/thread_info.h +++ b/include/asm-sparc64/thread_info.h @@ -47,7 +47,9 @@ struct thread_info { struct pt_regs *kregs; struct exec_domain *exec_domain; int preempt_count; /* 0 => preemptable, <0 => BUG */ - int __pad; + __u8 new_child; + __u8 syscall_noerror; + __u16 __pad; unsigned long *utraps; @@ -66,6 +68,9 @@ struct thread_info { struct restart_block restart_block; + struct pt_regs *kern_una_regs; + unsigned int kern_una_insn; + unsigned long fpregs[0] __attribute__ ((aligned(64))); }; @@ -87,6 +92,8 @@ struct thread_info { #define TI_KREGS 0x00000028 #define TI_EXEC_DOMAIN 0x00000030 #define TI_PRE_COUNT 0x00000038 +#define TI_NEW_CHILD 0x0000003c +#define TI_SYS_NOERROR 0x0000003d #define TI_UTRAPS 0x00000040 #define TI_REG_WINDOW 0x00000048 #define TI_RWIN_SPTRS 0x000003c8 @@ -99,6 +106,8 @@ struct thread_info { #define TI_PCR 0x00000490 #define TI_CEE_STUFF 0x00000498 #define TI_RESTART_BLOCK 0x000004a0 +#define TI_KUNA_REGS 0x000004c8 +#define TI_KUNA_INSN 0x000004d0 #define TI_FPREGS 0x00000500 /* We embed this in the uppermost byte of thread_info->flags */ @@ -219,10 +228,10 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */ #define TIF_NEWSIGNALS 6 /* wants new-style signals */ #define TIF_32BIT 7 /* 32-bit binary */ -#define TIF_NEWCHILD 8 /* just-spawned child process */ +/* flag bit 8 is available */ #define TIF_SECCOMP 9 /* secure computing */ #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */ -#define TIF_SYSCALL_SUCCESS 11 +/* flag bit 11 is available */ /* NOTE: Thread flags >= 12 should be ones we have no interest * in using in assembly, else we can't use the mask as * an immediate value in instructions such as andcc. 
@@ -239,10 +248,8 @@ register struct thread_info *current_thread_info_reg asm("g6"); #define _TIF_UNALIGNED (1<<TIF_UNALIGNED) #define _TIF_NEWSIGNALS (1<<TIF_NEWSIGNALS) #define _TIF_32BIT (1<<TIF_32BIT) -#define _TIF_NEWCHILD (1<<TIF_NEWCHILD) #define _TIF_SECCOMP (1<<TIF_SECCOMP) #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT) -#define _TIF_SYSCALL_SUCCESS (1<<TIF_SYSCALL_SUCCESS) #define _TIF_ABI_PENDING (1<<TIF_ABI_PENDING) #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) diff --git a/include/asm-sparc64/timer.h b/include/asm-sparc64/timer.h index ba33a2b..edc8e08 100644 --- a/include/asm-sparc64/timer.h +++ b/include/asm-sparc64/timer.h @@ -9,49 +9,8 @@ #include <linux/types.h> -/* How timers work: - * - * On uniprocessors we just use counter zero for the system wide - * ticker, this performs thread scheduling, clock book keeping, - * and runs timer based events. Previously we used the Ultra - * %tick interrupt for this purpose. - * - * On multiprocessors we pick one cpu as the master level 10 tick - * processor. Here this counter zero tick handles clock book - * keeping and timer events only. Each Ultra has it's level - * 14 %tick interrupt set to fire off as well, even the master - * tick cpu runs this locally. This ticker performs thread - * scheduling, system/user tick counting for the current thread, - * and also profiling if enabled. - */ - #include <linux/config.h> -/* Two timers, traditionally steered to PIL's 10 and 14 respectively. - * But since INO packets are used on sun5, we could use any PIL level - * we like, however for now we use the normal ones. - * - * The 'reg' and 'interrupts' properties for these live in nodes named - * 'counter-timer'. The first of three 'reg' properties describe where - * the sun5_timer registers are. The other two I have no idea. (XXX) - */ -struct sun5_timer { - u64 count0; - u64 limit0; - u64 count1; - u64 limit1; -}; - -#define SUN5_LIMIT_ENABLE 0x80000000 -#define SUN5_LIMIT_TOZERO 0x40000000 -#define SUN5_LIMIT_ZRESTART 0x20000000 -#define SUN5_LIMIT_CMASK 0x1fffffff - -/* Given a HZ value, set the limit register to so that the timer IRQ - * gets delivered that often. 
- */ -#define SUN5_HZ_TO_LIMIT(__hz) (1000000/(__hz)) - struct sparc64_tick_ops { void (*init_tick)(unsigned long); unsigned long (*get_tick)(void); diff --git a/include/asm-sparc64/types.h b/include/asm-sparc64/types.h index 6248ed1..d0ee7f1 100644 --- a/include/asm-sparc64/types.h +++ b/include/asm-sparc64/types.h @@ -56,8 +56,6 @@ typedef unsigned long u64; typedef u32 dma_addr_t; typedef u64 dma64_addr_t; -typedef unsigned short kmem_bufctl_t; - #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/include/asm-sparc64/unistd.h b/include/asm-sparc64/unistd.h index f59144c..51ec287 100644 --- a/include/asm-sparc64/unistd.h +++ b/include/asm-sparc64/unistd.h @@ -167,12 +167,12 @@ #define __NR_pciconfig_read 148 /* ENOSYS under SunOS */ #define __NR_pciconfig_write 149 /* ENOSYS under SunOS */ #define __NR_getsockname 150 /* Common */ -/* #define __NR_getmsg 151 SunOS Specific */ -/* #define __NR_putmsg 152 SunOS Specific */ +#define __NR_inotify_init 151 /* Linux specific */ +#define __NR_inotify_add_watch 152 /* Linux specific */ #define __NR_poll 153 /* Common */ #define __NR_getdents64 154 /* Linux specific */ /* #define __NR_fcntl64 155 Linux sparc32 Specific */ -/* #define __NR_getdirentries 156 SunOS Specific */ +#define __NR_inotify_rm_watch 156 /* Linux specific */ #define __NR_statfs 157 /* Common */ #define __NR_fstatfs 158 /* Common */ #define __NR_umount 159 /* Common */ |
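
A minimal sketch, not part of the patch above: the new sfafsr.h documents how the Spitfire error trap handlers pack UDB_H, UDB_L, the TL>1 flag, the trap type and the AFSR into a single 64-bit status word, and a hypothetical helper like the following could unpack that word in C using only the SFSTAT_* shifts and masks defined there (the function name decode_sfstat is illustrative, not from the patch).

#include <linux/kernel.h>
#include <asm/sfafsr.h>

/* Illustrative only: unpack the 64-bit status word built by the
 * error trap handlers, following the SFSTAT_* layout in sfafsr.h.
 */
static void decode_sfstat(unsigned long sfstat)
{
	unsigned long udbh = (sfstat >> SFSTAT_UDBH_SHIFT) & 0x3ffUL;
	unsigned long udbl = (sfstat >> SFSTAT_UDBL_SHIFT) & 0x3ffUL;
	int tl_gt_one = (sfstat & SFSTAT_TL_GT_ONE) != 0UL;
	unsigned long type = (sfstat & SFSTAT_TRAP_TYPE) >> SFSTAT_TRAP_TYPE_SHIFT;
	unsigned long afsr = (sfstat & SFSTAT_AFSR_MASK) >> SFSTAT_AFSR_SHIFT;

	/* type is one of TRAP_TYPE_IAE, TRAP_TYPE_DAE or TRAP_TYPE_CEE */
	printk(KERN_INFO "SFSTAT: udbh=%lx udbl=%lx tl>1=%d tt=%lx afsr=%lx\n",
	       udbh, udbl, tl_gt_one, type, afsr);
}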