Diffstat (limited to 'include')
-rw-r--r--   include/asm-ia64/esi.h        |  30
-rw-r--r--   include/asm-ia64/futex.h      | 122
-rw-r--r--   include/asm-ia64/kprobes.h    |   9
-rw-r--r--   include/asm-ia64/mca_asm.h    |   2
-rw-r--r--   include/asm-ia64/pal.h        |  16
-rw-r--r--   include/asm-ia64/processor.h  |  13
-rw-r--r--   include/asm-ia64/smp.h        |   1
-rw-r--r--   include/asm-ia64/unistd.h     |   3
8 files changed, 175 insertions, 21 deletions
diff --git a/include/asm-ia64/esi.h b/include/asm-ia64/esi.h
new file mode 100644
index 0000000..84aac0e
--- /dev/null
+++ b/include/asm-ia64/esi.h
@@ -0,0 +1,30 @@
+/*
+ * ESI service calls.
+ *
+ * Copyright (c) Copyright 2005-2006 Hewlett-Packard Development Company, L.P.
+ *	Alex Williamson <alex.williamson@hp.com>
+ */
+#ifndef esi_h
+#define esi_h
+
+#include <linux/efi.h>
+
+#define ESI_QUERY			0x00000001
+#define ESI_OPEN_HANDLE			0x02000000
+#define ESI_CLOSE_HANDLE		0x02000001
+
+enum esi_proc_type {
+	ESI_PROC_SERIALIZED,	/* calls need to be serialized */
+	ESI_PROC_MP_SAFE,	/* MP-safe, but not reentrant */
+	ESI_PROC_REENTRANT	/* MP-safe and reentrant */
+};
+
+extern int ia64_esi_init (void);
+extern struct ia64_sal_retval esi_call_phys (void *, u64 *);
+extern int ia64_esi_call(efi_guid_t, struct ia64_sal_retval *,
+			 enum esi_proc_type,
+			 u64, u64, u64, u64, u64, u64, u64, u64);
+extern int ia64_esi_call_phys(efi_guid_t, struct ia64_sal_retval *, u64, u64,
+			      u64, u64, u64, u64, u64, u64);
+
+#endif /* esi_h */
diff --git a/include/asm-ia64/futex.h b/include/asm-ia64/futex.h
index 6a332a9..07d77f3 100644
--- a/include/asm-ia64/futex.h
+++ b/include/asm-ia64/futex.h
@@ -1,6 +1,124 @@
 #ifndef _ASM_FUTEX_H
 #define _ASM_FUTEX_H
 
-#include <asm-generic/futex.h>
+#include <linux/futex.h>
+#include <asm/errno.h>
+#include <asm/system.h>
+#include <asm/uaccess.h>
 
-#endif
+#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg)	\
+do {								\
+	register unsigned long r8 __asm ("r8") = 0;		\
+	__asm__ __volatile__(					\
+		"	mf;;					\n"	\
+		"[1:] "	insn ";;				\n"	\
+		"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"	\
+		"[2:]"						\
+		: "+r" (r8), "=r" (oldval)			\
+		: "r" (uaddr), "r" (oparg)			\
+		: "memory");					\
+	ret = r8;						\
+} while (0)
+
+#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg)	\
+do {								\
+	register unsigned long r8 __asm ("r8") = 0;		\
+	int val, newval;					\
+	do {							\
+		__asm__ __volatile__(				\
+			"	mf;;				  \n"	\
+			"[1:]	ld4 %3=[%4];;			  \n"	\
+			"	mov %2=%3			  \n"	\
+			insn	";;				  \n"	\
+			"	mov ar.ccv=%2;;			  \n"	\
+			"[2:]	cmpxchg4.acq %1=[%4],%3,ar.ccv;;  \n"	\
+			"	.xdata4 \"__ex_table\", 1b-., 3f-.\n"	\
+			"	.xdata4 \"__ex_table\", 2b-., 3f-.\n"	\
+			"[3:]"					\
+			: "+r" (r8), "=r" (val), "=&r" (oldval),	\
+			   "=&r" (newval)			\
+			: "r" (uaddr), "r" (oparg)		\
+			: "memory");				\
+		if (unlikely (r8))				\
+			break;					\
+	} while (unlikely (val != oldval));			\
+	ret = r8;						\
+} while (0)
+
+static inline int
+futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
+{
+	int op = (encoded_op >> 28) & 7;
+	int cmp = (encoded_op >> 24) & 15;
+	int oparg = (encoded_op << 8) >> 20;
+	int cmparg = (encoded_op << 20) >> 20;
+	int oldval = 0, ret;
+	if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
+		oparg = 1 << oparg;
+
+	if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	inc_preempt_count();
+
+	switch (op) {
+	case FUTEX_OP_SET:
+		__futex_atomic_op1("xchg4 %1=[%2],%3", ret, oldval, uaddr,
+				   oparg);
+		break;
+	case FUTEX_OP_ADD:
+		__futex_atomic_op2("add %3=%3,%5", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_OR:
+		__futex_atomic_op2("or %3=%3,%5", ret, oldval, uaddr, oparg);
+		break;
+	case FUTEX_OP_ANDN:
+		__futex_atomic_op2("and %3=%3,%5", ret, oldval, uaddr,
+				   ~oparg);
+		break;
+	case FUTEX_OP_XOR:
+		__futex_atomic_op2("xor %3=%3,%5", ret, oldval, uaddr, oparg);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	dec_preempt_count();
+
+	if (!ret) {
+		switch (cmp) {
+		case FUTEX_OP_CMP_EQ: ret = (oldval == cmparg); break;
+		case FUTEX_OP_CMP_NE: ret = (oldval != cmparg); break;
+		case FUTEX_OP_CMP_LT: ret = (oldval < cmparg); break;
+		case FUTEX_OP_CMP_GE: ret = (oldval >= cmparg); break;
+		case FUTEX_OP_CMP_LE: ret = (oldval <= cmparg); break;
+		case FUTEX_OP_CMP_GT: ret = (oldval > cmparg); break;
+		default: ret = -ENOSYS;
+		}
+	}
+	return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
+{
+	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
+		return -EFAULT;
+
+	{
+		register unsigned long r8 __asm ("r8");
+		__asm__ __volatile__(
+			"	mf;;					\n"
+			"	mov ar.ccv=%3;;				\n"
+			"[1:]	cmpxchg4.acq %0=[%1],%2,ar.ccv		\n"
+			"	.xdata4 \"__ex_table\", 1b-., 2f-.	\n"
+			"[2:]"
+			: "=r" (r8)
+			: "r" (uaddr), "r" (newval),
+			  "rO" ((long) (unsigned) oldval)
+			: "memory");
+		return r8;
+	}
+}
+
+#endif /* _ASM_FUTEX_H */
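The futex_atomic_op_inuser() routine above unpacks four fields from a single encoded_op word: a 3-bit operation, a 4-bit comparison, and two sign-extended 12-bit arguments. A minimal user-space sketch of that packing and unpacking follows; the pack_op() helper and the sample values are illustrative only and not part of this patch, it simply mirrors the same shifts the kernel code uses.

/*
 * Illustrative, user-space only: pack an encoded_op word and decode it
 * exactly the way futex_atomic_op_inuser() above does.
 */
#include <stdio.h>

static unsigned int pack_op(int op, int oparg, int cmp, int cmparg)
{
	/* op in bits 28-31, cmp in bits 24-27, two 12-bit args below them */
	return ((op & 0xf) << 28) | ((cmp & 0xf) << 24) |
	       ((oparg & 0xfff) << 12) | (cmparg & 0xfff);
}

int main(void)
{
	/* sample values: op=1, oparg=5, cmp=2, cmparg=-2 */
	int encoded_op = pack_op(1, 5, 2, -2);

	/* same expressions as the ia64 futex_atomic_op_inuser() above */
	int op     = (encoded_op >> 28) & 7;
	int cmp    = (encoded_op >> 24) & 15;
	int oparg  = (encoded_op << 8) >> 20;	/* sign-extends the 12-bit field */
	int cmparg = (encoded_op << 20) >> 20;	/* sign-extends the 12-bit field */

	printf("op=%d cmp=%d oparg=%d cmparg=%d\n", op, cmp, oparg, cmparg);
	return 0;
}

With a typical gcc build this prints op=1 cmp=2 oparg=5 cmparg=-2, showing how the shift-left/shift-right pair recovers a signed 12-bit cmparg.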
diff --git a/include/asm-ia64/kprobes.h b/include/asm-ia64/kprobes.h
index 9389049..1b45b71 100644
--- a/include/asm-ia64/kprobes.h
+++ b/include/asm-ia64/kprobes.h
@@ -29,7 +29,8 @@
 #include <linux/percpu.h>
 #include <asm/break.h>
 
-#define MAX_INSN_SIZE   16
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE   1
 #define BREAK_INST	(long)(__IA64_BREAK_KPROBE << 6)
 
 typedef union cmp_inst {
@@ -94,7 +95,7 @@ struct kprobe_ctlblk {
 #define IP_RELATIVE_PREDICT_OPCODE	(7)
 #define LONG_BRANCH_OPCODE		(0xC)
 #define LONG_CALL_OPCODE		(0xD)
-#define arch_remove_kprobe(p)		do {} while (0)
+#define flush_insn_slot(p)		do { } while (0)
 
 typedef struct kprobe_opcode {
 	bundle_t bundle;
@@ -108,7 +109,7 @@ struct fnptr {
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the instruction to be emulated */
-	kprobe_opcode_t insn;
+	kprobe_opcode_t *insn;
 #define INST_FLAG_FIX_RELATIVE_IP_ADDR		1
 #define INST_FLAG_FIX_BRANCH_REG		2
 #define INST_FLAG_BREAK_INST			4
@@ -125,6 +126,6 @@ static inline void jprobe_return(void)
 }
 extern void invalidate_stacked_regs(void);
 extern void flush_register_stack(void);
-extern void flush_insn_slot(struct kprobe *p);
+extern void arch_remove_kprobe(struct kprobe *p);
 
 #endif /* _ASM_KPROBES_H */
diff --git a/include/asm-ia64/mca_asm.h b/include/asm-ia64/mca_asm.h
index 27c9203..76203f9 100644
--- a/include/asm-ia64/mca_asm.h
+++ b/include/asm-ia64/mca_asm.h
@@ -197,9 +197,9 @@
 	movl	temp2 = start_addr;		\
 	;;					\
 	mov	cr.iip = temp2;			\
+	movl	gp = __gp			\
 	;;					\
 	DATA_PA_TO_VA(sp, temp1);		\
-	DATA_PA_TO_VA(gp, temp2);		\
 	srlz.i;					\
 	;;					\
 	nop	1;				\
diff --git a/include/asm-ia64/pal.h b/include/asm-ia64/pal.h
index 20a8d61..2c8fd92 100644
--- a/include/asm-ia64/pal.h
+++ b/include/asm-ia64/pal.h
@@ -78,6 +78,7 @@
 #define PAL_VM_TR_READ		261	/* read contents of translation register */
 #define PAL_GET_PSTATE		262	/* get the current P-state */
 #define PAL_SET_PSTATE		263	/* set the P-state */
+#define PAL_BRAND_INFO		274	/* Processor branding information */
 
 #ifndef __ASSEMBLY__
 
@@ -963,7 +964,8 @@ static inline s64
 ia64_pal_cache_read (pal_cache_line_id_u_t line_id, u64 physical_addr)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_CACHE_READ, line_id.pclid_data, physical_addr, 0);
+	PAL_CALL_PHYS_STK(iprv, PAL_CACHE_READ, line_id.pclid_data,
+				physical_addr, 0);
 	return iprv.status;
 }
 
@@ -985,7 +987,8 @@ static inline s64
 ia64_pal_cache_write (pal_cache_line_id_u_t line_id, u64 physical_addr, u64 data)
 {
 	struct ia64_pal_retval iprv;
-	PAL_CALL(iprv, PAL_CACHE_WRITE, line_id.pclid_data, physical_addr, data);
+	PAL_CALL_PHYS_STK(iprv, PAL_CACHE_WRITE, line_id.pclid_data,
+				physical_addr, data);
 	return iprv.status;
 }
 
@@ -1133,6 +1136,15 @@ ia64_pal_set_pstate (u64 pstate_index)
 	return iprv.status;
 }
 
+/* Processor branding information*/
+static inline s64
+ia64_pal_get_brand_info (char *brand_info)
+{
+	struct ia64_pal_retval iprv;
+	PAL_CALL_STK(iprv, PAL_BRAND_INFO, 0, (u64)brand_info, 0);
+	return iprv.status;
+}
+
 /* Cause the processor to enter LIGHT HALT state, where prefetching and execution are
  * suspended, but cache and TLB coherency is maintained.
  */
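A sketch of how the new PAL_BRAND_INFO wrapper might be consumed by architecture setup code. The 128-byte buffer size and the zero "success" status follow common PAL conventions and are assumptions here; the helper name is hypothetical and not part of this patch.

#include <linux/string.h>
#include <asm/pal.h>

/* Hypothetical helper: copy the processor brand string into dst. */
static void example_get_brand(char *dst, size_t len)
{
	char brand[128];	/* assumed large enough for the PAL brand string */

	if (ia64_pal_get_brand_info(brand) == 0)
		strlcpy(dst, brand, len);
	else
		strlcpy(dst, "Unknown", len);
}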
diff --git a/include/asm-ia64/processor.h b/include/asm-ia64/processor.h
index 265f482..5830d36 100644
--- a/include/asm-ia64/processor.h
+++ b/include/asm-ia64/processor.h
@@ -20,12 +20,6 @@
 #include <asm/ustack.h>
 
 #define IA64_NUM_DBG_REGS	8
-/*
- * Limits for PMC and PMD are set to less than maximum architected values
- * but should be sufficient for a while
- */
-#define IA64_NUM_PMC_REGS	64
-#define IA64_NUM_PMD_REGS	64
 
 #define DEFAULT_MAP_BASE	__IA64_UL_CONST(0x2000000000000000)
 #define DEFAULT_TASK_SIZE	__IA64_UL_CONST(0xa000000000000000)
@@ -163,6 +157,7 @@ struct cpuinfo_ia64 {
 	__u8 family;
 	__u8 archrev;
 	char vendor[16];
+	char *model_name;
 
 #ifdef CONFIG_NUMA
 	struct ia64_node_data *node_data;
@@ -262,13 +257,9 @@ struct thread_struct {
 # define INIT_THREAD_IA32
 #endif /* CONFIG_IA32_SUPPORT */
 #ifdef CONFIG_PERFMON
-	__u64 pmcs[IA64_NUM_PMC_REGS];
-	__u64 pmds[IA64_NUM_PMD_REGS];
 	void *pfm_context;		     /* pointer to detailed PMU context */
 	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
-# define INIT_THREAD_PM		.pmcs =			{0UL, },  \
-				.pmds =			{0UL, }, \
-				.pfm_context =		NULL,     \
+# define INIT_THREAD_PM		.pfm_context =		NULL,     \
 				.pfm_needs_checking =	0UL,
 #else
 # define INIT_THREAD_PM
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 74bde1c..60fd4ae 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -126,6 +126,7 @@ extern void smp_send_reschedule (int cpu);
 extern void lock_ipi_calllock(void);
 extern void unlock_ipi_calllock(void);
 extern void identify_siblings (struct cpuinfo_ia64 *);
+extern int is_multithreading_enabled(void);
 
 #else
 
diff --git a/include/asm-ia64/unistd.h b/include/asm-ia64/unistd.h
index f581662..bb0eb72 100644
--- a/include/asm-ia64/unistd.h
+++ b/include/asm-ia64/unistd.h
@@ -286,7 +286,8 @@
 /* 1294, 1295 reserved for pselect/ppoll */
 #define __NR_unshare			1296
 #define __NR_splice			1297
-/* 1298, 1299 reserved for set_robust_list/get_robust_list */
+#define __NR_set_robust_list		1298
+#define __NR_get_robust_list		1299
 #define __NR_sync_file_range		1300
 #define __NR_tee			1301
 #define __NR_vmsplice			1302
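With the two syscall slots above wired up, user space can reach them through syscall(2). A small sketch, assuming a libc whose <sys/syscall.h> already defines SYS_set_robust_list and SYS_get_robust_list (on ia64 these resolve to the 1298/1299 numbers added here) and whose <linux/futex.h> exports struct robust_list_head:

/*
 * User-space sketch of the raw robust-list interface.  Error handling is
 * deliberately minimal; this is a demonstration, not production code.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/futex.h>	/* struct robust_list_head */

int main(void)
{
	struct robust_list_head head;
	struct robust_list_head *cur;
	size_t len;

	/* an empty robust list points back at itself */
	memset(&head, 0, sizeof(head));
	head.list.next = &head.list;

	if (syscall(SYS_set_robust_list, &head, sizeof(head)) != 0)
		perror("set_robust_list");

	/* pid 0 means "this thread" */
	if (syscall(SYS_get_robust_list, 0, &cur, &len) == 0)
		printf("robust list head=%p len=%zu\n", (void *)cur, len);
	else
		perror("get_robust_list");

	return 0;
}

In practice glibc's NPTL registers a robust list head for each thread on its own, so this only illustrates the raw interface the new syscall numbers expose.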