Diffstat (limited to 'include/asm-x86_64')
-rw-r--r--  include/asm-x86_64/alternative.h  21
-rw-r--r--  include/asm-x86_64/calgary.h      11
-rw-r--r--  include/asm-x86_64/elf.h          20
-rw-r--r--  include/asm-x86_64/kprobes.h       1
-rw-r--r--  include/asm-x86_64/page.h          2
-rw-r--r--  include/asm-x86_64/processor.h     6
-rw-r--r--  include/asm-x86_64/signal.h        2
-rw-r--r--  include/asm-x86_64/spinlock.h     11
-rw-r--r--  include/asm-x86_64/swiotlb.h       2
-rw-r--r--  include/asm-x86_64/system.h        1
-rw-r--r--  include/asm-x86_64/tce.h           8
-rw-r--r--  include/asm-x86_64/unistd.h       11
-rw-r--r--  include/asm-x86_64/unwind.h        1
-rw-r--r--  include/asm-x86_64/vsyscall.h      3
14 files changed, 44 insertions(+), 56 deletions(-)
diff --git a/include/asm-x86_64/alternative.h b/include/asm-x86_64/alternative.h
index aa67bfd..a584826 100644
--- a/include/asm-x86_64/alternative.h
+++ b/include/asm-x86_64/alternative.h
@@ -4,6 +4,7 @@
#ifdef __KERNEL__
#include <linux/types.h>
+#include <asm/cpufeature.h>
struct alt_instr {
u8 *instr; /* original instruction */
@@ -102,9 +103,6 @@ static inline void alternatives_smp_switch(int smp) {}
/*
* Alternative inline assembly for SMP.
*
- * alternative_smp() takes two versions (SMP first, UP second) and is
- * for more complex stuff such as spinlocks.
- *
* The LOCK_PREFIX macro defined here replaces the LOCK and
* LOCK_PREFIX macros used everywhere in the source tree.
*
@@ -124,21 +122,6 @@ static inline void alternatives_smp_switch(int smp) {}
*/
#ifdef CONFIG_SMP
-#define alternative_smp(smpinstr, upinstr, args...) \
- asm volatile ("661:\n\t" smpinstr "\n662:\n" \
- ".section .smp_altinstructions,\"a\"\n" \
- " .align 8\n" \
- " .quad 661b\n" /* label */ \
- " .quad 663f\n" /* new instruction */ \
- " .byte 0x66\n" /* X86_FEATURE_UP */ \
- " .byte 662b-661b\n" /* sourcelen */ \
- " .byte 664f-663f\n" /* replacementlen */ \
- ".previous\n" \
- ".section .smp_altinstr_replacement,\"awx\"\n" \
- "663:\n\t" upinstr "\n" /* replacement */ \
- "664:\n\t.fill 662b-661b,1,0x42\n" /* space for original */ \
- ".previous" : args)
-
#define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \
" .align 8\n" \
@@ -147,8 +130,6 @@ static inline void alternatives_smp_switch(int smp) {}
"661:\n\tlock; "
#else /* ! CONFIG_SMP */
-#define alternative_smp(smpinstr, upinstr, args...) \
- asm volatile (upinstr : args)
#define LOCK_PREFIX ""
#endif
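
Note: with alternative_smp() gone, the SMP/UP split for locked instructions rides entirely on LOCK_PREFIX, which records the address of each lock prefix in the .smp_locks section so alternatives_smp_switch() can patch it to a NOP on a uniprocessor. A minimal sketch of a consumer, modeled on the usual atomic_t helpers (illustrative, not part of this diff):

    static inline void atomic_inc(atomic_t *v)
    {
            /* LOCK_PREFIX emits "lock; " plus a .smp_locks entry
             * pointing at it, so the prefix can be patched out on UP. */
            asm volatile(LOCK_PREFIX "incl %0"
                         : "=m" (v->counter)
                         : "m" (v->counter));
    }
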
diff --git a/include/asm-x86_64/calgary.h b/include/asm-x86_64/calgary.h
index 6e1654f..4e39195 100644
--- a/include/asm-x86_64/calgary.h
+++ b/include/asm-x86_64/calgary.h
@@ -1,8 +1,10 @@
/*
* Derived from include/asm-powerpc/iommu.h
*
- * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
- * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Jon Mason <jdmason@us.ibm.com>
+ * Author: Muli Ben-Yehuda <muli@il.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -58,9 +60,4 @@ static inline int calgary_iommu_init(void) { return 1; }
static inline void detect_calgary(void) { return; }
#endif
-static inline unsigned int bus_to_phb(unsigned char busno)
-{
- return ((busno % 15 == 0) ? 0 : busno / 2 + 1);
-}
-
#endif /* _ASM_X86_64_CALGARY_H */
diff --git a/include/asm-x86_64/elf.h b/include/asm-x86_64/elf.h
index b4f8f4a..a406fcb 100644
--- a/include/asm-x86_64/elf.h
+++ b/include/asm-x86_64/elf.h
@@ -7,8 +7,6 @@
#include <asm/ptrace.h>
#include <asm/user.h>
-#include <asm/processor.h>
-#include <asm/compat.h>
/* x86-64 relocation types */
#define R_X86_64_NONE 0 /* No reloc */
@@ -39,18 +37,23 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef struct user_i387_struct elf_fpregset_t;
/*
- * This is used to ensure we don't load something for the wrong architecture.
- */
-#define elf_check_arch(x) \
- ((x)->e_machine == EM_X86_64)
-
-/*
* These are used to set parameters in the core dumps.
*/
#define ELF_CLASS ELFCLASS64
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_X86_64
+#ifdef __KERNEL__
+#include <asm/processor.h>
+#include <asm/compat.h>
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) \
+ ((x)->e_machine == EM_X86_64)
+
+
/* SVR4/i386 ABI (pages 3-31, 3-32) says that when the program starts %edx
contains a pointer to a function which might be registered using `atexit'.
This provides a means for the dynamic linker to call DT_FINI functions for
@@ -141,7 +144,6 @@ typedef struct user_i387_struct elf_fpregset_t;
/* I'm not sure if we can use '-' here */
#define ELF_PLATFORM ("x86_64")
-#ifdef __KERNEL__
extern void set_personality_64bit(void);
#define SET_PERSONALITY(ex, ibcs2) set_personality_64bit()
/*
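
Note: moving elf_check_arch() and the <asm/processor.h>/<asm/compat.h> includes under #ifdef __KERNEL__ keeps kernel-internal headers out of userspace builds; user code sees only the ABI constants. Inside the kernel the loader check is unchanged, e.g. (sketch; the consumer function below is not in this diff):

    static int arch_ok(struct elfhdr *hdr)
    {
            return elf_check_arch(hdr);     /* hdr->e_machine == EM_X86_64 */
    }
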
diff --git a/include/asm-x86_64/kprobes.h b/include/asm-x86_64/kprobes.h
index d36febd..cf53178 100644
--- a/include/asm-x86_64/kprobes.h
+++ b/include/asm-x86_64/kprobes.h
@@ -47,6 +47,7 @@ typedef u8 kprobe_opcode_t;
void kretprobe_trampoline(void);
extern void arch_remove_kprobe(struct kprobe *p);
+#define flush_insn_slot(p) do { } while (0)
/* Architecture specific copy of original instruction */
struct arch_specific_insn {
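
Note: flush_insn_slot() is a new hook the generic kprobes code calls after copying the probed instruction into its out-of-line slot; x86-64 instruction caches are coherent, so it expands to nothing. The calling pattern, roughly (helper name hypothetical):

    static void copy_probed_insn(struct kprobe *p)
    {
            memcpy(p->ainsn.insn, p->addr,
                   MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
            flush_insn_slot(p);     /* no-op on x86-64 */
    }
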
diff --git a/include/asm-x86_64/page.h b/include/asm-x86_64/page.h
index f7bf875..10f3461 100644
--- a/include/asm-x86_64/page.h
+++ b/include/asm-x86_64/page.h
@@ -19,7 +19,7 @@
#define EXCEPTION_STACK_ORDER 0
#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
-#define DEBUG_STACK_ORDER EXCEPTION_STACK_ORDER
+#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
#define IRQSTACK_ORDER 2
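
Note: raising DEBUG_STACK_ORDER to EXCEPTION_STACK_ORDER + 1 doubles the debug IST stack from one page to two (4 KB to 8 KB with 4 KB pages). The relation, as a hypothetical compile-time check:

    static inline void check_debug_stack(void)
    {
            /* DEBUG_STKSZ = PAGE_SIZE << (EXCEPTION_STACK_ORDER + 1) */
            BUILD_BUG_ON(DEBUG_STKSZ != 2 * EXCEPTION_STKSZ);
    }
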
diff --git a/include/asm-x86_64/processor.h b/include/asm-x86_64/processor.h
index 3b3c121..de9c314 100644
--- a/include/asm-x86_64/processor.h
+++ b/include/asm-x86_64/processor.h
@@ -232,8 +232,14 @@ struct tss_struct {
unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
} __attribute__((packed)) ____cacheline_aligned;
+
extern struct cpuinfo_x86 boot_cpu_data;
DECLARE_PER_CPU(struct tss_struct,init_tss);
+/* Save the original ist values for checking stack pointers during debugging */
+struct orig_ist {
+ unsigned long ist[7];
+};
+DECLARE_PER_CPU(struct orig_ist, orig_ist);
#ifdef CONFIG_X86_VSMP
#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
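
Note: orig_ist keeps an unmodified per-CPU copy of the seven IST stack tops so debug code can validate stack pointers even after the live TSS entries have been adjusted. The definition side would pair with the DECLARE above roughly like this (sketch; the helper name is hypothetical):

    DEFINE_PER_CPU(struct orig_ist, orig_ist);

    static void save_orig_ist(int cpu, struct tss_struct *tss)
    {
            int i;

            for (i = 0; i < 7; i++)
                    per_cpu(orig_ist, cpu).ist[i] = tss->ist[i];
    }
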
diff --git a/include/asm-x86_64/signal.h b/include/asm-x86_64/signal.h
index cef7a7d..3ede2a6 100644
--- a/include/asm-x86_64/signal.h
+++ b/include/asm-x86_64/signal.h
@@ -3,13 +3,13 @@
#ifndef __ASSEMBLY__
#include <linux/types.h>
-#include <linux/linkage.h>
#include <linux/time.h>
/* Avoid too many header ordering problems. */
struct siginfo;
#ifdef __KERNEL__
+#include <linux/linkage.h>
/* Most things should be clean enough to redefine this at will, if care
is taken to make libc match. */
diff --git a/include/asm-x86_64/spinlock.h b/include/asm-x86_64/spinlock.h
index 8d34219..248a79f 100644
--- a/include/asm-x86_64/spinlock.h
+++ b/include/asm-x86_64/spinlock.h
@@ -21,7 +21,7 @@
#define __raw_spin_lock_string \
"\n1:\t" \
- "lock ; decl %0\n\t" \
+ LOCK_PREFIX " ; decl %0\n\t" \
"js 2f\n" \
LOCK_SECTION_START("") \
"2:\t" \
@@ -40,10 +40,7 @@
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
- alternative_smp(
- __raw_spin_lock_string,
- __raw_spin_lock_string_up,
- "=m" (lock->slock) : : "memory");
+ asm volatile(__raw_spin_lock_string : "=m" (lock->slock) : : "memory");
}
#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -125,12 +122,12 @@ static inline int __raw_write_trylock(raw_rwlock_t *lock)
static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+ asm volatile(LOCK_PREFIX " ; incl %0" :"=m" (rw->lock) : : "memory");
}
static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
- asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0"
+ asm volatile(LOCK_PREFIX " ; addl $" RW_LOCK_BIAS_STR ",%0"
: "=m" (rw->lock) : : "memory");
}
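
Note: __raw_spin_lock() now always emits __raw_spin_lock_string; the SMP/UP distinction is folded into LOCK_PREFIX, which expands to "" on UP builds, making the removed alternative_smp() wrapper unnecessary. A usage sketch under the stock raw-spinlock API:

    static raw_spinlock_t demo_lock = __RAW_SPIN_LOCK_UNLOCKED;

    static void demo(void)
    {
            __raw_spin_lock(&demo_lock);  /* locked decl on SMP, plain decl on UP */
            /* critical section */
            __raw_spin_unlock(&demo_lock);
    }
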
diff --git a/include/asm-x86_64/swiotlb.h b/include/asm-x86_64/swiotlb.h
index 5f9a018..ba94ab3 100644
--- a/include/asm-x86_64/swiotlb.h
+++ b/include/asm-x86_64/swiotlb.h
@@ -42,6 +42,8 @@ extern void swiotlb_free_coherent (struct device *hwdev, size_t size,
extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
extern void swiotlb_init(void);
+extern int swiotlb_force;
+
#ifdef CONFIG_SWIOTLB
extern int swiotlb;
#else
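
Note: making swiotlb_force visible lets the x86-64 DMA init code honor "swiotlb=force" from the kernel command line outside lib/swiotlb.c. A sketch of a caller (function name hypothetical):

    static void __init maybe_enable_swiotlb(void)
    {
            if (swiotlb_force)      /* "swiotlb=force" was given */
                    swiotlb = 1;
            if (swiotlb)
                    swiotlb_init();
    }
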
diff --git a/include/asm-x86_64/system.h b/include/asm-x86_64/system.h
index f67f287..6bf170b 100644
--- a/include/asm-x86_64/system.h
+++ b/include/asm-x86_64/system.h
@@ -240,7 +240,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
#endif
#define read_barrier_depends() do {} while(0)
#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
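
Note: set_wmb() is dropped; any caller wanting its semantics can open-code the store followed by wmb(). set_mb() stays: xchg with a memory operand is implicitly locked on x86, so it doubles as a full barrier. Replacement sketch:

    static int flag;

    static void publish(int value)
    {
            /* open-coded equivalent of the removed set_wmb(flag, value) */
            flag = value;
            wmb();
    }
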
diff --git a/include/asm-x86_64/tce.h b/include/asm-x86_64/tce.h
index ee51d31..53e9a68 100644
--- a/include/asm-x86_64/tce.h
+++ b/include/asm-x86_64/tce.h
@@ -1,9 +1,11 @@
/*
- * Copyright (C) 2006 Muli Ben-Yehuda <muli@il.ibm.com>, IBM Corporation
- * Copyright (C) 2006 Jon Mason <jdmason@us.ibm.com>, IBM Corporation
- *
* This file is derived from asm-powerpc/tce.h.
*
+ * Copyright (C) IBM Corporation, 2006
+ *
+ * Author: Muli Ben-Yehuda <muli@il.ibm.com>
+ * Author: Jon Mason <jdmason@us.ibm.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
diff --git a/include/asm-x86_64/unistd.h b/include/asm-x86_64/unistd.h
index 94387c9..80fd48e 100644
--- a/include/asm-x86_64/unistd.h
+++ b/include/asm-x86_64/unistd.h
@@ -821,8 +821,6 @@ asmlinkage long sys_fork(struct pt_regs regs);
asmlinkage long sys_vfork(struct pt_regs regs);
asmlinkage long sys_pipe(int *fildes);
-#endif /* __KERNEL_SYSCALLS__ */
-
#ifndef __ASSEMBLY__
#include <linux/linkage.h>
@@ -838,9 +836,9 @@ asmlinkage long sys_rt_sigaction(int sig,
struct sigaction __user *oact,
size_t sigsetsize);
-#endif /* __ASSEMBLY__ */
+#endif /* __ASSEMBLY__ */
-#endif /* __NO_STUBS */
+#endif /* __KERNEL_SYSCALLS__ */
/*
* "Conditional" syscalls
@@ -850,5 +848,8 @@ asmlinkage long sys_rt_sigaction(int sig,
*/
#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#endif /* __NO_STUBS */
+
#endif /* __KERNEL__ */
-#endif
+
+#endif /* _ASM_X86_64_UNISTD_H_ */
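
Note: the guard shuffle gives each #endif an accurate label and a sane nesting order: __KERNEL_SYSCALLS__ now closes before cond_syscall(), __NO_STUBS closes after it, and the header gains a labeled terminating #endif. The resulting skeleton, reconstructed from the hunks above (the elided middle is summarized in comments):

    #ifndef _ASM_X86_64_UNISTD_H_
    #define _ASM_X86_64_UNISTD_H_
    /* __NR_* numbers and syscall stub macros ... */
    #ifdef __KERNEL__
    #ifndef __NO_STUBS
    #ifdef __KERNEL_SYSCALLS__
    #ifndef __ASSEMBLY__
    /* in-kernel prototypes: sys_fork(), sys_rt_sigaction(), ... */
    #endif /* __ASSEMBLY__ */
    #endif /* __KERNEL_SYSCALLS__ */
    /* cond_syscall() definition */
    #endif /* __NO_STUBS */
    #endif /* __KERNEL__ */
    #endif /* _ASM_X86_64_UNISTD_H_ */
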
diff --git a/include/asm-x86_64/unwind.h b/include/asm-x86_64/unwind.h
index f3e7124..1f6e9bf 100644
--- a/include/asm-x86_64/unwind.h
+++ b/include/asm-x86_64/unwind.h
@@ -95,6 +95,7 @@ static inline int arch_unw_user_mode(const struct unwind_frame_info *info)
#else
#define UNW_PC(frame) ((void)(frame), 0)
+#define UNW_SP(frame) ((void)(frame), 0)
static inline int arch_unw_user_mode(const void *info)
{
diff --git a/include/asm-x86_64/vsyscall.h b/include/asm-x86_64/vsyscall.h
index a85e16f..146b244 100644
--- a/include/asm-x86_64/vsyscall.h
+++ b/include/asm-x86_64/vsyscall.h
@@ -1,8 +1,6 @@
#ifndef _ASM_X86_64_VSYSCALL_H_
#define _ASM_X86_64_VSYSCALL_H_
-#include <linux/seqlock.h>
-
enum vsyscall_num {
__NR_vgettimeofday,
__NR_vtime,
@@ -14,6 +12,7 @@ enum vsyscall_num {
#define VSYSCALL_ADDR(vsyscall_nr) (VSYSCALL_START+VSYSCALL_SIZE*(vsyscall_nr))
#ifdef __KERNEL__
+#include <linux/seqlock.h>
#define __section_vxtime __attribute__ ((unused, __section__ (".vxtime"), aligned(16)))
#define __section_wall_jiffies __attribute__ ((unused, __section__ (".wall_jiffies"), aligned(16)))