Diffstat (limited to 'include/asm-x86')
-rw-r--r--  include/asm-x86/bios_ebda.h                    |   2
-rw-r--r--  include/asm-x86/bitops.h                       | 149
-rw-r--r--  include/asm-x86/bitops_32.h                    | 166
-rw-r--r--  include/asm-x86/bitops_64.h                    | 162
-rw-r--r--  include/asm-x86/bootparam.h                    |  14
-rw-r--r--  include/asm-x86/e820_64.h                      |   3
-rw-r--r--  include/asm-x86/io_apic.h                      |   6
-rw-r--r--  include/asm-x86/kvm.h                          |  41
-rw-r--r--  include/asm-x86/kvm_host.h                     |  99
-rw-r--r--  include/asm-x86/kvm_para.h                     |  55
-rw-r--r--  include/asm-x86/mach-default/smpboot_hooks.h   |   2
-rw-r--r--  include/asm-x86/pgtable_32.h                   |   8
-rw-r--r--  include/asm-x86/posix_types.h                  |   8
-rw-r--r--  include/asm-x86/processor.h                    |   2
-rw-r--r--  include/asm-x86/ptrace.h                       |   2
-rw-r--r--  include/asm-x86/reboot.h                       |   2
-rw-r--r--  include/asm-x86/rio.h                          |  11
-rw-r--r--  include/asm-x86/unistd.h                       |   8
18 files changed, 363 insertions(+), 377 deletions(-)
diff --git a/include/asm-x86/bios_ebda.h b/include/asm-x86/bios_ebda.h
index 9cbd9a6..b4a46b7 100644
--- a/include/asm-x86/bios_ebda.h
+++ b/include/asm-x86/bios_ebda.h
@@ -1,6 +1,8 @@
#ifndef _MACH_BIOS_EBDA_H
#define _MACH_BIOS_EBDA_H
+#include <asm/io.h>
+
/*
* there is a real-mode segmented pointer pointing to the
* 4K EBDA area at 0x40E.
diff --git a/include/asm-x86/bitops.h b/include/asm-x86/bitops.h
index 1ae7b27..b81a4d4 100644
--- a/include/asm-x86/bitops.h
+++ b/include/asm-x86/bitops.h
@@ -62,12 +62,9 @@ static inline void set_bit(int nr, volatile void *addr)
*/
static inline void __set_bit(int nr, volatile void *addr)
{
- asm volatile("bts %1,%0"
- : ADDR
- : "Ir" (nr) : "memory");
+ asm volatile("bts %1,%0" : ADDR : "Ir" (nr) : "memory");
}
-
/**
* clear_bit - Clears a bit in memory
* @nr: Bit to clear
@@ -297,19 +294,145 @@ static inline int variable_test_bit(int nr, volatile const void *addr)
static int test_bit(int nr, const volatile unsigned long *addr);
#endif
-#define test_bit(nr,addr) \
- (__builtin_constant_p(nr) ? \
- constant_test_bit((nr),(addr)) : \
- variable_test_bit((nr),(addr)))
+#define test_bit(nr, addr) \
+ (__builtin_constant_p((nr)) \
+ ? constant_test_bit((nr), (addr)) \
+ : variable_test_bit((nr), (addr)))
+
+/**
+ * __ffs - find first set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __ffs(unsigned long word)
+{
+ asm("bsf %1,%0"
+ : "=r" (word)
+ : "rm" (word));
+ return word;
+}
+
+/**
+ * ffz - find first zero bit in word
+ * @word: The word to search
+ *
+ * Undefined if no zero exists, so code should check against ~0UL first.
+ */
+static inline unsigned long ffz(unsigned long word)
+{
+ asm("bsf %1,%0"
+ : "=r" (word)
+ : "r" (~word));
+ return word;
+}
+
+/*
+ * __fls: find last set bit in word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+static inline unsigned long __fls(unsigned long word)
+{
+ asm("bsr %1,%0"
+ : "=r" (word)
+ : "rm" (word));
+ return word;
+}
+
+#ifdef __KERNEL__
+/**
+ * ffs - find first set bit in word
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs
+ * routines, therefore differs in spirit from the other bitops.
+ *
+ * ffs(value) returns 0 if value is 0 or the position of the first
+ * set bit if value is nonzero. The first (least significant) bit
+ * is at position 1.
+ */
+static inline int ffs(int x)
+{
+ int r;
+#ifdef CONFIG_X86_CMOV
+ asm("bsfl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=r" (r) : "rm" (x), "r" (-1));
+#else
+ asm("bsfl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+#endif
+ return r + 1;
+}
+
+/**
+ * fls - find last set bit in word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as the libc and compiler builtin
+ * ffs, but returns the position of the most significant set bit.
+ *
+ * fls(value) returns 0 if value is 0 or the position of the last
+ * set bit if value is nonzero. The last (most significant) bit is
+ * at position 32.
+ */
+static inline int fls(int x)
+{
+ int r;
+#ifdef CONFIG_X86_CMOV
+ asm("bsrl %1,%0\n\t"
+ "cmovzl %2,%0"
+ : "=&r" (r) : "rm" (x), "rm" (-1));
+#else
+ asm("bsrl %1,%0\n\t"
+ "jnz 1f\n\t"
+ "movl $-1,%0\n"
+ "1:" : "=r" (r) : "rm" (x));
+#endif
+ return r + 1;
+}
+#endif /* __KERNEL__ */
#undef BASE_ADDR
#undef BIT_ADDR
#undef ADDR
-#ifdef CONFIG_X86_32
-# include "bitops_32.h"
-#else
-# include "bitops_64.h"
-#endif
+static inline void set_bit_string(unsigned long *bitmap,
+ unsigned long i, int len)
+{
+ unsigned long end = i + len;
+ while (i < end) {
+ __set_bit(i, bitmap);
+ i++;
+ }
+}
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/sched.h>
+
+#define ARCH_HAS_FAST_MULTIPLIER 1
+
+#include <asm-generic/bitops/hweight.h>
+
+#endif /* __KERNEL__ */
+
+#include <asm-generic/bitops/fls64.h>
+
+#ifdef __KERNEL__
+
+#include <asm-generic/bitops/ext2-non-atomic.h>
+
+#define ext2_set_bit_atomic(lock, nr, addr) \
+ test_and_set_bit((nr), (unsigned long *)(addr))
+#define ext2_clear_bit_atomic(lock, nr, addr) \
+ test_and_clear_bit((nr), (unsigned long *)(addr))
+
+#include <asm-generic/bitops/minix.h>
+#endif /* __KERNEL__ */
#endif /* _ASM_X86_BITOPS_H */
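
The comments in the unified bitops.h above fix the conventions: ffs()/fls() are 1-based and return 0 for a zero argument, while __ffs()/__fls() and ffz() are 0-based and undefined when no suitable bit exists. A minimal userspace sketch of those documented return values, using GCC builtins as stand-ins for the kernel inlines (an illustration of the semantics, not the kernel code itself):

#include <assert.h>

/* 1-based, 0 for zero input -- mirrors the documented ffs()/fls() contract. */
static int ffs_like(int x) { return __builtin_ffs(x); }
static int fls_like(int x) { return x ? 32 - __builtin_clz((unsigned int)x) : 0; }

/* 0-based helpers; the caller must guarantee a bit exists (word != 0,
 * or word != ~0UL for the ffz case), as the comments above require. */
static unsigned long __ffs_like(unsigned long w) { return __builtin_ctzl(w); }
static unsigned long ffz_like(unsigned long w)   { return __builtin_ctzl(~w); }

int main(void)
{
	assert(ffs_like(0) == 0 && fls_like(0) == 0);
	assert(ffs_like(0x10) == 5 && fls_like(0x10) == 5);   /* bit 4, 1-based */
	assert(__ffs_like(0x10UL) == 4);                       /* bit 4, 0-based */
	assert(ffz_like(0x0fUL) == 4);                         /* first clear bit */
	return 0;
}
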
diff --git a/include/asm-x86/bitops_32.h b/include/asm-x86/bitops_32.h
deleted file mode 100644
index 2513a81..0000000
--- a/include/asm-x86/bitops_32.h
+++ /dev/null
@@ -1,166 +0,0 @@
-#ifndef _I386_BITOPS_H
-#define _I386_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-/**
- * find_first_zero_bit - find the first zero bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit number of the first zero bit, not the number of the byte
- * containing a bit.
- */
-static inline int find_first_zero_bit(const unsigned long *addr, unsigned size)
-{
- int d0, d1, d2;
- int res;
-
- if (!size)
- return 0;
- /* This looks at memory.
- * Mark it volatile to tell gcc not to move it around
- */
- asm volatile("movl $-1,%%eax\n\t"
- "xorl %%edx,%%edx\n\t"
- "repe; scasl\n\t"
- "je 1f\n\t"
- "xorl -4(%%edi),%%eax\n\t"
- "subl $4,%%edi\n\t"
- "bsfl %%eax,%%edx\n"
- "1:\tsubl %%ebx,%%edi\n\t"
- "shll $3,%%edi\n\t"
- "addl %%edi,%%edx"
- : "=d" (res), "=&c" (d0), "=&D" (d1), "=&a" (d2)
- : "1" ((size + 31) >> 5), "2" (addr),
- "b" (addr) : "memory");
- return res;
-}
-
-/**
- * find_next_zero_bit - find the first zero bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bit number to start searching at
- * @size: The maximum size to search
- */
-int find_next_zero_bit(const unsigned long *addr, int size, int offset);
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- __asm__("bsfl %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-/**
- * find_first_bit - find the first set bit in a memory region
- * @addr: The address to start the search at
- * @size: The maximum size to search
- *
- * Returns the bit number of the first set bit, not the number of the byte
- * containing a bit.
- */
-static inline unsigned find_first_bit(const unsigned long *addr, unsigned size)
-{
- unsigned x = 0;
-
- while (x < size) {
- unsigned long val = *addr++;
- if (val)
- return __ffs(val) + x;
- x += sizeof(*addr) << 3;
- }
- return x;
-}
-
-/**
- * find_next_bit - find the first set bit in a memory region
- * @addr: The address to base the search on
- * @offset: The bit number to start searching at
- * @size: The maximum size to search
- */
-int find_next_bit(const unsigned long *addr, int size, int offset);
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
- __asm__("bsfl %1,%0"
- :"=r" (word)
- :"r" (~word));
- return word;
-}
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/sched.h>
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz() (man ffs).
- */
-static inline int ffs(int x)
-{
- int r;
-
- __asm__("bsfl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
- return r+1;
-}
-
-/**
- * fls - find last bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs().
- */
-static inline int fls(int x)
-{
- int r;
-
- __asm__("bsrl %1,%0\n\t"
- "jnz 1f\n\t"
- "movl $-1,%0\n"
- "1:" : "=r" (r) : "rm" (x));
- return r+1;
-}
-
-#include <asm-generic/bitops/hweight.h>
-
-#endif /* __KERNEL__ */
-
-#include <asm-generic/bitops/fls64.h>
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/ext2-non-atomic.h>
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr), (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr), (unsigned long *)(addr))
-
-#include <asm-generic/bitops/minix.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _I386_BITOPS_H */
diff --git a/include/asm-x86/bitops_64.h b/include/asm-x86/bitops_64.h
deleted file mode 100644
index 365f820..0000000
--- a/include/asm-x86/bitops_64.h
+++ /dev/null
@@ -1,162 +0,0 @@
-#ifndef _X86_64_BITOPS_H
-#define _X86_64_BITOPS_H
-
-/*
- * Copyright 1992, Linus Torvalds.
- */
-
-extern long find_first_zero_bit(const unsigned long *addr, unsigned long size);
-extern long find_next_zero_bit(const unsigned long *addr, long size, long offset);
-extern long find_first_bit(const unsigned long *addr, unsigned long size);
-extern long find_next_bit(const unsigned long *addr, long size, long offset);
-
-/* return index of first bit set in val or max when no bit is set */
-static inline long __scanbit(unsigned long val, unsigned long max)
-{
- asm("bsfq %1,%0 ; cmovz %2,%0" : "=&r" (val) : "r" (val), "r" (max));
- return val;
-}
-
-#define find_next_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- ((off) + (__scanbit((*(unsigned long *)addr) >> (off),(size)-(off)))) : \
- find_next_bit(addr,size,off)))
-
-#define find_next_zero_bit(addr,size,off) \
-((__builtin_constant_p(size) && (size) <= BITS_PER_LONG ? \
- ((off)+(__scanbit(~(((*(unsigned long *)addr)) >> (off)),(size)-(off)))) : \
- find_next_zero_bit(addr,size,off)))
-
-#define find_first_bit(addr, size) \
- ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
- ? (__scanbit(*(unsigned long *)(addr), (size))) \
- : find_first_bit((addr), (size))))
-
-#define find_first_zero_bit(addr, size) \
- ((__builtin_constant_p((size)) && (size) <= BITS_PER_LONG \
- ? (__scanbit(~*(unsigned long *)(addr), (size))) \
- : find_first_zero_bit((addr), (size))))
-
-static inline void set_bit_string(unsigned long *bitmap, unsigned long i,
- int len)
-{
- unsigned long end = i + len;
- while (i < end) {
- __set_bit(i, bitmap);
- i++;
- }
-}
-
-/**
- * ffz - find first zero in word.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long ffz(unsigned long word)
-{
- __asm__("bsfq %1,%0"
- :"=r" (word)
- :"r" (~word));
- return word;
-}
-
-/**
- * __ffs - find first bit in word.
- * @word: The word to search
- *
- * Undefined if no bit exists, so code should check against 0 first.
- */
-static inline unsigned long __ffs(unsigned long word)
-{
- __asm__("bsfq %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-/*
- * __fls: find last bit set.
- * @word: The word to search
- *
- * Undefined if no zero exists, so code should check against ~0UL first.
- */
-static inline unsigned long __fls(unsigned long word)
-{
- __asm__("bsrq %1,%0"
- :"=r" (word)
- :"rm" (word));
- return word;
-}
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/sched.h>
-
-/**
- * ffs - find first bit set
- * @x: the word to search
- *
- * This is defined the same way as
- * the libc and compiler builtin ffs routines, therefore
- * differs in spirit from the above ffz (man ffs).
- */
-static inline int ffs(int x)
-{
- int r;
-
- __asm__("bsfl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=r" (r) : "rm" (x), "r" (-1));
- return r+1;
-}
-
-/**
- * fls64 - find last bit set in 64 bit word
- * @x: the word to search
- *
- * This is defined the same way as fls.
- */
-static inline int fls64(__u64 x)
-{
- if (x == 0)
- return 0;
- return __fls(x) + 1;
-}
-
-/**
- * fls - find last bit set
- * @x: the word to search
- *
- * This is defined the same way as ffs.
- */
-static inline int fls(int x)
-{
- int r;
-
- __asm__("bsrl %1,%0\n\t"
- "cmovzl %2,%0"
- : "=&r" (r) : "rm" (x), "rm" (-1));
- return r+1;
-}
-
-#define ARCH_HAS_FAST_MULTIPLIER 1
-
-#include <asm-generic/bitops/hweight.h>
-
-#endif /* __KERNEL__ */
-
-#ifdef __KERNEL__
-
-#include <asm-generic/bitops/ext2-non-atomic.h>
-
-#define ext2_set_bit_atomic(lock, nr, addr) \
- test_and_set_bit((nr), (unsigned long *)(addr))
-#define ext2_clear_bit_atomic(lock, nr, addr) \
- test_and_clear_bit((nr), (unsigned long *)(addr))
-
-#include <asm-generic/bitops/minix.h>
-
-#endif /* __KERNEL__ */
-
-#endif /* _X86_64_BITOPS_H */
diff --git a/include/asm-x86/bootparam.h b/include/asm-x86/bootparam.h
index 5115135..e865990 100644
--- a/include/asm-x86/bootparam.h
+++ b/include/asm-x86/bootparam.h
@@ -9,6 +9,17 @@
#include <asm/ist.h>
#include <video/edid.h>
+/* setup data types */
+#define SETUP_NONE 0
+
+/* extensible setup data list node */
+struct setup_data {
+ u64 next;
+ u32 type;
+ u32 len;
+ u8 data[0];
+};
+
struct setup_header {
__u8 setup_sects;
__u16 root_flags;
@@ -46,6 +57,9 @@ struct setup_header {
__u32 cmdline_size;
__u32 hardware_subarch;
__u64 hardware_subarch_data;
+ __u32 payload_offset;
+ __u32 payload_length;
+ __u64 setup_data;
} __attribute__((packed));
struct sys_desc_table {
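
struct setup_data turns the boot protocol into an extensible singly linked list in physical memory: the new setup_data field in setup_header holds the physical address of the first node, each node's next field points to the following node (0 terminates the chain), and len counts only the bytes in data[]. A hedged sketch of how a consumer might walk the list; map_phys(), unmap_phys() and handle_setup_data() are hypothetical placeholders for whatever physical-memory access and dispatch the environment provides:

/* Hypothetical traversal of the setup_data chain; not code from this patch. */
static void walk_setup_data(u64 pa_data)
{
	while (pa_data) {
		struct setup_data *sd = map_phys(pa_data, sizeof(*sd));
		u32 type = sd->type;
		u32 len  = sd->len;
		u64 next = sd->next;
		unmap_phys(sd);

		if (type != SETUP_NONE) {
			/* remap with the payload included this time */
			sd = map_phys(pa_data, sizeof(*sd) + len);
			handle_setup_data(type, sd->data, len);
			unmap_phys(sd);
		}
		pa_data = next;		/* 0 means end of list */
	}
}
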
diff --git a/include/asm-x86/e820_64.h b/include/asm-x86/e820_64.h
index f478c57..71c4d68 100644
--- a/include/asm-x86/e820_64.h
+++ b/include/asm-x86/e820_64.h
@@ -48,7 +48,8 @@ extern struct e820map e820;
extern void update_e820(void);
extern void reserve_early(unsigned long start, unsigned long end, char *name);
-extern void early_res_to_bootmem(void);
+extern void free_early(unsigned long start, unsigned long end);
+extern void early_res_to_bootmem(unsigned long start, unsigned long end);
#endif/*!__ASSEMBLY__*/
diff --git a/include/asm-x86/io_apic.h b/include/asm-x86/io_apic.h
index 0c9e17c..d593e14 100644
--- a/include/asm-x86/io_apic.h
+++ b/include/asm-x86/io_apic.h
@@ -1,7 +1,7 @@
#ifndef __ASM_IO_APIC_H
#define __ASM_IO_APIC_H
-#include <asm/types.h>
+#include <linux/types.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
@@ -110,11 +110,13 @@ extern int nr_ioapic_registers[MAX_IO_APICS];
* MP-BIOS irq configuration table structures:
*/
+#define MP_MAX_IOAPIC_PIN 127
+
struct mp_ioapic_routing {
int apic_id;
int gsi_base;
int gsi_end;
- u32 pin_programmed[4];
+ DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1);
};
/* I/O APIC entries */
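
Replacing the open-coded u32 pin_programmed[4] with DECLARE_BITMAP(pin_programmed, MP_MAX_IOAPIC_PIN + 1) means callers can use the regular bitops on it instead of manual word/shift arithmetic. A hedged sketch of the intended pattern (the function and its bounds check are illustrative, not part of this patch):

/* Illustrative only: program an I/O APIC pin at most once. */
static void program_pin_once(struct mp_ioapic_routing *r, int pin)
{
	if (pin < 0 || pin > MP_MAX_IOAPIC_PIN)
		return;
	if (test_and_set_bit(pin, r->pin_programmed))
		return;			/* already programmed */
	/* ... set up the redirection entry for this GSI/pin here ... */
}
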
diff --git a/include/asm-x86/kvm.h b/include/asm-x86/kvm.h
index 7a71120..80eefef 100644
--- a/include/asm-x86/kvm.h
+++ b/include/asm-x86/kvm.h
@@ -188,4 +188,45 @@ struct kvm_cpuid2 {
struct kvm_cpuid_entry2 entries[0];
};
+/* for KVM_GET_PIT and KVM_SET_PIT */
+struct kvm_pit_channel_state {
+ __u32 count; /* can be 65536 */
+ __u16 latched_count;
+ __u8 count_latched;
+ __u8 status_latched;
+ __u8 status;
+ __u8 read_state;
+ __u8 write_state;
+ __u8 write_latch;
+ __u8 rw_mode;
+ __u8 mode;
+ __u8 bcd;
+ __u8 gate;
+ __s64 count_load_time;
+};
+
+struct kvm_pit_state {
+ struct kvm_pit_channel_state channels[3];
+};
+
+#define KVM_TRC_INJ_VIRQ (KVM_TRC_HANDLER + 0x02)
+#define KVM_TRC_REDELIVER_EVT (KVM_TRC_HANDLER + 0x03)
+#define KVM_TRC_PEND_INTR (KVM_TRC_HANDLER + 0x04)
+#define KVM_TRC_IO_READ (KVM_TRC_HANDLER + 0x05)
+#define KVM_TRC_IO_WRITE (KVM_TRC_HANDLER + 0x06)
+#define KVM_TRC_CR_READ (KVM_TRC_HANDLER + 0x07)
+#define KVM_TRC_CR_WRITE (KVM_TRC_HANDLER + 0x08)
+#define KVM_TRC_DR_READ (KVM_TRC_HANDLER + 0x09)
+#define KVM_TRC_DR_WRITE (KVM_TRC_HANDLER + 0x0A)
+#define KVM_TRC_MSR_READ (KVM_TRC_HANDLER + 0x0B)
+#define KVM_TRC_MSR_WRITE (KVM_TRC_HANDLER + 0x0C)
+#define KVM_TRC_CPUID (KVM_TRC_HANDLER + 0x0D)
+#define KVM_TRC_INTR (KVM_TRC_HANDLER + 0x0E)
+#define KVM_TRC_NMI (KVM_TRC_HANDLER + 0x0F)
+#define KVM_TRC_VMMCALL (KVM_TRC_HANDLER + 0x10)
+#define KVM_TRC_HLT (KVM_TRC_HANDLER + 0x11)
+#define KVM_TRC_CLTS (KVM_TRC_HANDLER + 0x12)
+#define KVM_TRC_LMSW (KVM_TRC_HANDLER + 0x13)
+#define KVM_TRC_APIC_ACCESS (KVM_TRC_HANDLER + 0x14)
+
#endif
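
struct kvm_pit_state exposes the in-kernel i8254 so userspace can checkpoint and restore it. A hedged userspace sketch, assuming a VM file descriptor on which an in-kernel PIT already exists (created with KVM_CREATE_PIT) and that KVM_GET_PIT/KVM_SET_PIT exchange a struct kvm_pit_state, as the comment above indicates:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* vm_fd: an existing KVM VM fd with an in-kernel PIT (illustrative). */
static int snapshot_and_restore_pit(int vm_fd)
{
	struct kvm_pit_state pit;

	if (ioctl(vm_fd, KVM_GET_PIT, &pit) < 0) {
		perror("KVM_GET_PIT");
		return -1;
	}
	printf("channel 0: count=%u mode=%u gate=%u\n",
	       pit.channels[0].count,
	       (unsigned int)pit.channels[0].mode,
	       (unsigned int)pit.channels[0].gate);

	/* ... tweak the saved state if desired, then push it back ... */
	return ioctl(vm_fd, KVM_SET_PIT, &pit);
}
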
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index 68ee390..9d963cd 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -20,6 +20,13 @@
#include <asm/desc.h>
+#define KVM_MAX_VCPUS 16
+#define KVM_MEMORY_SLOTS 32
+/* memory slots that are not exposed to userspace */
+#define KVM_PRIVATE_MEM_SLOTS 4
+
+#define KVM_PIO_PAGE_OFFSET 1
+
#define CR3_PAE_RESERVED_BITS ((X86_CR3_PWT | X86_CR3_PCD) - 1)
#define CR3_NONPAE_RESERVED_BITS ((PAGE_SIZE-1) & ~(X86_CR3_PWT | X86_CR3_PCD))
#define CR3_L_MODE_RESERVED_BITS (CR3_NONPAE_RESERVED_BITS | \
@@ -39,6 +46,13 @@
#define INVALID_PAGE (~(hpa_t)0)
#define UNMAPPED_GVA (~(gpa_t)0)
+/* shadow tables are PAE even on non-PAE hosts */
+#define KVM_HPAGE_SHIFT 21
+#define KVM_HPAGE_SIZE (1UL << KVM_HPAGE_SHIFT)
+#define KVM_HPAGE_MASK (~(KVM_HPAGE_SIZE - 1))
+
+#define KVM_PAGES_PER_HPAGE (KVM_HPAGE_SIZE / PAGE_SIZE)
+
#define DE_VECTOR 0
#define UD_VECTOR 6
#define NM_VECTOR 7
@@ -48,6 +62,7 @@
#define SS_VECTOR 12
#define GP_VECTOR 13
#define PF_VECTOR 14
+#define MC_VECTOR 18
#define SELECTOR_TI_MASK (1 << 2)
#define SELECTOR_RPL_MASK 0x03
@@ -58,7 +73,8 @@
#define KVM_PERMILLE_MMU_PAGES 20
#define KVM_MIN_ALLOC_MMU_PAGES 64
-#define KVM_NUM_MMU_PAGES 1024
+#define KVM_MMU_HASH_SHIFT 10
+#define KVM_NUM_MMU_PAGES (1 << KVM_MMU_HASH_SHIFT)
#define KVM_MIN_FREE_MMU_PAGES 5
#define KVM_REFILL_PAGES 25
#define KVM_MAX_CPUID_ENTRIES 40
@@ -106,6 +122,12 @@ enum {
#define KVM_NR_MEM_OBJS 40
+struct kvm_guest_debug {
+ int enabled;
+ unsigned long bp[4];
+ int singlestep;
+};
+
/*
* We don't want allocation failures within the mmu code, so we preallocate
* enough memory for a single page fault in a cache.
@@ -140,6 +162,7 @@ union kvm_mmu_page_role {
unsigned pad_for_nice_hex_output:6;
unsigned metaphysical:1;
unsigned access:3;
+ unsigned invalid:1;
};
};
@@ -204,11 +227,6 @@ struct kvm_vcpu_arch {
u64 shadow_efer;
u64 apic_base;
struct kvm_lapic *apic; /* kernel irqchip context */
-#define VCPU_MP_STATE_RUNNABLE 0
-#define VCPU_MP_STATE_UNINITIALIZED 1
-#define VCPU_MP_STATE_INIT_RECEIVED 2
-#define VCPU_MP_STATE_SIPI_RECEIVED 3
-#define VCPU_MP_STATE_HALTED 4
int mp_state;
int sipi_vector;
u64 ia32_misc_enable_msr;
@@ -226,8 +244,9 @@ struct kvm_vcpu_arch {
u64 *last_pte_updated;
struct {
- gfn_t gfn; /* presumed gfn during guest pte update */
- struct page *page; /* page corresponding to that gfn */
+ gfn_t gfn; /* presumed gfn during guest pte update */
+ pfn_t pfn; /* pfn corresponding to that gfn */
+ int largepage;
} update_pte;
struct i387_fxsave_struct host_fx_image;
@@ -261,6 +280,11 @@ struct kvm_vcpu_arch {
/* emulate context */
struct x86_emulate_ctxt emulate_ctxt;
+
+ gpa_t time;
+ struct kvm_vcpu_time_info hv_clock;
+ unsigned int time_offset;
+ struct page *time_page;
};
struct kvm_mem_alias {
@@ -283,10 +307,13 @@ struct kvm_arch{
struct list_head active_mmu_pages;
struct kvm_pic *vpic;
struct kvm_ioapic *vioapic;
+ struct kvm_pit *vpit;
int round_robin_prev_vcpu;
unsigned int tss_addr;
struct page *apic_access_page;
+
+ gpa_t wall_clock;
};
struct kvm_vm_stat {
@@ -298,6 +325,7 @@ struct kvm_vm_stat {
u32 mmu_recycled;
u32 mmu_cache_miss;
u32 remote_tlb_flush;
+ u32 lpages;
};
struct kvm_vcpu_stat {
@@ -320,6 +348,7 @@ struct kvm_vcpu_stat {
u32 fpu_reload;
u32 insn_emulation;
u32 insn_emulation_fail;
+ u32 hypercalls;
};
struct descriptor_table {
@@ -355,6 +384,7 @@ struct kvm_x86_ops {
u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
void (*get_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
+ int (*get_cpl)(struct kvm_vcpu *vcpu);
void (*set_segment)(struct kvm_vcpu *vcpu,
struct kvm_segment *var, int seg);
void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l);
@@ -410,6 +440,15 @@ void kvm_mmu_zap_all(struct kvm *kvm);
unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
+int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
+
+int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const void *val, int bytes);
+int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
+ gpa_t addr, unsigned long *ret);
+
+extern bool tdp_enabled;
+
enum emulation_result {
EMULATE_DONE, /* no further processing */
EMULATE_DO_MMIO, /* kvm_run filled with mmio request */
@@ -429,6 +468,7 @@ void realmode_lmsw(struct kvm_vcpu *vcpu, unsigned long msw,
unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr);
void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long value,
unsigned long *rflags);
+void kvm_enable_efer_bits(u64);
int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
int kvm_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
@@ -448,12 +488,14 @@ int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr,
int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr,
unsigned long value);
-void set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr3(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr4(struct kvm_vcpu *vcpu, unsigned long cr0);
-void set_cr8(struct kvm_vcpu *vcpu, unsigned long cr0);
-unsigned long get_cr8(struct kvm_vcpu *vcpu);
-void lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
+int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason);
+
+void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
+void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
+void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
+void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8);
+unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu);
+void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw);
void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l);
int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata);
@@ -491,6 +533,8 @@ int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+void kvm_enable_tdp(void);
+
int load_pdptrs(struct kvm_vcpu *vcpu, unsigned long cr3);
int complete_pio(struct kvm_vcpu *vcpu);
@@ -600,6 +644,7 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
#define ASM_VMX_VMWRITE_RSP_RDX ".byte 0x0f, 0x79, 0xd4"
#define ASM_VMX_VMXOFF ".byte 0x0f, 0x01, 0xc4"
#define ASM_VMX_VMXON_RAX ".byte 0xf3, 0x0f, 0xc7, 0x30"
+#define ASM_VMX_INVVPID ".byte 0x66, 0x0f, 0x38, 0x81, 0x08"
#define MSR_IA32_TIME_STAMP_COUNTER 0x010
@@ -610,4 +655,30 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
#define RMODE_TSS_SIZE \
(TSS_BASE_SIZE + TSS_REDIRECTION_SIZE + TSS_IOPB_SIZE + 1)
+enum {
+ TASK_SWITCH_CALL = 0,
+ TASK_SWITCH_IRET = 1,
+ TASK_SWITCH_JMP = 2,
+ TASK_SWITCH_GATE = 3,
+};
+
+#define KVMTRACE_5D(evt, vcpu, d1, d2, d3, d4, d5, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 5, d1, d2, d3, d4, d5)
+#define KVMTRACE_4D(evt, vcpu, d1, d2, d3, d4, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 4, d1, d2, d3, d4, 0)
+#define KVMTRACE_3D(evt, vcpu, d1, d2, d3, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 3, d1, d2, d3, 0, 0)
+#define KVMTRACE_2D(evt, vcpu, d1, d2, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 2, d1, d2, 0, 0, 0)
+#define KVMTRACE_1D(evt, vcpu, d1, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 1, d1, 0, 0, 0, 0)
+#define KVMTRACE_0D(evt, vcpu, name) \
+ trace_mark(kvm_trace_##name, "%u %p %u %u %u %u %u %u", KVM_TRC_##evt, \
+ vcpu, 0, 0, 0, 0, 0, 0)
+
#endif
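
A quick sanity check of the new large-page constants: KVM_HPAGE_SHIFT of 21 gives KVM_HPAGE_SIZE = 2 MiB, and with 4 KiB base pages KVM_PAGES_PER_HPAGE works out to 512. The sketch below only restates that arithmetic as address/gfn alignment (illustrative helpers, not code from the patch):

/* Illustrative alignment helpers built on the constants defined above. */
static inline unsigned long hpage_base(unsigned long gpa)
{
	return gpa & KVM_HPAGE_MASK;		/* 2 MiB-aligned base address */
}

static inline unsigned long hpage_first_gfn(unsigned long gfn)
{
	/* first 4 KiB frame of the 2 MiB region containing gfn */
	return gfn & ~(KVM_PAGES_PER_HPAGE - 1);
}
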
diff --git a/include/asm-x86/kvm_para.h b/include/asm-x86/kvm_para.h
index c6f3fd8..5098459 100644
--- a/include/asm-x86/kvm_para.h
+++ b/include/asm-x86/kvm_para.h
@@ -10,10 +10,65 @@
* paravirtualization, the appropriate feature bit should be checked.
*/
#define KVM_CPUID_FEATURES 0x40000001
+#define KVM_FEATURE_CLOCKSOURCE 0
+#define KVM_FEATURE_NOP_IO_DELAY 1
+#define KVM_FEATURE_MMU_OP 2
+
+#define MSR_KVM_WALL_CLOCK 0x11
+#define MSR_KVM_SYSTEM_TIME 0x12
+
+#define KVM_MAX_MMU_OP_BATCH 32
+
+/* Operations for KVM_HC_MMU_OP */
+#define KVM_MMU_OP_WRITE_PTE 1
+#define KVM_MMU_OP_FLUSH_TLB 2
+#define KVM_MMU_OP_RELEASE_PT 3
+
+/* Payload for KVM_HC_MMU_OP */
+struct kvm_mmu_op_header {
+ __u32 op;
+ __u32 pad;
+};
+
+struct kvm_mmu_op_write_pte {
+ struct kvm_mmu_op_header header;
+ __u64 pte_phys;
+ __u64 pte_val;
+};
+
+struct kvm_mmu_op_flush_tlb {
+ struct kvm_mmu_op_header header;
+};
+
+struct kvm_mmu_op_release_pt {
+ struct kvm_mmu_op_header header;
+ __u64 pt_phys;
+};
#ifdef __KERNEL__
#include <asm/processor.h>
+/* xen binary-compatible interface. See xen headers for details */
+struct kvm_vcpu_time_info {
+ uint32_t version;
+ uint32_t pad0;
+ uint64_t tsc_timestamp;
+ uint64_t system_time;
+ uint32_t tsc_to_system_mul;
+ int8_t tsc_shift;
+ int8_t pad[3];
+} __attribute__((__packed__)); /* 32 bytes */
+
+struct kvm_wall_clock {
+ uint32_t wc_version;
+ uint32_t wc_sec;
+ uint32_t wc_nsec;
+} __attribute__((__packed__));
+
+
+extern void kvmclock_init(void);
+
+
/* This instruction is vmcall. On non-VT architectures, it will generate a
* trap that we will then rewrite to the appropriate instruction.
*/
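
The kvm_vcpu_time_info and kvm_wall_clock layouts follow the Xen pvclock ABI, and the KVM_FEATURE_CLOCKSOURCE CPUID bit advertises them; a guest registers the structures by writing their guest-physical addresses to MSR_KVM_SYSTEM_TIME and MSR_KVM_WALL_CLOCK. A hedged guest-side sketch; the bit-0 "enable" convention and the wrmsrl()/__pa() usage are assumptions about the surrounding guest kernel, not something defined in this header:

/* Guest-side registration sketch; assumes kernel context with wrmsrl()/__pa(). */
static struct kvm_vcpu_time_info hv_clock __attribute__((aligned(32)));
static struct kvm_wall_clock wall_clock;

static void kvm_register_clock(void)
{
	/* Per-vcpu time page: address in the MSR, bit 0 assumed to mean "enable". */
	wrmsrl(MSR_KVM_SYSTEM_TIME, __pa(&hv_clock) | 1);

	/* Wall clock: the host fills wall_clock in when this MSR is written. */
	wrmsrl(MSR_KVM_WALL_CLOCK, __pa(&wall_clock));
}
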
diff --git a/include/asm-x86/mach-default/smpboot_hooks.h b/include/asm-x86/mach-default/smpboot_hooks.h
index 3ff2c5b..56d0e1f 100644
--- a/include/asm-x86/mach-default/smpboot_hooks.h
+++ b/include/asm-x86/mach-default/smpboot_hooks.h
@@ -33,7 +33,7 @@ static inline void smpboot_restore_warm_reset_vector(void)
*((volatile long *) phys_to_virt(0x467)) = 0;
}
-static inline void smpboot_setup_io_apic(void)
+static inline void __init smpboot_setup_io_apic(void)
{
/*
* Here we can be sure that there is an IO-APIC in the system. Let's
diff --git a/include/asm-x86/pgtable_32.h b/include/asm-x86/pgtable_32.h
index 168b644..577ab79 100644
--- a/include/asm-x86/pgtable_32.h
+++ b/include/asm-x86/pgtable_32.h
@@ -198,16 +198,16 @@ do { \
*/
#define update_mmu_cache(vma, address, pte) do { } while (0)
-void native_pagetable_setup_start(pgd_t *base);
-void native_pagetable_setup_done(pgd_t *base);
+extern void native_pagetable_setup_start(pgd_t *base);
+extern void native_pagetable_setup_done(pgd_t *base);
#ifndef CONFIG_PARAVIRT
-static inline void paravirt_pagetable_setup_start(pgd_t *base)
+static inline void __init paravirt_pagetable_setup_start(pgd_t *base)
{
native_pagetable_setup_start(base);
}
-static inline void paravirt_pagetable_setup_done(pgd_t *base)
+static inline void __init paravirt_pagetable_setup_done(pgd_t *base)
{
native_pagetable_setup_done(base);
}
diff --git a/include/asm-x86/posix_types.h b/include/asm-x86/posix_types.h
index fe312a5..bb7133d 100644
--- a/include/asm-x86/posix_types.h
+++ b/include/asm-x86/posix_types.h
@@ -1,5 +1,11 @@
#ifdef __KERNEL__
-# if defined(CONFIG_X86_32) || defined(__i386__)
+# ifdef CONFIG_X86_32
+# include "posix_types_32.h"
+# else
+# include "posix_types_64.h"
+# endif
+#else
+# ifdef __i386__
# include "posix_types_32.h"
# else
# include "posix_types_64.h"
diff --git a/include/asm-x86/processor.h b/include/asm-x86/processor.h
index e6bf92d..2e7974e 100644
--- a/include/asm-x86/processor.h
+++ b/include/asm-x86/processor.h
@@ -118,7 +118,6 @@ struct cpuinfo_x86 {
#define X86_VENDOR_CYRIX 1
#define X86_VENDOR_AMD 2
#define X86_VENDOR_UMC 3
-#define X86_VENDOR_NEXGEN 4
#define X86_VENDOR_CENTAUR 5
#define X86_VENDOR_TRANSMETA 7
#define X86_VENDOR_NSC 8
@@ -723,6 +722,7 @@ static inline void __mwait(unsigned long eax, unsigned long ecx)
static inline void __sti_mwait(unsigned long eax, unsigned long ecx)
{
+ trace_hardirqs_on();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
diff --git a/include/asm-x86/ptrace.h b/include/asm-x86/ptrace.h
index 24ec061..9f922b0 100644
--- a/include/asm-x86/ptrace.h
+++ b/include/asm-x86/ptrace.h
@@ -231,6 +231,8 @@ extern int do_get_thread_area(struct task_struct *p, int idx,
extern int do_set_thread_area(struct task_struct *p, int idx,
struct user_desc __user *info, int can_allocate);
+#define __ARCH_WANT_COMPAT_SYS_PTRACE
+
#endif /* __KERNEL__ */
#endif /* !__ASSEMBLY__ */
diff --git a/include/asm-x86/reboot.h b/include/asm-x86/reboot.h
index 6b5233b..e63741f 100644
--- a/include/asm-x86/reboot.h
+++ b/include/asm-x86/reboot.h
@@ -15,5 +15,7 @@ struct machine_ops {
extern struct machine_ops machine_ops;
void machine_real_restart(unsigned char *code, int length);
+void native_machine_crash_shutdown(struct pt_regs *regs);
+void native_machine_shutdown(void);
#endif /* _ASM_REBOOT_H */
diff --git a/include/asm-x86/rio.h b/include/asm-x86/rio.h
index 3451c57..c9448bd 100644
--- a/include/asm-x86/rio.h
+++ b/include/asm-x86/rio.h
@@ -60,15 +60,4 @@ enum {
ALT_CALGARY = 5, /* Second Planar Calgary */
};
-/*
- * there is a real-mode segmented pointer pointing to the
- * 4K EBDA area at 0x40E.
- */
-static inline unsigned long get_bios_ebda(void)
-{
- unsigned long address = *(unsigned short *)phys_to_virt(0x40EUL);
- address <<= 4;
- return address;
-}
-
#endif /* __ASM_RIO_H */
diff --git a/include/asm-x86/unistd.h b/include/asm-x86/unistd.h
index effc7ad..2a58ed3 100644
--- a/include/asm-x86/unistd.h
+++ b/include/asm-x86/unistd.h
@@ -1,5 +1,11 @@
#ifdef __KERNEL__
-# if defined(CONFIG_X86_32) || defined(__i386__)
+# ifdef CONFIG_X86_32
+# include "unistd_32.h"
+# else
+# include "unistd_64.h"
+# endif
+#else
+# ifdef __i386__
# include "unistd_32.h"
# else
# include "unistd_64.h"