author    Jun Nakajima <jun.nakajima@intel.com>  2011-12-17 19:22:12 -0800
committer Jiang, Yunhong <yunhong.jiang@intel.com>  2012-01-17 06:15:11 +0800
commit    e4a3c7801e0075a49674c79972394ad962b338f2 (patch)
tree      c8f2463b26574e3b07cd1d5fa1b02820575d34e1 /target-i386
parent    a381ef07088ce479610129e37bfef42538f397da (diff)
New files to add HAX support
The QEMU emulator interacts with the HAX kernel module. A HAX (Hardware-based Accelerated eXecution) kernel module is required to use HAX support. Most guest instructions run in VMX non-root mode (i.e. in hardware) and achieve near-native performance relative to the host. QEMU still emulates PIO/MMIO instructions and non-PG (paging-disabled) mode operations. HAX is supported only on Mac OS X and Windows hosts when Intel VT is present.

Change-Id: I8dd52a35e315437dc568f555742bb8ab7e9d8ab2
Signed-off-by: Zhang, Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Xin, Xiaohui <xiaohui.xin@intel.com>
Signed-off-by: Jiang Yunhong <yunhong.jiang@intel.com>
Signed-off-by: Nakajima, Jun <jun.nakajima@intel.com>
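For orientation, the control flow this patch expects from QEMU's CPU loop can be sketched as follows. The cpu_run_hax wrapper below is hypothetical and not part of this patch; hax_vcpu_exec and hax_stop_emulation are the entry points added in hax-all.c, and QEMU's CPUState context is assumed.

    /* Hypothetical caller loop, assuming QEMU's CPUState context. */
    void cpu_run_hax(CPUState *env)
    {
        for (;;) {
            /* Run in VMX non-root mode; returns 1 when QEMU must emulate
             * (MMIO, non-PG mode), 0 to leave the loop (HLT, reset,
             * pending signal). */
            if (!hax_vcpu_exec(env))
                break;

            do {
                /* ... execute one translation block with QEMU's emulator ... */
            } while (!hax_stop_emulation(env)); /* syncs state back to HAX */
        }
    }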
Diffstat (limited to 'target-i386')
-rw-r--r--  target-i386/hax-all.c        1002
-rw-r--r--  target-i386/hax-darwin.c      290
-rw-r--r--  target-i386/hax-darwin.h       76
-rw-r--r--  target-i386/hax-i386.h         90
-rw-r--r--  target-i386/hax-interface.h   350
-rw-r--r--  target-i386/hax-windows.c     466
-rw-r--r--  target-i386/hax-windows.h      65
7 files changed, 2339 insertions, 0 deletions
diff --git a/target-i386/hax-all.c b/target-i386/hax-all.c
new file mode 100644
index 0000000..18d65d0
--- /dev/null
+++ b/target-i386/hax-all.c
@@ -0,0 +1,1002 @@
+/*
+** Copyright (c) 2011, Intel Corporation
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+/*
+ * HAX common code for both Windows and Darwin
+ * Portions of the code in this file are derived from KVM.
+ */
+
+#include "target-i386/hax-i386.h"
+
+#define HAX_EMUL_ONE 0x1
+#define HAX_EMUL_REAL 0x2
+#define HAX_EMUL_HLT 0x4
+#define HAX_EMUL_EXITLOOP 0x5
+
+#define HAX_EMULATE_STATE_MMIO 0x1
+#define HAX_EMULATE_STATE_REAL 0x2
+#define HAX_EMULATE_STATE_NONE 0x3
+#define HAX_EMULATE_STATE_INITIAL 0x4
+
+struct hax_state hax_global;
+
+int hax_support = -1;
+
+/* Called after hax_init */
+int hax_enabled()
+{
+ return (!hax_disabled && hax_support);
+}
+
+/* Currently non-PG modes are emulated by QEMU */
+int hax_vcpu_emulation_mode(CPUState *env)
+{
+ return !(env->cr[0] & CR0_PG_MASK);
+}
+
+static int hax_prepare_emulation(CPUState *env)
+{
+ /* Flush all emulation states */
+ tlb_flush(env, 1);
+ tb_flush(env);
+ /* Sync the vcpu state from hax kernel module */
+ hax_vcpu_sync_state(env, 0);
+ return 0;
+}
+
+/*
+ * Check whether to break the translation block (TB) loop
+ * Break the loop after one MMIO emulation, or once the guest no longer
+ * requires emulation mode
+ */
+static int hax_stop_tbloop(CPUState *env)
+{
+ switch (env->hax_vcpu->emulation_state)
+ {
+ case HAX_EMULATE_STATE_MMIO:
+ return 1;
+ case HAX_EMULATE_STATE_INITIAL:
+ case HAX_EMULATE_STATE_REAL:
+ if (!hax_vcpu_emulation_mode(env))
+ return 1;
+ break;
+ default:
+ dprint("Invalid emulation state in hax_sto_tbloop state %x\n",
+ env->hax_vcpu->emulation_state);
+ break;
+ }
+
+ return 0;
+}
+
+int hax_stop_emulation(CPUState *env)
+{
+ if (hax_stop_tbloop(env))
+ {
+ env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_NONE;
+ /*
+ * QEMU emulation changes vcpu state,
+ * Sync the vcpu state to HAX kernel module
+ */
+ hax_vcpu_sync_state(env, 1);
+ return 1;
+ }
+
+ return 0;
+}
+
+int hax_stop_translate(CPUState *env)
+{
+ struct hax_vcpu_state *vstate;
+
+ vstate = env->hax_vcpu;
+ assert(vstate->emulation_state);
+ if (vstate->emulation_state == HAX_EMULATE_STATE_MMIO )
+ return 1;
+
+ return 0;
+}
+
+int valid_hax_tunnel_size(uint16_t size)
+{
+ return size >= sizeof(struct hax_tunnel);
+}
+
+hax_fd hax_vcpu_get_fd(CPUState *env)
+{
+ struct hax_vcpu_state *vcpu = env->hax_vcpu;
+ if (!vcpu)
+ return HAX_INVALID_FD;
+ return vcpu->fd;
+}
+
+/* The HAX API version implemented by QEMU */
+uint32_t hax_cur_version = 0x1;
+/* The minimum HAX kernel module API version QEMU requires */
+uint32_t hax_lest_version = 0x1;
+
+static int hax_version_support(struct hax_state *hax)
+{
+ int ret;
+ struct hax_module_version version;
+
+ ret = hax_mod_version(hax, &version);
+ if (ret < 0)
+ return 0;
+
+ if ( (hax_lest_version > version.cur_version) ||
+ (hax_cur_version < version.compat_version) )
+ return 0;
+
+ return 1;
+}
+
+int hax_vcpu_create(int id)
+{
+ struct hax_vcpu_state *vcpu = NULL;
+ int ret;
+
+ if (!hax_global.vm)
+ {
+ dprint("vcpu %x created failed, vm is null\n", id);
+ return -1;
+ }
+
+ if (hax_global.vm->vcpus[id])
+ {
+ dprint("vcpu %x allocated already\n", id);
+ return 0;
+ }
+
+ vcpu = qemu_malloc(sizeof(struct hax_vcpu_state));
+ if (!vcpu)
+ {
+ dprint("Failed to alloc vcpu state\n");
+ return -ENOMEM;
+ }
+
+ memset(vcpu, 0, sizeof(struct hax_vcpu_state));
+
+ ret = hax_host_create_vcpu(hax_global.vm->fd, id);
+ if (ret)
+ {
+ dprint("Failed to create vcpu %x\n", id);
+ goto error;
+ }
+
+ vcpu->fd = hax_host_open_vcpu(hax_global.vm->id, id);
+ if (hax_invalid_fd(vcpu->fd))
+ {
+ dprint("Failed to open the vcpu\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ hax_global.vm->vcpus[id] = vcpu;
+
+ ret = hax_host_setup_vcpu_channel(vcpu);
+ if (ret)
+ {
+ dprint("Invalid HAX tunnel size \n");
+ ret = -EINVAL;
+ goto error;
+ }
+ return 0;
+
+error:
+ /* vcpu and tunnel will be closed automatically */
+ if (vcpu && !hax_invalid_fd(vcpu->fd))
+ hax_close_fd(vcpu->fd);
+
+ hax_global.vm->vcpus[id] = NULL;
+ qemu_free(vcpu);
+ return -1;
+}
+
+int hax_vcpu_destroy(CPUState *env)
+{
+    struct hax_vcpu_state *vcpu = env->hax_vcpu;
+
+    if (!vcpu)
+        return 0;
+
+    if (!hax_global.vm)
+    {
+        dprint("vcpu %x destroy failed, vm is null\n", vcpu->vcpu_id);
+        return -1;
+    }
+
+    /*
+     * 1. The hax_tunnel is also destroyed when the vcpu is destroyed
+     * 2. hax_close_fd requests the HAX kernel module to free the vcpu
+     */
+ hax_close_fd(vcpu->fd);
+ hax_global.vm->vcpus[vcpu->vcpu_id] = NULL;
+ qemu_free(vcpu);
+ return 0;
+}
+
+int hax_init_vcpu(CPUState *env)
+{
+ int ret;
+
+ ret = hax_vcpu_create(env->cpu_index);
+ if (ret < 0)
+ {
+ dprint("Failed to create HAX vcpu\n");
+ exit(-1);
+ }
+
+ env->hax_vcpu = hax_global.vm->vcpus[env->cpu_index];
+ env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_INITIAL;
+
+ return ret;
+}
+
+struct hax_vm *hax_vm_create(struct hax_state *hax)
+{
+ struct hax_vm *vm;
+ int vm_id = 0, ret;
+
+ if (hax_invalid_fd(hax->fd))
+ return NULL;
+
+ if (hax->vm)
+ return hax->vm;
+
+ vm = qemu_malloc(sizeof(struct hax_vm));
+ if (!vm)
+ return NULL;
+ memset(vm, 0, sizeof(struct hax_vm));
+
+ ret = hax_host_create_vm(hax, vm_id);
+ if (ret) {
+ dprint("Failed to create vm %x\n", ret);
+ goto error;
+ }
+ vm->id = vm_id;
+ vm->fd = hax_host_open_vm(hax, vm_id);
+ if (hax_invalid_fd(vm->fd))
+ {
+ dprint("Open vm device error:%s\n", vm_name);
+ goto error;
+ }
+
+ hax->vm = vm;
+ return vm;
+
+error:
+ qemu_free(vm);
+ hax->vm = NULL;
+ return NULL;
+}
+
+int hax_vm_destroy(struct hax_vm *vm)
+{
+ int i;
+
+ for (i = 0; i < HAX_MAX_VCPU; i++)
+ if (vm->vcpus[i])
+ {
+ dprint("VCPU should be cleaned before vm clean\n");
+ return -1;
+ }
+ hax_close_fd(vm->fd);
+ qemu_free(vm);
+ hax_global.vm = NULL;
+ return 0;
+}
+
+int hax_init(int smp_cpus)
+{
+ struct hax_state *hax = NULL;
+ int ret;
+
+ hax_support = 0;
+
+ hax = &hax_global;
+ memset(hax, 0, sizeof(struct hax_state));
+
+ hax->fd = hax_mod_open();
+ if (hax_invalid_fd(hax->fd))
+ {
+ hax->fd = 0;
+ ret = -ENODEV;
+ goto error;
+ }
+
+ if (!hax_version_support(hax))
+ {
+ dprint("Incompatible HAX version. Qemu current version %x ", hax_cur_version );
+ dprint("requires least HAX version %x\n", hax_lest_version);
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hax->vm = hax_vm_create(hax);
+ if (!hax->vm)
+ {
+ dprint("Failed to create HAX VM\n");
+ ret = -EINVAL;
+ goto error;
+ }
+
+ hax_support = 1;
+    qemu_register_reset(hax_reset_vcpu_state, 0, NULL);
+
+ return 0;
+error:
+ if (hax->vm)
+ hax_vm_destroy(hax->vm);
+ if (hax->fd)
+ hax_mod_close(hax);
+
+ return ret;
+}
+
+int hax_handle_io(CPUState *env, uint32_t df, uint16_t port, int direction,
+ int size, int count, void *buffer)
+{
+ uint8_t *ptr;
+ int i;
+
+    if (!df)
+        ptr = (uint8_t *)buffer;
+    else
+        ptr = (uint8_t *)buffer + size * count - size;
+ for (i = 0; i < count; i++)
+ {
+ if (direction == HAX_EXIT_IO_IN) {
+ switch (size) {
+ case 1:
+ stb_p(ptr, cpu_inb(port));
+ break;
+ case 2:
+ stw_p(ptr, cpu_inw(port));
+ break;
+ case 4:
+ stl_p(ptr, cpu_inl(port));
+ break;
+ }
+ } else {
+ switch (size) {
+ case 1:
+ cpu_outb(port, ldub_p(ptr));
+ break;
+ case 2:
+ cpu_outw(port, lduw_p(ptr));
+ break;
+ case 4:
+ cpu_outl(port, ldl_p(ptr));
+ break;
+ }
+ }
+ if (!df)
+ ptr += size;
+ else
+ ptr -= size;
+ }
+
+ return 0;
+}
+
+static int hax_vcpu_interrupt(CPUState *env)
+{
+ struct hax_vcpu_state *vcpu = env->hax_vcpu;
+ struct hax_tunnel *ht = vcpu->tunnel;
+
+    /*
+     * Try to inject an interrupt if the guest can accept it
+     * Unlike KVM, the HAX kernel module checks eflags itself
+     */
+ if (ht->ready_for_interrupt_injection &&
+ (env->interrupt_request & CPU_INTERRUPT_HARD))
+ {
+ int irq;
+
+ env->interrupt_request &= ~CPU_INTERRUPT_HARD;
+ irq = cpu_get_pic_interrupt(env);
+ if (irq >= 0) {
+ hax_inject_interrupt(env, irq);
+ }
+ }
+
+ /*
+ * If we have an interrupt pending but the guest is not ready to
+ * receive it, request an interrupt window exit. This will cause
+ * a return to userspace as soon as the guest is ready to receive
+ * an interrupt.
+ */
+ if ((env->interrupt_request & CPU_INTERRUPT_HARD))
+ ht->request_interrupt_window = 1;
+ else
+ ht->request_interrupt_window = 0;
+ return 0;
+}
+
+void hax_raise_event(CPUState *env)
+{
+ struct hax_vcpu_state *vcpu = env->hax_vcpu;
+
+ if (!vcpu)
+ return;
+ vcpu->tunnel->user_event_pending = 1;
+}
+
+/*
+ * Request the HAX kernel module to run the CPU for us until one of
+ * the following occurs:
+ * 1. Guest crashes or is shut down
+ * 2. QEMU's emulation is needed, e.g. when the guest executes an MMIO
+ *    instruction or enters emulation mode (non-PG mode)
+ * 3. Guest executes HLT
+ * 4. QEMU has a signal/event pending
+ * 5. An unknown VMX exit happens
+ */
+extern void qemu_system_reset_request(void);
+static int hax_vcpu_hax_exec(CPUState *env)
+{
+ int ret = 0;
+ struct hax_vcpu_state *vcpu = env->hax_vcpu;
+ struct hax_tunnel *ht = vcpu->tunnel;
+
+ if (hax_vcpu_emulation_mode(env))
+ {
+ dprint("Trying to vcpu execute at eip:%lx\n", env->eip);
+ return HAX_EMUL_EXITLOOP;
+ }
+
+ do {
+ int hax_ret;
+
+ if (env->exit_request) {
+            ret = HAX_EMUL_EXITLOOP;
+ break;
+ }
+
+ hax_vcpu_interrupt(env);
+
+ hax_ret = hax_vcpu_run(vcpu);
+
+        /* Simply retry vcpu_run if the system call was interrupted */
+ if (hax_ret == -EINTR || hax_ret == -EAGAIN) {
+ dprint("io window interrupted\n");
+ continue;
+ }
+
+ if (hax_ret < 0)
+ {
+ dprint("vcpu run failed for vcpu %x\n", vcpu->vcpu_id);
+ abort();
+ }
+ switch (ht->_exit_status)
+ {
+ case HAX_EXIT_IO:
+ {
+ ret = hax_handle_io(env, ht->pio._df, ht->pio._port,
+ ht->pio._direction,
+ ht->pio._size, ht->pio._count, vcpu->iobuf);
+ }
+ break;
+ case HAX_EXIT_MMIO:
+ ret = HAX_EMUL_ONE;
+ break;
+ case HAX_EXIT_REAL:
+ ret = HAX_EMUL_REAL;
+ break;
+ /* Guest state changed, currently only for shutdown */
+ case HAX_EXIT_STATECHANGE:
+ dprint("VCPU shutdown request\n");
+ qemu_system_reset_request();
+ hax_prepare_emulation(env);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ ret = HAX_EMUL_EXITLOOP;
+ break;
+ case HAX_EXIT_UNKNOWN_VMEXIT:
+ dprint("Unknown VMX exit %x from guest\n", ht->_exit_reason);
+ qemu_system_reset_request();
+ hax_prepare_emulation(env);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ ret = HAX_EMUL_EXITLOOP;
+ break;
+ case HAX_EXIT_HLT:
+ if (!(env->interrupt_request & CPU_INTERRUPT_HARD) &&
+ !(env->interrupt_request & CPU_INTERRUPT_NMI)) {
+                /* HLT with no interrupt pending: really halt the vcpu */
+ env->eflags |= IF_MASK;
+ env->halted = 1;
+ env->exception_index = EXCP_HLT;
+ ret = HAX_EMUL_HLT;
+ }
+ break;
+        /* these cases simply return to the HAX kernel module */
+ case HAX_EXIT_INTERRUPT:
+ case HAX_EXIT_PAUSED:
+ break;
+ default:
+ dprint("Unknow exit %x from hax\n", ht->_exit_status);
+ qemu_system_reset_request();
+ hax_prepare_emulation(env);
+ cpu_dump_state(env, stderr, fprintf, 0);
+ ret = HAX_EMUL_EXITLOOP;
+ break;
+ }
+    } while (!ret);
+
+ if (env->exit_request) {
+ env->exit_request = 0;
+ env->exception_index = EXCP_INTERRUPT;
+ }
+ return ret;
+}
+
+/*
+ * Return 1 when QEMU needs to emulate, 0 when the caller should exit
+ * the execution loop
+ */
+int hax_vcpu_exec(CPUState *env)
+{
+ int next = 0, ret = 0;
+ struct hax_vcpu_state *vcpu;
+
+ if (env->hax_vcpu->emulation_state != HAX_EMULATE_STATE_NONE)
+ return 1;
+
+ vcpu = env->hax_vcpu;
+ next = hax_vcpu_hax_exec(env);
+ switch (next)
+ {
+ case HAX_EMUL_ONE:
+ ret = 1;
+ env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_MMIO;
+ hax_prepare_emulation(env);
+ break;
+ case HAX_EMUL_REAL:
+ ret = 1;
+ env->hax_vcpu->emulation_state =
+ HAX_EMULATE_STATE_REAL;
+ hax_prepare_emulation(env);
+ break;
+ case HAX_EMUL_HLT:
+ case HAX_EMUL_EXITLOOP:
+ break;
+ default:
+ dprint("Unknown hax vcpu exec return %x\n", next);
+ abort();
+ }
+
+ return ret;
+}
+
+#define HAX_RAM_INFO_ROM 0x1
+
+static void set_v8086_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
+{
+    memset(lhs, 0, sizeof(struct segment_desc_t));
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = 3;
+ lhs->present = 1;
+ lhs->dpl = 3;
+ lhs->operand_size = 0;
+ lhs->desc = 1;
+ lhs->long_mode = 0;
+ lhs->granularity = 0;
+ lhs->available = 0;
+}
+
+static void get_seg(SegmentCache *lhs, const struct segment_desc_t *rhs)
+{
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->flags =
+ (rhs->type << DESC_TYPE_SHIFT)
+ | (rhs->present * DESC_P_MASK)
+ | (rhs->dpl << DESC_DPL_SHIFT)
+ | (rhs->operand_size << DESC_B_SHIFT)
+ | (rhs->desc * DESC_S_MASK)
+ | (rhs->long_mode << DESC_L_SHIFT)
+ | (rhs->granularity * DESC_G_MASK)
+ | (rhs->available * DESC_AVL_MASK);
+}
+
+static void set_seg(struct segment_desc_t *lhs, const SegmentCache *rhs)
+{
+ unsigned flags = rhs->flags;
+
+ memset(lhs, 0, sizeof(struct segment_desc_t));
+ lhs->selector = rhs->selector;
+ lhs->base = rhs->base;
+ lhs->limit = rhs->limit;
+ lhs->type = (flags >> DESC_TYPE_SHIFT) & 15;
+ lhs->present = (flags & DESC_P_MASK) != 0;
+ lhs->dpl = rhs->selector & 3;
+ lhs->operand_size = (flags >> DESC_B_SHIFT) & 1;
+ lhs->desc = (flags & DESC_S_MASK) != 0;
+ lhs->long_mode = (flags >> DESC_L_SHIFT) & 1;
+ lhs->granularity = (flags & DESC_G_MASK) != 0;
+ lhs->available = (flags & DESC_AVL_MASK) != 0;
+}
+
+static void hax_getput_reg(uint64_t *hax_reg, target_ulong *qemu_reg, int set)
+{
+ target_ulong reg = *hax_reg;
+
+ if (set)
+ *hax_reg = *qemu_reg;
+ else
+ *qemu_reg = reg;
+}
+
+/* The sregs must already have been synced from the HAX kernel before this call */
+static int hax_get_segments(CPUState *env, struct vcpu_state_t *sregs)
+{
+ get_seg(&env->segs[R_CS], &sregs->_cs);
+ get_seg(&env->segs[R_DS], &sregs->_ds);
+ get_seg(&env->segs[R_ES], &sregs->_es);
+ get_seg(&env->segs[R_FS], &sregs->_fs);
+ get_seg(&env->segs[R_GS], &sregs->_gs);
+ get_seg(&env->segs[R_SS], &sregs->_ss);
+
+ get_seg(&env->tr, &sregs->_tr);
+ get_seg(&env->ldt, &sregs->_ldt);
+ env->idt.limit = sregs->_idt.limit;
+ env->idt.base = sregs->_idt.base;
+ env->gdt.limit = sregs->_gdt.limit;
+ env->gdt.base = sregs->_gdt.base;
+ return 0;
+}
+
+static int hax_set_segments(CPUState *env, struct vcpu_state_t *sregs)
+{
+ if ((env->eflags & VM_MASK)) {
+ set_v8086_seg(&sregs->_cs, &env->segs[R_CS]);
+ set_v8086_seg(&sregs->_ds, &env->segs[R_DS]);
+ set_v8086_seg(&sregs->_es, &env->segs[R_ES]);
+ set_v8086_seg(&sregs->_fs, &env->segs[R_FS]);
+ set_v8086_seg(&sregs->_gs, &env->segs[R_GS]);
+ set_v8086_seg(&sregs->_ss, &env->segs[R_SS]);
+ } else {
+ set_seg(&sregs->_cs, &env->segs[R_CS]);
+ set_seg(&sregs->_ds, &env->segs[R_DS]);
+ set_seg(&sregs->_es, &env->segs[R_ES]);
+ set_seg(&sregs->_fs, &env->segs[R_FS]);
+ set_seg(&sregs->_gs, &env->segs[R_GS]);
+ set_seg(&sregs->_ss, &env->segs[R_SS]);
+
+ if (env->cr[0] & CR0_PE_MASK) {
+ /* force ss cpl to cs cpl */
+ sregs->_ss.selector = (sregs->_ss.selector & ~3) |
+ (sregs->_cs.selector & 3);
+ sregs->_ss.dpl = sregs->_ss.selector & 3;
+ }
+ }
+
+ set_seg(&sregs->_tr, &env->tr);
+ set_seg(&sregs->_ldt, &env->ldt);
+ sregs->_idt.limit = env->idt.limit;
+ sregs->_idt.base = env->idt.base;
+ sregs->_gdt.limit = env->gdt.limit;
+ sregs->_gdt.base = env->gdt.base;
+ return 0;
+}
+
+/*
+ * After getting the vcpu state from the kernel module, some QEMU
+ * emulator state (the hflags) must be updated as well
+ */
+static int hax_setup_qemu_emulator(CPUState *env)
+{
+
+#define HFLAG_COPY_MASK ~( \
+ HF_CPL_MASK | HF_PE_MASK | HF_MP_MASK | HF_EM_MASK | \
+ HF_TS_MASK | HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK | \
+ HF_OSFXSR_MASK | HF_LMA_MASK | HF_CS32_MASK | \
+ HF_SS32_MASK | HF_CS64_MASK | HF_ADDSEG_MASK)
+
+ uint32_t hflags;
+
+ hflags = (env->segs[R_CS].flags >> DESC_DPL_SHIFT) & HF_CPL_MASK;
+ hflags |= (env->cr[0] & CR0_PE_MASK) << (HF_PE_SHIFT - CR0_PE_SHIFT);
+ hflags |= (env->cr[0] << (HF_MP_SHIFT - CR0_MP_SHIFT)) &
+ (HF_MP_MASK | HF_EM_MASK | HF_TS_MASK);
+ hflags |= (env->eflags & (HF_TF_MASK | HF_VM_MASK | HF_IOPL_MASK));
+ hflags |= (env->cr[4] & CR4_OSFXSR_MASK) <<
+ (HF_OSFXSR_SHIFT - CR4_OSFXSR_SHIFT);
+
+ if (env->efer & MSR_EFER_LMA) {
+ hflags |= HF_LMA_MASK;
+ }
+
+ if ((hflags & HF_LMA_MASK) && (env->segs[R_CS].flags & DESC_L_MASK)) {
+ hflags |= HF_CS32_MASK | HF_SS32_MASK | HF_CS64_MASK;
+ } else {
+ hflags |= (env->segs[R_CS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_CS32_SHIFT);
+ hflags |= (env->segs[R_SS].flags & DESC_B_MASK) >>
+ (DESC_B_SHIFT - HF_SS32_SHIFT);
+ if (!(env->cr[0] & CR0_PE_MASK) ||
+ (env->eflags & VM_MASK) ||
+ !(hflags & HF_CS32_MASK)) {
+ hflags |= HF_ADDSEG_MASK;
+ } else {
+ hflags |= ((env->segs[R_DS].base |
+ env->segs[R_ES].base |
+ env->segs[R_SS].base) != 0) <<
+ HF_ADDSEG_SHIFT;
+ }
+ }
+ env->hflags = (env->hflags & HFLAG_COPY_MASK) | hflags;
+ return 0;
+}
+
+static int hax_sync_vcpu_register(CPUState *env, int set)
+{
+ struct vcpu_state_t regs;
+ int ret;
+ memset(&regs, 0, sizeof(struct vcpu_state_t));
+
+ if (!set)
+ {
+ ret = hax_sync_vcpu_state(env, &regs, 0);
+ if (ret < 0)
+ return -1;
+ }
+
+    /* general registers */
+ hax_getput_reg(&regs._rax, &env->regs[R_EAX], set);
+ hax_getput_reg(&regs._rbx, &env->regs[R_EBX], set);
+ hax_getput_reg(&regs._rcx, &env->regs[R_ECX], set);
+ hax_getput_reg(&regs._rdx, &env->regs[R_EDX], set);
+ hax_getput_reg(&regs._rsi, &env->regs[R_ESI], set);
+ hax_getput_reg(&regs._rdi, &env->regs[R_EDI], set);
+ hax_getput_reg(&regs._rsp, &env->regs[R_ESP], set);
+ hax_getput_reg(&regs._rbp, &env->regs[R_EBP], set);
+
+ hax_getput_reg(&regs._rflags, &env->eflags, set);
+ hax_getput_reg(&regs._rip, &env->eip, set);
+
+    if (set)
+    {
+        regs._cr0 = env->cr[0];
+ regs._cr2 = env->cr[2];
+ regs._cr3 = env->cr[3];
+ regs._cr4 = env->cr[4];
+ hax_set_segments(env, &regs);
+ }
+ else
+ {
+ env->cr[0] = regs._cr0;
+ env->cr[2] = regs._cr2;
+ env->cr[3] = regs._cr3;
+ env->cr[4] = regs._cr4;
+ hax_get_segments(env, &regs);
+ }
+
+ if (set)
+ {
+ ret = hax_sync_vcpu_state(env, &regs, 1);
+ if (ret < 0)
+ return -1;
+ }
+ if (!set)
+ hax_setup_qemu_emulator(env);
+ return 0;
+}
+
+static void hax_msr_entry_set(struct vmx_msr *item,
+ uint32_t index, uint64_t value)
+{
+ item->entry = index;
+ item->value = value;
+}
+
+static int hax_get_msrs(CPUState *env)
+{
+ struct hax_msr_data md;
+ struct vmx_msr *msrs = md.entries;
+ int ret, i, n;
+
+ n = 0;
+ msrs[n++].entry = MSR_IA32_SYSENTER_CS;
+ msrs[n++].entry = MSR_IA32_SYSENTER_ESP;
+ msrs[n++].entry = MSR_IA32_SYSENTER_EIP;
+ msrs[n++].entry = MSR_IA32_TSC;
+ md.nr_msr = n;
+ ret = hax_sync_msr(env, &md, 0);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < md.done; i++) {
+ switch (msrs[i].entry) {
+ case MSR_IA32_SYSENTER_CS:
+ env->sysenter_cs = msrs[i].value;
+ break;
+ case MSR_IA32_SYSENTER_ESP:
+ env->sysenter_esp = msrs[i].value;
+ break;
+ case MSR_IA32_SYSENTER_EIP:
+ env->sysenter_eip = msrs[i].value;
+ break;
+ case MSR_IA32_TSC:
+ env->tsc = msrs[i].value;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+static int hax_set_msrs(CPUState *env)
+{
+    struct hax_msr_data md;
+    struct vmx_msr *msrs = md.entries;
+    int n = 0;
+
+ memset(&md, 0, sizeof(struct hax_msr_data));
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_CS, env->sysenter_cs);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_ESP, env->sysenter_esp);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_SYSENTER_EIP, env->sysenter_eip);
+ hax_msr_entry_set(&msrs[n++], MSR_IA32_TSC, env->tsc);
+ md.nr_msr = n;
+ md.done = 0;
+
+    return hax_sync_msr(env, &md, 1);
+}
+
+static int hax_get_fpu(CPUState *env)
+{
+ struct fx_layout fpu;
+ int i, ret;
+
+ ret = hax_sync_fpu(env, &fpu, 0);
+ if (ret < 0)
+ return ret;
+
+ env->fpstt = (fpu.fsw >> 11) & 7;
+ env->fpus = fpu.fsw;
+ env->fpuc = fpu.fcw;
+ for (i = 0; i < 8; ++i)
+ env->fptags[i] = !((fpu.ftw >> i) & 1);
+ memcpy(env->fpregs, fpu.st_mm, sizeof(env->fpregs));
+
+ memcpy(env->xmm_regs, fpu.mmx_1, sizeof(fpu.mmx_1));
+ memcpy((XMMReg *)(env->xmm_regs) + 8, fpu.mmx_2, sizeof(fpu.mmx_2));
+ env->mxcsr = fpu.mxcsr;
+
+ return 0;
+}
+
+static int hax_set_fpu(CPUState *env)
+{
+ struct fx_layout fpu;
+ int i;
+
+ memset(&fpu, 0, sizeof(fpu));
+ fpu.fsw = env->fpus & ~(7 << 11);
+ fpu.fsw |= (env->fpstt & 7) << 11;
+ fpu.fcw = env->fpuc;
+
+ for (i = 0; i < 8; ++i)
+ fpu.ftw |= (!env->fptags[i]) << i;
+
+ memcpy(fpu.st_mm, env->fpregs, sizeof (env->fpregs));
+ memcpy(fpu.mmx_1, env->xmm_regs, sizeof (fpu.mmx_1));
+ memcpy(fpu.mmx_2, (XMMReg *)(env->xmm_regs) + 8, sizeof (fpu.mmx_2));
+
+ fpu.mxcsr = env->mxcsr;
+
+ return hax_sync_fpu(env, &fpu, 1);
+}
+
+int hax_arch_get_registers(CPUState *env)
+{
+ int ret;
+
+ ret = hax_sync_vcpu_register(env, 0);
+ if (ret < 0)
+ return ret;
+
+ ret = hax_get_fpu(env);
+ if (ret < 0)
+ return ret;
+
+ ret = hax_get_msrs(env);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int hax_arch_set_registers(CPUState *env)
+{
+ int ret;
+ ret = hax_sync_vcpu_register(env, 1);
+
+ if (ret < 0)
+ {
+ dprint("Failed to sync vcpu reg\n");
+ return ret;
+ }
+ ret = hax_set_fpu(env);
+ if (ret < 0)
+ {
+ dprint("FPU failed\n");
+ return ret;
+ }
+ ret = hax_set_msrs(env);
+ if (ret < 0)
+ {
+ dprint("MSR failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+void hax_vcpu_sync_state(CPUState *env, int modified)
+{
+ if (hax_enabled()) {
+ if (modified)
+ hax_arch_set_registers(env);
+ else
+ hax_arch_get_registers(env);
+ }
+}
+
+/*
+ * This is simpler than the one for KVM because we don't support
+ * direct I/O device assignment at this point.
+ */
+int hax_sync_vcpus(void)
+{
+ if (hax_enabled())
+ {
+ CPUState *env;
+
+ env = first_cpu;
+ if (!env)
+ return 0;
+
+ for (; env != NULL; env = env->next_cpu) {
+ int ret;
+
+ ret = hax_arch_set_registers(env);
+ if (ret < 0)
+ {
+ dprint("Failed to sync HAX vcpu context\n");
+ exit(1);
+ }
+ }
+ }
+
+ return 0;
+}
+
+void hax_reset_vcpu_state(void *opaque)
+{
+ CPUState *env;
+ for (env = first_cpu; env != NULL; env = env->next_cpu)
+ {
+ if (env->hax_vcpu)
+ {
+ env->hax_vcpu->emulation_state = HAX_EMULATE_STATE_INITIAL;
+ env->hax_vcpu->tunnel->user_event_pending = 0;
+ env->hax_vcpu->tunnel->ready_for_interrupt_injection = 0;
+ }
+ }
+}
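The file above expects a fixed initialization order: hax_init() once, then hax_init_vcpu() per CPU. A minimal sketch follows; the machine_init_hax wrapper name is hypothetical and not part of this patch, while first_cpu/next_cpu are QEMU's existing CPU list.

    /* Hypothetical init wrapper showing the call order hax-all.c expects. */
    static void machine_init_hax(int smp_cpus)
    {
        CPUState *env;

        /* Opens the HAX device, checks API compatibility and creates the
         * single global VM; sets hax_support on success. */
        if (hax_init(smp_cpus) < 0)
            return; /* HAX unavailable: stay on pure emulation */

        for (env = first_cpu; env != NULL; env = env->next_cpu)
            hax_init_vcpu(env); /* create and open one kernel vcpu per CPU */
    }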
diff --git a/target-i386/hax-darwin.c b/target-i386/hax-darwin.c
new file mode 100644
index 0000000..b6d27c3
--- /dev/null
+++ b/target-i386/hax-darwin.c
@@ -0,0 +1,290 @@
+/*
+** Copyright (c) 2011, Intel Corporation
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+/* HAX module interface - darwin version */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/ioctl.h>
+
+#include "target-i386/hax-i386.h"
+hax_fd hax_mod_open(void)
+{
+ int fd = open("/dev/HAX", O_RDWR);
+
+ if (fd == -1)
+ {
+ dprint("Failed to open the hax module\n");
+ return -errno;
+ }
+
+ return fd;
+}
+
+int hax_populate_ram(uint64_t va, uint32_t size)
+{
+ int ret;
+ struct hax_alloc_ram_info info;
+
+ if (!hax_global.vm || !hax_global.vm->fd)
+ {
+ dprint("Allocate memory before vm create?\n");
+ return -EINVAL;
+ }
+
+ info.size = size;
+ info.va = va;
+ ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_ALLOC_RAM, &info);
+ if (ret < 0)
+ {
+ dprint("Failed to allocate %x memory\n", size);
+ return ret;
+ }
+ return 0;
+}
+
+int hax_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size, ram_addr_t phys_offset)
+{
+ struct hax_set_ram_info info, *pinfo = &info;
+ int ret;
+ ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
+
+ /* We look for the RAM and ROM only */
+ if (flags >= IO_MEM_UNASSIGNED)
+ return 0;
+
+ if ( (start_addr & ~TARGET_PAGE_MASK) || (size & ~TARGET_PAGE_MASK))
+ {
+ dprint("set_phys_mem %x %lx requires page aligned addr and size\n", start_addr, size);
+ exit(1);
+ return -1;
+ }
+
+ info.pa_start = start_addr;
+ info.size = size;
+ info.va = (uint64_t)qemu_get_ram_ptr(phys_offset);
+    info.flags = (flags & IO_MEM_ROM) ? HAX_RAM_INFO_ROM : 0;
+
+ ret = ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, pinfo);
+ if (ret < 0)
+ {
+ dprint("has set phys mem failed\n");
+ exit(1);
+ }
+ return ret;
+}
+
+int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
+{
+ int ret;
+
+ ret = ioctl(hax->fd, HAX_IOCTL_VERSION, version);
+ if (ret == -1)
+ {
+ dprint("Failed to get HAX version\n");
+ return -errno;
+ }
+
+ return 0;
+}
+
+static char *hax_vm_devfs_string(int vm_id)
+{
+ char *name;
+
+ if (vm_id > MAX_VM_ID)
+ {
+ dprint("Too big VM id\n");
+ return NULL;
+ }
+
+ name = qemu_strdup("/dev/hax_vm/vmxx");
+ if (!name)
+ return NULL;
+ sprintf(name, "/dev/hax_vm/vm%02d", vm_id);
+
+ return name;
+}
+
+static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
+{
+ char *name;
+
+ if (vm_id > MAX_VM_ID || vcpu_id > MAX_VCPU_ID)
+ {
+ dprint("Too big vm id %x or vcpu id %x\n", vm_id, vcpu_id);
+ return NULL;
+ }
+
+ name = qemu_strdup("/dev/hax_vmxx/vcpuyy");
+ if (!name)
+ return NULL;
+
+ sprintf(name, "/dev/hax_vm%02d/vcpu%02d", vm_id, vcpu_id);
+
+ return name;
+}
+
+int hax_host_create_vm(struct hax_state *hax, int vm_id)
+{
+ int ret;
+
+ if (hax_invalid_fd(hax->fd))
+ return -EINVAL;
+
+ if (hax->vm)
+ return 0;
+
+ ret = ioctl(hax->fd, HAX_IOCTL_CREATE_VM, &vm_id);
+
+ return ret;
+}
+
+hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
+{
+ hax_fd fd;
+ char *vm_name = NULL;
+
+ vm_name = hax_vm_devfs_string(vm_id);
+ if (!vm_name)
+ return -1;
+
+ fd = open(vm_name, O_RDWR);
+ qemu_free(vm_name);
+
+ return fd;
+}
+
+/*
+ * We simply require the tunnel size to be at least
+ * sizeof(struct hax_tunnel), since hax_tunnel may be extended later in a
+ * backward-compatible way.
+ */
+int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
+{
+ int ret;
+
+ ret = ioctl(vm_fd, HAX_VM_IOCTL_VCPU_CREATE, &vcpuid);
+ if (ret < 0)
+ dprint("Failed to create vcpu %x\n", vcpuid);
+
+ return ret;
+}
+
+hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
+{
+ char *devfs_path = NULL;
+ hax_fd fd;
+
+ devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
+ if (!devfs_path)
+ {
+ dprint("Failed to get the devfs\n");
+ return -EINVAL;
+ }
+
+ fd = open(devfs_path, O_RDWR);
+ qemu_free(devfs_path);
+ if (fd < 0)
+ dprint("Failed to open the vcpu devfs\n");
+ return fd;
+}
+
+int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
+{
+ int ret;
+ struct hax_tunnel_info info;
+
+ ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_SETUP_TUNNEL, &info);
+ if (ret)
+ {
+ dprint("Failed to setup the hax tunnel\n");
+ return ret;
+ }
+
+ if (!valid_hax_tunnel_size(info.size))
+ {
+ dprint("Invalid hax tunnel size %x\n", info.size);
+ ret = -EINVAL;
+ return ret;
+ }
+
+ vcpu->tunnel = (struct hax_tunnel *)(info.va);
+ vcpu->iobuf = (unsigned char *)(info.io_va);
+ return 0;
+}
+
+int hax_vcpu_run(struct hax_vcpu_state* vcpu)
+{
+ int ret;
+
+ ret = ioctl(vcpu->fd, HAX_VCPU_IOCTL_RUN, NULL);
+ return ret;
+}
+
+int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0)
+ return -1;
+
+ if (set)
+ ret = ioctl(fd, HAX_VCPU_IOCTL_SET_FPU, fl);
+ else
+ ret = ioctl(fd, HAX_VCPU_IOCTL_GET_FPU, fl);
+ return ret;
+}
+
+int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0)
+ return -1;
+ if (set)
+ ret = ioctl(fd, HAX_VCPU_IOCTL_SET_MSRS, msrs);
+ else
+ ret = ioctl(fd, HAX_VCPU_IOCTL_GET_MSRS, msrs);
+ return ret;
+}
+
+int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0)
+ return -1;
+
+ if (set)
+ ret = ioctl(fd, HAX_VCPU_SET_REGS, state);
+ else
+ ret = ioctl(fd, HAX_VCPU_GET_REGS, state);
+ return ret;
+}
+
+int hax_inject_interrupt(CPUState *env, int vector)
+{
+ int ret, fd;
+
+ fd = hax_vcpu_get_fd(env);
+ if (fd <= 0)
+ return -1;
+
+ ret = ioctl(fd, HAX_VCPU_IOCTL_INTERRUPT, &vector);
+ return ret;
+}
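A sketch of probing the Darwin driver with the helpers above, mirroring the version check in hax_version_support() from hax-all.c. The hax_probe name is hypothetical; the errno constants assume <errno.h> as already included by this file.

    /* Hypothetical probe: open the module and verify API compatibility. */
    static int hax_probe(struct hax_state *hax)
    {
        struct hax_module_version ver;

        hax->fd = hax_mod_open(); /* open("/dev/HAX", O_RDWR) */
        if (hax_invalid_fd(hax->fd))
            return -ENODEV;

        if (hax_mod_version(hax, &ver) < 0) {
            hax_mod_close(hax);
            return -EINVAL;
        }

        /* Compatible iff the driver is new enough for QEMU, and QEMU is
         * not older than the oldest API the driver still supports. */
        if (ver.cur_version < hax_lest_version ||
            ver.compat_version > hax_cur_version) {
            hax_mod_close(hax);
            return -EINVAL;
        }
        return 0;
    }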
diff --git a/target-i386/hax-darwin.h b/target-i386/hax-darwin.h
new file mode 100644
index 0000000..261cfd3
--- /dev/null
+++ b/target-i386/hax-darwin.h
@@ -0,0 +1,76 @@
+/*
+** Copyright (c) 2011, Intel Corporation
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+#ifndef __HAX_UNIX_H
+#define __HAX_UNIX_H
+
+#include <sys/types.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <stdarg.h>
+
+#define HAX_INVALID_FD (-1)
+static inline int hax_invalid_fd(hax_fd fd)
+{
+ return fd <= 0;
+}
+
+static inline void hax_mod_close(struct hax_state *hax)
+{
+ close(hax->fd);
+}
+
+static inline void hax_close_fd(hax_fd fd)
+{
+ close(fd);
+}
+
+/* HAX module level ioctls */
+/* Get API version the HAX driver supports */
+#define HAX_IOCTL_VERSION _IOWR(0, 0x20, struct hax_module_version)
+/* Create VM instance and return the vm_id */
+#define HAX_IOCTL_CREATE_VM _IOWR(0, 0x21, int)
+
+/* Pass down a VM_ID, create a VCPU instance for it */
+#define HAX_VM_IOCTL_VCPU_CREATE _IOR(0, 0x80, int)
+/*
+ * Allocate guest memory. The steps are:
+ * 1. QEMU allocates the virtual address space to cover the guest memory
+ *    ranges
+ * 2. QEMU passes the virtual address and length down in a
+ *    hax_alloc_ram_info structure through the HAX_VM_IOCTL_ALLOC_RAM ioctl
+ * 3. The HAX driver populates physical memory for the virtual address
+ *    range and locks these physical pages so they cannot be swapped out
+ * 4. The HAX driver maps the populated physical memory into kernel address
+ *    space
+ * (A userspace usage sketch follows this header.)
+ */
+#define HAX_VM_IOCTL_ALLOC_RAM _IOWR(0, 0x81, struct hax_alloc_ram_info)
+/*
+ * Set up the translation from guest physical address to host virtual
+ * address for a RAM range
+ */
+#define HAX_VM_IOCTL_SET_RAM _IOWR(0, 0x82, struct hax_set_ram_info)
+/* Run the guest in non-root mode */
+#define HAX_VCPU_IOCTL_RUN _IO(0, 0xc0)
+/* Sync QEMU's guest MSR value to HAX driver */
+#define HAX_VCPU_IOCTL_SET_MSRS _IOWR(0, 0xc1, struct hax_msr_data)
+/* Sync HAX driver's guest MSR value to QEMU */
+#define HAX_VCPU_IOCTL_GET_MSRS _IOWR(0, 0xc2, struct hax_msr_data)
+#define HAX_VCPU_IOCTL_SET_FPU _IOW(0, 0xc3, struct fx_layout)
+#define HAX_VCPU_IOCTL_GET_FPU _IOR(0, 0xc4, struct fx_layout)
+
+/* Setup HAX tunnel, see structure hax_tunnel comments in hax-interface.h */
+#define HAX_VCPU_IOCTL_SETUP_TUNNEL _IOWR(0, 0xc5, struct hax_tunnel_info)
+/* An interrupt needs to be injected into the guest */
+#define HAX_VCPU_IOCTL_INTERRUPT _IOWR(0, 0xc6, uint32_t)
+#define HAX_VCPU_SET_REGS _IOWR(0, 0xc7, struct vcpu_state_t)
+#define HAX_VCPU_GET_REGS _IOWR(0, 0xc8, struct vcpu_state_t)
+
+#endif /* __HAX_UNIX_H */
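The HAX_VM_IOCTL_ALLOC_RAM comment above describes a two-ioctl sequence for guest RAM. A minimal userspace sketch of that sequence follows; hax_map_guest_ram is a hypothetical name, qemu_memalign() and TARGET_PAGE_SIZE are assumed from QEMU, and <stdint.h>/<sys/ioctl.h> are assumed as in hax-darwin.c.

    /* Hypothetical RAM registration following the steps listed above. */
    static void *hax_map_guest_ram(uint64_t gpa, uint32_t size)
    {
        /* 1. QEMU allocates host virtual memory covering the guest range. */
        void *va = qemu_memalign(TARGET_PAGE_SIZE, size);

        /* 2.-4. The driver populates, locks and kernel-maps the pages. */
        if (hax_populate_ram((uint64_t)(uintptr_t)va, size) < 0)
            return NULL;

        /* Then the guest-physical -> host-virtual translation is set. */
        struct hax_set_ram_info info = {
            .pa_start = gpa,
            .size     = size,
            .va       = (uint64_t)(uintptr_t)va,
            .flags    = 0, /* plain RAM, not HAX_RAM_INFO_ROM */
        };
        if (ioctl(hax_global.vm->fd, HAX_VM_IOCTL_SET_RAM, &info) < 0)
            return NULL;
        return va;
    }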
diff --git a/target-i386/hax-i386.h b/target-i386/hax-i386.h
new file mode 100644
index 0000000..e55e584
--- /dev/null
+++ b/target-i386/hax-i386.h
@@ -0,0 +1,90 @@
+/*
+** Copyright (c) 2011, Intel Corporation
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+#ifndef _HAX_I386_H
+#define _HAX_I386_H
+
+#include "android/utils/debug.h"
+#include "hax.h"
+
+#ifdef CONFIG_DARWIN
+typedef int hax_fd;
+#endif
+
+#ifdef CONFIG_WIN32
+typedef HANDLE hax_fd;
+#endif
+
+extern struct hax_state hax_global;
+struct hax_vcpu_state
+{
+ hax_fd fd;
+ int vcpu_id;
+ int emulation_state;
+ struct hax_tunnel *tunnel;
+ unsigned char *iobuf;
+};
+
+struct hax_state
+{
+ hax_fd fd; /* the global hax device interface */
+ uint32_t version;
+ struct hax_vm *vm;
+};
+
+#define HAX_MAX_VCPU 0x10
+#define MAX_VM_ID 0x40
+#define MAX_VCPU_ID 0x40
+
+struct hax_vm
+{
+ hax_fd fd;
+ int id;
+ struct hax_vcpu_state *vcpus[HAX_MAX_VCPU];
+};
+
+/* Functions exported to host specific mode */
+hax_fd hax_vcpu_get_fd(CPUState *env);
+int valid_hax_tunnel_size(uint16_t size);
+
+/* Host specific functions */
+int hax_mod_version(struct hax_state *hax, struct hax_module_version *version);
+int hax_inject_interrupt(CPUState *env, int vector);
+struct hax_vm *hax_vm_create(struct hax_state *hax);
+int hax_vcpu_run(struct hax_vcpu_state *vcpu);
+int hax_vcpu_create(int id);
+int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set);
+int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set);
+int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set);
+int hax_vm_destroy(struct hax_vm *vm);
+
+/* Common host function */
+int hax_host_create_vm(struct hax_state *hax, int vm_id);
+hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id);
+int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid);
+hax_fd hax_host_open_vcpu(int vmid, int vcpuid);
+int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu);
+hax_fd hax_mod_open(void);
+
+
+#ifdef CONFIG_DARWIN
+#include "target-i386/hax-darwin.h"
+#endif
+
+#ifdef CONFIG_WIN32
+#include "target-i386/hax-windows.h"
+#endif
+
+#include "target-i386/hax-interface.h"
+
+#endif
diff --git a/target-i386/hax-interface.h b/target-i386/hax-interface.h
new file mode 100644
index 0000000..5a9ed31
--- /dev/null
+++ b/target-i386/hax-interface.h
@@ -0,0 +1,350 @@
+/*
+** Copyright (c) 2011, Intel Corporation
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+#ifndef _HAX_INTERFACE_H
+#define _HAX_INTERFACE_H
+
+/*
+ * Common data structure for HAX interface on both Mac and Windows
+ * The IOCTL is defined in hax-darwin.h and hax-windows.h
+ */
+
+/* FXSAVE area layout (fx_layout) according to the Intel SDM */
+struct fx_layout {
+ uint16_t fcw;
+ uint16_t fsw;
+ uint8 ftw;
+ uint8 res1;
+ uint16_t fop;
+ union {
+ struct {
+ uint32 fip;
+ uint16_t fcs;
+ uint16_t res2;
+ };
+ uint64 fpu_ip;
+ };
+ union {
+ struct {
+ uint32 fdp;
+ uint16_t fds;
+ uint16_t res3;
+ };
+ uint64 fpu_dp;
+ };
+ uint32 mxcsr;
+ uint32 mxcsr_mask;
+ uint8 st_mm[8][16];
+ uint8 mmx_1[8][16];
+ uint8 mmx_2[8][16];
+ uint8 pad[96];
+};
+
+struct vmx_msr {
+ uint64 entry;
+ uint64 value;
+};
+
+/*
+ * Use a fixed-size array to make the Mac OS X support efficient by
+ * avoiding the use of memory-map or copy-in routines.
+ */
+#define HAX_MAX_MSR_ARRAY 0x20
+struct hax_msr_data
+{
+ uint16_t nr_msr;
+ uint16_t done;
+ uint16_t pad[2];
+ struct vmx_msr entries[HAX_MAX_MSR_ARRAY];
+};
+
+union interruptibility_state_t {
+ uint32 raw;
+ struct {
+ uint32 sti_blocking : 1;
+ uint32 movss_blocking : 1;
+ uint32 smi_blocking : 1;
+ uint32 nmi_blocking : 1;
+ uint32 reserved : 28;
+ };
+ uint64_t pad;
+};
+
+typedef union interruptibility_state_t interruptibility_state_t;
+
+// Segment descriptor
+struct segment_desc_t {
+ uint16_t selector;
+ uint16_t _dummy;
+ uint32 limit;
+ uint64 base;
+ union {
+ struct {
+ uint32 type : 4;
+ uint32 desc : 1;
+ uint32 dpl : 2;
+ uint32 present : 1;
+ uint32 : 4;
+ uint32 available : 1;
+ uint32 long_mode : 1;
+ uint32 operand_size : 1;
+ uint32 granularity : 1;
+ uint32 null : 1;
+ uint32 : 15;
+ };
+ uint32 ar;
+ };
+ uint32 ipad;
+};
+
+typedef struct segment_desc_t segment_desc_t;
+
+struct vcpu_state_t
+{
+ union {
+ uint64 _regs[16];
+ struct {
+ union {
+ struct {
+ uint8 _al,
+ _ah;
+ };
+ uint16_t _ax;
+ uint32 _eax;
+ uint64 _rax;
+ };
+ union {
+ struct {
+ uint8 _cl,
+ _ch;
+ };
+ uint16_t _cx;
+ uint32 _ecx;
+ uint64 _rcx;
+ };
+ union {
+ struct {
+ uint8 _dl,
+ _dh;
+ };
+ uint16_t _dx;
+ uint32 _edx;
+ uint64 _rdx;
+ };
+ union {
+ struct {
+ uint8 _bl,
+ _bh;
+ };
+ uint16_t _bx;
+ uint32 _ebx;
+ uint64 _rbx;
+ };
+ union {
+ uint16_t _sp;
+ uint32 _esp;
+ uint64 _rsp;
+ };
+ union {
+ uint16_t _bp;
+ uint32 _ebp;
+ uint64 _rbp;
+ };
+ union {
+ uint16_t _si;
+ uint32 _esi;
+ uint64 _rsi;
+ };
+ union {
+ uint16_t _di;
+ uint32 _edi;
+ uint64 _rdi;
+ };
+
+ uint64 _r8;
+ uint64 _r9;
+ uint64 _r10;
+ uint64 _r11;
+ uint64 _r12;
+ uint64 _r13;
+ uint64 _r14;
+ uint64 _r15;
+ };
+ };
+
+ union {
+ uint32 _eip;
+ uint64 _rip;
+ };
+
+ union {
+ uint32 _eflags;
+ uint64 _rflags;
+ };
+
+ segment_desc_t _cs;
+ segment_desc_t _ss;
+ segment_desc_t _ds;
+ segment_desc_t _es;
+ segment_desc_t _fs;
+ segment_desc_t _gs;
+ segment_desc_t _ldt;
+ segment_desc_t _tr;
+
+ segment_desc_t _gdt;
+ segment_desc_t _idt;
+
+ uint64 _cr0;
+ uint64 _cr2;
+ uint64 _cr3;
+ uint64 _cr4;
+
+ uint64 _dr0;
+ uint64 _dr1;
+ uint64 _dr2;
+ uint64 _dr3;
+ uint64 _dr6;
+ uint64 _dr7;
+ uint64 _pde;
+
+ uint32 _efer;
+
+ uint32 _sysenter_cs;
+ uint64 _sysenter_eip;
+ uint64 _sysenter_esp;
+
+ uint32 _activity_state;
+ uint32 pad;
+ interruptibility_state_t _interruptibility_state;
+};
+
+/*
+ * The HAX tunnel is a per-vcpu shared memory region between QEMU and the
+ * HAX driver. It is used to pass information between QEMU and the driver,
+ * like KVM_RUN's kvm_run structure.
+ *
+ * In the HAX_VCPU_IOCTL_SETUP_TUNNEL ioctl, the HAX driver allocates the
+ * memory, maps it into QEMU's virtual address space and returns the
+ * virtual address and size to QEMU through the hax_tunnel_info structure
+ */
+struct hax_tunnel
+{
+ uint32_t _exit_reason;
+ uint32_t _exit_flag;
+ uint32_t _exit_status;
+ uint32_t user_event_pending;
+ int ready_for_interrupt_injection;
+ int request_interrupt_window;
+ union {
+ struct {
+        /* _direction: 1 = IN (read from port), 0 = OUT (write to port) */
+#define HAX_EXIT_IO_IN 1
+#define HAX_EXIT_IO_OUT 0
+ uint8_t _direction;
+ uint8_t _df;
+ uint16_t _size;
+ uint16_t _port;
+ uint16_t _count;
+ uint8_t _flags;
+ uint8_t _pad0;
+ uint16_t _pad1;
+ uint32_t _pad2;
+ uint64_t _vaddr;
+ } pio;
+ struct {
+ uint64_t gla;
+ } mmio;
+ struct {
+ } state;
+ };
+};
+
+struct hax_tunnel_info
+{
+ uint64_t va;
+ uint64_t io_va;
+ uint16_t size;
+ uint16_t pad[3];
+};
+
+/* The exit status in the HAX tunnel after a HAX_VCPU_IOCTL_RUN ioctl */
+enum exit_status {
+    /* IO port emulation request */
+    HAX_EXIT_IO = 1,
+    /* MMIO instruction emulation request
+     * QEMU emulates an MMIO instruction in the following steps:
+     * 1. When the guest accesses an MMIO address, it traps into the HAX driver
+     * 2. The HAX driver returns to QEMU with the instruction pointer address
+     * 3. QEMU syncs the vcpu state from the HAX driver
+     * 4. QEMU emulates this instruction
+     * 5. QEMU syncs the vcpu state back to the HAX driver
+     * 6. The HAX driver continues running the guest through HAX_VCPU_IOCTL_RUN
+     * (see the dispatch sketch after this header)
+     */
+    HAX_EXIT_MMIO,
+    /*
+     * QEMU emulation mode request
+     * QEMU emulates guest instructions when the guest runs in real mode
+     * or in protected mode without paging enabled (non-PG mode)
+     */
+    HAX_EXIT_REAL,
+    /*
+     * Interrupt window open, QEMU can inject an interrupt now.
+     * Also used to indicate that a signal is pending for QEMU
+     */
+    HAX_EXIT_INTERRUPT,
+    /* Unknown vmexit, mostly triggers a reboot */
+    HAX_EXIT_UNKNOWN_VMEXIT,
+    /*
+     * Halt in guest
+     * When the guest executes the HLT instruction with interrupts enabled,
+     * HAX returns back to QEMU.
+     */
+    HAX_EXIT_HLT,
+    /* Reboot request, e.g. because of a triple fault in the guest */
+    HAX_EXIT_STATECHANGE,
+    /*
+     * The vcpu is paused
+     * Currently a vcpu is only paused when it is about to be destroyed,
+     * so simply return to HAX
+     */
+    HAX_EXIT_PAUSED,
+};
+
+/*
+ * The API version between QEMU and the HAX driver
+ * compat_version defines the oldest API version the HAX driver can support
+ */
+struct hax_module_version
+{
+ uint32_t compat_version;
+ uint32_t cur_version;
+};
+
+/* See comments for HAX_VM_IOCTL_ALLOC_RAM ioctl */
+struct hax_alloc_ram_info
+{
+ uint32_t size;
+ uint32_t pad;
+ uint64_t va;
+};
+
+/* See comments for HAX_VM_IOCTL_SET_RAM ioctl */
+#define HAX_RAM_INFO_ROM 0x1
+struct hax_set_ram_info
+{
+ uint64_t pa_start;
+ uint32_t size;
+ uint8_t flags;
+ uint8_t pad[3];
+ uint64_t va;
+};
+
+#endif
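The hax_tunnel structure above is consumed after each HAX_VCPU_IOCTL_RUN return. A condensed sketch of the dispatch follows; the real dispatch is hax_vcpu_hax_exec() in hax-all.c, and handle_one_exit is a hypothetical name.

    /* Hypothetical exit dispatch over the per-vcpu tunnel. */
    static void handle_one_exit(CPUState *env, struct hax_vcpu_state *vcpu)
    {
        struct hax_tunnel *ht = vcpu->tunnel;

        switch (ht->_exit_status) {
        case HAX_EXIT_IO:
            /* Port I/O: the data travels through the shared iobuf page. */
            hax_handle_io(env, ht->pio._df, ht->pio._port, ht->pio._direction,
                          ht->pio._size, ht->pio._count, vcpu->iobuf);
            break;
        case HAX_EXIT_MMIO:
            /* Steps 3-5 of the HAX_EXIT_MMIO comment: sync state in,
             * emulate one instruction, sync state back. */
            hax_vcpu_sync_state(env, 0);
            /* ... emulate the instruction at env->eip ... */
            hax_vcpu_sync_state(env, 1);
            break;
        default:
            break;
        }
    }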
diff --git a/target-i386/hax-windows.c b/target-i386/hax-windows.c
new file mode 100644
index 0000000..683a227
--- /dev/null
+++ b/target-i386/hax-windows.c
@@ -0,0 +1,466 @@
+/*
+** Copyright (c) 2011, Intel Corporation
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+#include "target-i386/hax-i386.h"
+
+/*
+ * return 0 upon success, -1 when the driver is not loaded,
+ * other negative value for other failures
+ */
+static int hax_open_device(hax_fd *fd)
+{
+ uint32_t errNum = 0;
+ HANDLE hDevice;
+
+ if (!fd)
+ return -2;
+
+ hDevice = CreateFile( "\\\\.\\HAX",
+ GENERIC_READ | GENERIC_WRITE,
+ 0,
+ NULL,
+ CREATE_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+
+ if (hDevice == INVALID_HANDLE_VALUE)
+ {
+ dprint("Failed to open the HAX device!\n");
+ errNum = GetLastError();
+ if (errNum == ERROR_FILE_NOT_FOUND)
+ return -1;
+ return -2;
+ }
+ *fd = hDevice;
+ dprint("device fd:%d\n", *fd);
+ return 0;
+}
+
+
+hax_fd hax_mod_open(void)
+{
+ int ret;
+ hax_fd fd;
+
+ ret = hax_open_device(&fd);
+ if (ret != 0)
+ dprint("Open HAX device failed\n");
+
+ return fd;
+}
+
+int hax_populate_ram(uint64_t va, uint32_t size)
+{
+ int ret;
+ struct hax_alloc_ram_info info;
+ HANDLE hDeviceVM;
+ DWORD dSize = 0;
+
+ if (!hax_global.vm || !hax_global.vm->fd)
+ {
+ dprint("Allocate memory before vm create?\n");
+ return -EINVAL;
+ }
+
+ info.size = size;
+ info.va = va;
+
+ hDeviceVM = hax_global.vm->fd;
+
+ ret = DeviceIoControl(hDeviceVM,
+ HAX_VM_IOCTL_ALLOC_RAM,
+ &info, sizeof(info),
+ NULL, 0,
+ &dSize,
+ (LPOVERLAPPED) NULL);
+
+ if (!ret) {
+ dprint("Failed to allocate %x memory\n", size);
+ return ret;
+ }
+
+ return 0;
+}
+
+
+int hax_set_phys_mem(target_phys_addr_t start_addr, ram_addr_t size, ram_addr_t phys_offset)
+{
+ struct hax_set_ram_info info, *pinfo = &info;
+ ram_addr_t flags = phys_offset & ~TARGET_PAGE_MASK;
+ HANDLE hDeviceVM;
+ DWORD dSize = 0;
+ int ret = 0;
+
+ /* We look for the RAM and ROM only */
+ if (flags >= IO_MEM_UNASSIGNED)
+ return 0;
+
+ if ( (start_addr & ~TARGET_PAGE_MASK) || (size & ~TARGET_PAGE_MASK))
+ {
+ dprint(
+ "set_phys_mem %x %lx requires page aligned addr and size\n",
+ start_addr, size);
+ return -1;
+ }
+
+ info.pa_start = start_addr;
+ info.size = size;
+ info.va = (uint64_t)qemu_get_ram_ptr(phys_offset);
+    info.flags = (flags & IO_MEM_ROM) ? HAX_RAM_INFO_ROM : 0;
+
+ hDeviceVM = hax_global.vm->fd;
+
+ ret = DeviceIoControl(hDeviceVM,
+ HAX_VM_IOCTL_SET_RAM,
+ pinfo, sizeof(*pinfo),
+ NULL, 0,
+ &dSize,
+ (LPOVERLAPPED) NULL);
+
+ if (!ret)
+ return -EFAULT;
+ else
+ return 0;
+}
+
+int hax_mod_version(struct hax_state *hax, struct hax_module_version *version)
+{
+ int ret;
+    HANDLE hDevice = hax->fd; /* handle to the HAX module */
+ DWORD dSize = 0;
+ DWORD err = 0;
+
+ if (hax_invalid_fd(hDevice)) {
+ dprint("Invalid fd for hax device!\n");
+ return -ENODEV;
+ }
+
+ ret = DeviceIoControl(hDevice,
+ HAX_IOCTL_VERSION,
+ NULL, 0,
+ version, sizeof(*version),
+ &dSize,
+ (LPOVERLAPPED) NULL);
+
+ if (!ret) {
+ err = GetLastError();
+ if (err == ERROR_INSUFFICIENT_BUFFER ||
+ err == ERROR_MORE_DATA)
+ dprint("HAX module is too large.\n");
+ dprint("Failed to get Hax module version:%d\n", err);
+ return -EFAULT;
+ } else
+ return 0;
+}
+
+static char *hax_vm_devfs_string(int vm_id)
+{
+ char *name;
+
+ if (vm_id > MAX_VM_ID)
+ {
+ dprint("Too big VM id\n");
+ return NULL;
+ }
+
+ name = qemu_strdup("\\\\.\\hax_vmxx");
+ if (!name)
+ return NULL;
+ sprintf(name, "\\\\.\\hax_vm%02d", vm_id);
+
+ return name;
+}
+
+static char *hax_vcpu_devfs_string(int vm_id, int vcpu_id)
+{
+ char *name;
+
+ if (vm_id > MAX_VM_ID || vcpu_id > MAX_VCPU_ID)
+ {
+ dprint("Too big vm id %x or vcpu id %x\n", vm_id, vcpu_id);
+ return NULL;
+ }
+ name = qemu_strdup("\\\\.\\hax_vmxx_vcpuxx");
+ if (!name)
+ return NULL;
+ sprintf(name, "\\\\.\\hax_vm%02d_vcpu%02d", vm_id, vcpu_id);
+
+ return name;
+}
+
+int hax_host_create_vm(struct hax_state *hax, int vm_id)
+{
+ int ret;
+ DWORD dSize = 0;
+
+ if (hax_invalid_fd(hax->fd))
+ return -EINVAL;
+
+ if (hax->vm)
+ return 0;
+
+ ret = DeviceIoControl(hax->fd,
+ HAX_IOCTL_CREATE_VM,
+ NULL, 0,
+ &vm_id, sizeof(vm_id),
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret) {
+ dprint("error code:%d", GetLastError());
+ return -1;
+ }
+
+ return 0;
+}
+
+hax_fd hax_host_open_vm(struct hax_state *hax, int vm_id)
+{
+ char *vm_name = NULL;
+ hax_fd hDeviceVM;
+
+ vm_name = hax_vm_devfs_string(vm_id);
+ if (!vm_name) {
+ dprint("Incorrect name\n");
+ return INVALID_HANDLE_VALUE;
+ }
+
+ hDeviceVM = CreateFile(vm_name,
+ GENERIC_READ | GENERIC_WRITE,
+ 0,
+ NULL,
+ CREATE_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+ if (hDeviceVM == INVALID_HANDLE_VALUE)
+ dprint("Open the vm devcie error:%s, ec:%d\n", vm_name, GetLastError());
+
+ qemu_free(vm_name);
+ return hDeviceVM;
+}
+
+int hax_host_create_vcpu(hax_fd vm_fd, int vcpuid)
+{
+ int ret;
+ DWORD dSize = 0;
+
+ ret = DeviceIoControl(vm_fd,
+ HAX_VM_IOCTL_VCPU_CREATE,
+ &vcpuid, sizeof(vcpuid),
+ NULL, 0,
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret)
+ {
+ dprint("Failed to create vcpu %x\n", vcpuid);
+ return -1;
+ }
+
+ return 0;
+}
+
+hax_fd hax_host_open_vcpu(int vmid, int vcpuid)
+{
+ char *devfs_path = NULL;
+ hax_fd hDeviceVCPU;
+
+ devfs_path = hax_vcpu_devfs_string(vmid, vcpuid);
+ if (!devfs_path)
+ {
+ dprint("Failed to get the devfs\n");
+ return INVALID_HANDLE_VALUE;
+ }
+
+ hDeviceVCPU = CreateFile( devfs_path,
+ GENERIC_READ | GENERIC_WRITE,
+ 0,
+ NULL,
+ CREATE_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL,
+ NULL);
+
+ if (hDeviceVCPU == INVALID_HANDLE_VALUE)
+ dprint("Failed to open the vcpu devfs\n");
+ qemu_free(devfs_path);
+ return hDeviceVCPU;
+}
+
+int hax_host_setup_vcpu_channel(struct hax_vcpu_state *vcpu)
+{
+ hax_fd hDeviceVCPU = vcpu->fd;
+ int ret;
+ struct hax_tunnel_info info;
+ DWORD dSize = 0;
+
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_SETUP_TUNNEL,
+ NULL, 0,
+ &info, sizeof(info),
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret)
+ {
+ dprint("Failed to setup the hax tunnel\n");
+ return -1;
+ }
+
+ if (!valid_hax_tunnel_size(info.size))
+ {
+ dprint("Invalid hax tunnel size %x\n", info.size);
+ ret = -EINVAL;
+ return ret;
+ }
+ vcpu->tunnel = (struct hax_tunnel *)(info.va);
+ vcpu->iobuf = (unsigned char *)(info.io_va);
+ return 0;
+}
+
+int hax_vcpu_run(struct hax_vcpu_state* vcpu)
+{
+ int ret;
+ HANDLE hDeviceVCPU = vcpu->fd;
+ DWORD dSize = 0;
+
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_RUN,
+ NULL, 0,
+ NULL, 0,
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret)
+ return -EFAULT;
+ else
+ return 0;
+}
+
+int hax_sync_fpu(CPUState *env, struct fx_layout *fl, int set)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize = 0;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd))
+ return -1;
+
+ hDeviceVCPU = fd;
+
+ if (set)
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_SET_FPU,
+ fl, sizeof(*fl),
+ NULL, 0,
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ else
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_GET_FPU,
+ NULL, 0,
+ fl, sizeof(*fl),
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret)
+ return -EFAULT;
+ else
+ return 0;
+}
+
+int hax_sync_msr(CPUState *env, struct hax_msr_data *msrs, int set)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize = 0;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd))
+ return -1;
+ hDeviceVCPU = fd;
+
+ if (set)
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_SET_MSRS,
+ msrs, sizeof(*msrs),
+ msrs, sizeof(*msrs),
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ else
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_GET_MSRS,
+ msrs, sizeof(*msrs),
+ msrs, sizeof(*msrs),
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret)
+ return -EFAULT;
+ else
+ return 0;
+}
+
+int hax_sync_vcpu_state(CPUState *env, struct vcpu_state_t *state, int set)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd))
+ return -1;
+
+ hDeviceVCPU = fd;
+
+ if (set)
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_SET_REGS,
+ state, sizeof(*state),
+ NULL, 0,
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ else
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_GET_REGS,
+ NULL, 0,
+ state, sizeof(*state),
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret)
+ return -EFAULT;
+ else
+ return 0;
+}
+
+int hax_inject_interrupt(CPUState *env, int vector)
+{
+ int ret;
+ hax_fd fd;
+ HANDLE hDeviceVCPU;
+ DWORD dSize;
+
+ fd = hax_vcpu_get_fd(env);
+ if (hax_invalid_fd(fd))
+ return -1;
+
+ hDeviceVCPU = fd;
+
+ ret = DeviceIoControl(hDeviceVCPU,
+ HAX_VCPU_IOCTL_INTERRUPT,
+ &vector, sizeof(vector),
+ NULL, 0,
+ &dSize,
+ (LPOVERLAPPED) NULL);
+ if (!ret)
+ return -EFAULT;
+ else
+ return 0;
+}
diff --git a/target-i386/hax-windows.h b/target-i386/hax-windows.h
new file mode 100644
index 0000000..b6d60b7
--- /dev/null
+++ b/target-i386/hax-windows.h
@@ -0,0 +1,65 @@
+/*
+** Copyright (c) 2011, Intel Corporation
+**
+** This software is licensed under the terms of the GNU General Public
+** License version 2, as published by the Free Software Foundation, and
+** may be copied, distributed, and modified under those terms.
+**
+** This program is distributed in the hope that it will be useful,
+** but WITHOUT ANY WARRANTY; without even the implied warranty of
+** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+** GNU General Public License for more details.
+*/
+
+#ifndef __HAX_WINDOWS_H
+#define __HAX_WINDOWS_H
+
+#include <windows.h>
+#include <memory.h>
+#include <malloc.h>
+#include <winioctl.h>
+#include <string.h>
+#include <stdio.h>
+#include <windef.h>
+
+#define HAX_INVALID_FD INVALID_HANDLE_VALUE
+
+static inline void hax_mod_close(struct hax_state *hax)
+{
+ CloseHandle(hax->fd);
+}
+
+static inline void hax_close_fd(hax_fd fd)
+{
+ CloseHandle(fd);
+}
+
+static inline int hax_invalid_fd(hax_fd fd)
+{
+ return (fd == INVALID_HANDLE_VALUE);
+}
+
+
+#define HAX_DEVICE_TYPE 0x4000
+
+/* See the comments for these ioctls in hax-darwin.h */
+#define HAX_IOCTL_VERSION CTL_CODE(HAX_DEVICE_TYPE, 0x900, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_IOCTL_CREATE_VM CTL_CODE(HAX_DEVICE_TYPE, 0x901, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VM_IOCTL_VCPU_CREATE CTL_CODE(HAX_DEVICE_TYPE, 0x902, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_ALLOC_RAM CTL_CODE(HAX_DEVICE_TYPE, 0x903, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VM_IOCTL_SET_RAM CTL_CODE(HAX_DEVICE_TYPE, 0x904, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VCPU_IOCTL_RUN CTL_CODE(HAX_DEVICE_TYPE, 0x906, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_SET_MSRS CTL_CODE(HAX_DEVICE_TYPE, 0x907, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_GET_MSRS CTL_CODE(HAX_DEVICE_TYPE, 0x908, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VCPU_IOCTL_SET_FPU CTL_CODE(HAX_DEVICE_TYPE, 0x909, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_GET_FPU CTL_CODE(HAX_DEVICE_TYPE, 0x90a, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#define HAX_VCPU_IOCTL_SETUP_TUNNEL CTL_CODE(HAX_DEVICE_TYPE, 0x90b, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_IOCTL_INTERRUPT CTL_CODE(HAX_DEVICE_TYPE, 0x90c, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_SET_REGS CTL_CODE(HAX_DEVICE_TYPE, 0x90d, METHOD_BUFFERED, FILE_ANY_ACCESS)
+#define HAX_VCPU_GET_REGS CTL_CODE(HAX_DEVICE_TYPE, 0x90e, METHOD_BUFFERED, FILE_ANY_ACCESS)
+
+#endif
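For reference, the HAX_IOCTL_VERSION request that hax_mod_version() issues can be exercised standalone with raw Win32 calls. A sketch using the same CreateFile flags as hax_open_device() in hax-windows.c; probe_hax_version is a hypothetical name, and hax-interface.h is assumed for struct hax_module_version.

    #include <windows.h>

    /* Hypothetical standalone probe of the HAX driver's API version. */
    static int probe_hax_version(void)
    {
        struct hax_module_version ver;
        DWORD dSize = 0;
        BOOL ok;

        HANDLE h = CreateFile("\\\\.\\HAX", GENERIC_READ | GENERIC_WRITE,
                              0, NULL, CREATE_ALWAYS,
                              FILE_ATTRIBUTE_NORMAL, NULL);
        if (h == INVALID_HANDLE_VALUE)
            return -1; /* driver not loaded */

        ok = DeviceIoControl(h, HAX_IOCTL_VERSION, NULL, 0,
                             &ver, sizeof(ver), &dSize, NULL);
        CloseHandle(h);
        return ok ? (int)ver.cur_version : -1;
    }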