author    David 'Digit' Turner <digit@android.com>  2011-05-11 17:37:44 +0200
committer David 'Digit' Turner <digit@android.com>  2011-06-01 17:08:19 +0200
commit    280afa072a7b829e581d884c2b3276530a6014b7 (patch)
tree      6f1f213d66db8a5f89994bdfda7c9cd24140ffeb
parent    ae3098a3bce898cf958a6c3334f3d62282b12d2a (diff)
ramblocks: integrate upstream implementation (sophisticated)
Change-Id: I49e96e2d5ae571849b0b6fef0a30b41ecdee8d23
-rwxr-xr-x  android-configure.sh                      |   6
-rw-r--r--  android/config/darwin-x86/config-host.h   |   1
-rw-r--r--  android/config/freebsd-x86/config-host.h  |   1
-rw-r--r--  android/config/linux-ppc/config-host.h    |   1
-rw-r--r--  android/config/linux-x86/config-host.h    |   1
-rw-r--r--  arch_init.c                               |   4
-rw-r--r--  cpu-all.h                                 |  68
-rw-r--r--  cpu-common.h                              |  26
-rw-r--r--  exec-all.h                                |  64
-rw-r--r--  exec.c                                    | 392
-rw-r--r--  hw/android_arm.c                          |   2
-rw-r--r--  hw/armv7m.c                               |  14
-rw-r--r--  hw/hw.h                                   |   4
-rw-r--r--  hw/pc.c                                   |  26
-rw-r--r--  migration.h                               |   7
-rw-r--r--  osdep.c                                   |  22
-rw-r--r--  vl-android.c                              | 315
-rw-r--r--  vl.c                                      | 318
18 files changed, 449 insertions, 823 deletions
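
Summary of the change, as visible in the hunks below: the singly-linked ram_blocks list and the global phys_ram_dirty array are replaced by the upstream RAMList of named RAMBlocks plus a phys_dirty bitmap with new dirty-flag accessors; qemu_ram_alloc() gains DeviceState and name parameters; qemu_ram_alloc_from_ptr(), qemu_ram_remap(), qemu_safe_ram_ptr() and qemu_ram_addr_from_host_nofail() are introduced; a qemu_madvise() wrapper is added behind CONFIG_MADVISE; and the duplicated ram save/restore code in vl.c and vl-android.c is dropped in favour of the copy in arch_init.c. Board code adapts to the new allocator signature, for example in the hw/pc.c hunk:

    /* before */  ram_addr = qemu_ram_alloc(below_4g_mem_size);
    /* after  */  ram_addr = qemu_ram_alloc(NULL, "pc.ram", below_4g_mem_size);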
diff --git a/android-configure.sh b/android-configure.sh
index 5a1748c..4939323 100755
--- a/android-configure.sh
+++ b/android-configure.sh
@@ -482,6 +482,12 @@ case "$TARGET_OS" in
;;
esac
+case "$TARGET_OS" in
+ linux-*|darwin-*)
+ echo "#define CONFIG_MADVISE 1" >> $config_h
+ ;;
+esac
+
# the -nand-limits options can only work on non-windows systems
if [ "$TARGET_OS" != "windows" ] ; then
echo "#define CONFIG_NAND_LIMITS 1" >> $config_h
diff --git a/android/config/darwin-x86/config-host.h b/android/config/darwin-x86/config-host.h
index 437971b..0395918 100644
--- a/android/config/darwin-x86/config-host.h
+++ b/android/config/darwin-x86/config-host.h
@@ -17,3 +17,4 @@
#define MAP_ANONYMOUS MAP_ANON
#define CONFIG_ANDROID 1
#define CONFIG_POSIX 1
+#define CONFIG_MADVISE 1
diff --git a/android/config/freebsd-x86/config-host.h b/android/config/freebsd-x86/config-host.h
index 582583c..2e44a83 100644
--- a/android/config/freebsd-x86/config-host.h
+++ b/android/config/freebsd-x86/config-host.h
@@ -15,3 +15,4 @@
#define CONFIG_SKINS 1
#define CONFIG_ANDROID 1
#define CONFIG_POSIX 1
+#define CONFIG_MADVISE 1
diff --git a/android/config/linux-ppc/config-host.h b/android/config/linux-ppc/config-host.h
index 03d2b22..9faf170 100644
--- a/android/config/linux-ppc/config-host.h
+++ b/android/config/linux-ppc/config-host.h
@@ -17,3 +17,4 @@
#define CONFIG_LINUX 1
#define CONFIG_POSIX 1
#define CONFIG_ANDROID 1
+#define CONFIG_MADVISE 1
diff --git a/android/config/linux-x86/config-host.h b/android/config/linux-x86/config-host.h
index 15d6ab8..70a0eed 100644
--- a/android/config/linux-x86/config-host.h
+++ b/android/config/linux-x86/config-host.h
@@ -17,3 +17,4 @@
#define CONFIG_LINUX 1
#define CONFIG_POSIX 1
#define CONFIG_ANDROID 1
+#define CONFIG_MADVISE 1
diff --git a/arch_init.c b/arch_init.c
index 81e6116..b966d8a 100644
--- a/arch_init.c
+++ b/arch_init.c
@@ -82,7 +82,7 @@ const char arch_config_name[] = CONFIG_QEMU_SHAREDIR "/target-" TARGET_ARCH ".co
const uint32_t arch_type = QEMU_ARCH;
-#if 0
+#if 1
/***********************************************************/
/* ram save/restore */
@@ -249,7 +249,7 @@ static void sort_ram_list(void)
qemu_free(blocks);
}
-int ram_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
+int ram_save_live(QEMUFile *f, int stage, void *opaque)
{
ram_addr_t addr;
uint64_t bytes_transferred_last;
diff --git a/cpu-all.h b/cpu-all.h
index 01e6838..bf4197b 100644
--- a/cpu-all.h
+++ b/cpu-all.h
@@ -765,15 +765,15 @@ int page_check_range(target_ulong start, target_ulong len, int flags);
CPUState *cpu_copy(CPUState *env);
CPUState *qemu_get_cpu(int cpu);
-void cpu_dump_state(CPUState *env, FILE *f,
- int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+#define CPU_DUMP_CODE 0x00010000
+
+void cpu_dump_state(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
int flags);
-void cpu_dump_statistics (CPUState *env, FILE *f,
- int (*cpu_fprintf)(FILE *f, const char *fmt, ...),
+void cpu_dump_statistics(CPUState *env, FILE *f, fprintf_function cpu_fprintf,
int flags);
void QEMU_NORETURN cpu_abort(CPUState *env, const char *fmt, ...)
- __attribute__ ((__format__ (__printf__, 2, 3)));
+ GCC_FMT_ATTR(2, 3);
extern CPUState *first_cpu;
extern CPUState *cpu_single_env;
@@ -862,9 +862,31 @@ target_phys_addr_t cpu_get_phys_page_debug(CPUState *env, target_ulong addr);
/* memory API */
extern int phys_ram_fd;
-extern uint8_t *phys_ram_dirty;
extern ram_addr_t ram_size;
-extern ram_addr_t last_ram_offset;
+
+/* RAM is pre-allocated and passed into qemu_ram_alloc_from_ptr */
+#define RAM_PREALLOC_MASK (1 << 0)
+
+typedef struct RAMBlock {
+ uint8_t *host;
+ ram_addr_t offset;
+ ram_addr_t length;
+ uint32_t flags;
+ char idstr[256];
+ QLIST_ENTRY(RAMBlock) next;
+#if defined(__linux__) && !defined(TARGET_S390X)
+ int fd;
+#endif
+} RAMBlock;
+
+typedef struct RAMList {
+ uint8_t *phys_dirty;
+ QLIST_HEAD(ram, RAMBlock) blocks;
+} RAMList;
+extern RAMList ram_list;
+
+extern const char *mem_path;
+extern int mem_prealloc;
/* physical memory access */
@@ -891,18 +913,44 @@ extern ram_addr_t last_ram_offset;
/* read dirty bit (return 0 or 1) */
static inline int cpu_physical_memory_is_dirty(ram_addr_t addr)
{
- return phys_ram_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] == 0xff;
+}
+
+static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
+{
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
}
static inline int cpu_physical_memory_get_dirty(ram_addr_t addr,
int dirty_flags)
{
- return phys_ram_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] & dirty_flags;
}
static inline void cpu_physical_memory_set_dirty(ram_addr_t addr)
{
- phys_ram_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
+ ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] = 0xff;
+}
+
+static inline int cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
+ int dirty_flags)
+{
+ return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
+}
+
+static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
+ int length,
+ int dirty_flags)
+{
+ int i, mask, len;
+ uint8_t *p;
+
+ len = length >> TARGET_PAGE_BITS;
+ mask = ~dirty_flags;
+ p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
+ for (i = 0; i < len; i++) {
+ p[i] &= mask;
+ }
}
void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
diff --git a/cpu-common.h b/cpu-common.h
index a422689..9e32177 100644
--- a/cpu-common.h
+++ b/cpu-common.h
@@ -28,10 +28,21 @@ typedef unsigned long ram_addr_t;
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
-void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
+void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
ram_addr_t size,
ram_addr_t phys_offset,
- ram_addr_t region_offset);
+ ram_addr_t region_offset,
+ bool log_dirty);
+
+static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
+ ram_addr_t size,
+ ram_addr_t phys_offset,
+ ram_addr_t region_offset)
+{
+ cpu_register_physical_memory_log(start_addr, size, phys_offset,
+ region_offset, false);
+}
+
static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
ram_addr_t size,
ram_addr_t phys_offset)
@@ -40,12 +51,19 @@ static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
}
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
-ram_addr_t qemu_ram_alloc(ram_addr_t);
+ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
+ ram_addr_t size, void *host);
+ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
void qemu_ram_free(ram_addr_t addr);
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device. */
void *qemu_get_ram_ptr(ram_addr_t addr);
+/* Same but slower, to use for migration, where the order of
+ * RAMBlocks must not change. */
+void *qemu_safe_ram_ptr(ram_addr_t addr);
/* This should not be used by devices. */
-ram_addr_t qemu_ram_addr_from_host(void *ptr);
+int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
CPUWriteMemoryFunc * const *mem_write,
diff --git a/exec-all.h b/exec-all.h
index b680b4e..91d1bda 100644
--- a/exec-all.h
+++ b/exec-all.h
@@ -25,6 +25,15 @@
/* allow to see translation results - the slowdown should be negligible, so we leave it */
#define DEBUG_DISAS
+/* Page tracking code uses ram addresses in system mode, and virtual
+ addresses in userspace mode. Define tb_page_addr_t to be an appropriate
+ type. */
+#if defined(CONFIG_USER_ONLY)
+typedef abi_ulong tb_page_addr_t;
+#else
+typedef ram_addr_t tb_page_addr_t;
+#endif
+
/* is_jmp field values */
#define DISAS_NEXT 0 /* next instruction can be analyzed */
#define DISAS_JUMP 1 /* only pc was modified dynamically */
@@ -182,7 +191,7 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
static inline unsigned int tb_phys_hash_func(unsigned long pc)
{
- return pc & (CODE_GEN_PHYS_HASH_SIZE - 1);
+ return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
}
#ifdef CONFIG_MEMCHECK
@@ -398,60 +407,9 @@ static inline target_ulong get_phys_addr_code(CPUState *env1, target_ulong addr)
}
p = (void *)(unsigned long)addr
+ env1->tlb_table[mmu_idx][page_index].addend;
- return qemu_ram_addr_from_host(p);
-}
-
-#if 0
-/* Deterministic execution requires that IO only be performed on the last
- instruction of a TB so that interrupts take effect immediately. */
-static inline int can_do_io(CPUState *env)
-{
- if (!use_icount)
- return 1;
-
- /* If not executing code then assume we are ok. */
- if (!env->current_tb)
- return 1;
-
- return env->can_do_io != 0;
+ return qemu_ram_addr_from_host_nofail(p);
}
#endif
-#endif /* 0 */
-
-#ifdef CONFIG_KQEMU
-#define KQEMU_MODIFY_PAGE_MASK (0xff & ~(VGA_DIRTY_FLAG | CODE_DIRTY_FLAG))
-
-#define MSR_QPI_COMMBASE 0xfabe0010
-
-int kqemu_init(CPUState *env);
-int kqemu_cpu_exec(CPUState *env);
-void kqemu_flush_page(CPUState *env, target_ulong addr);
-void kqemu_flush(CPUState *env, int global);
-void kqemu_set_notdirty(CPUState *env, ram_addr_t ram_addr);
-void kqemu_modify_page(CPUState *env, ram_addr_t ram_addr);
-void kqemu_set_phys_mem(uint64_t start_addr, ram_addr_t size,
- ram_addr_t phys_offset);
-void kqemu_cpu_interrupt(CPUState *env);
-void kqemu_record_dump(void);
-
-extern uint32_t kqemu_comm_base;
-
-extern ram_addr_t kqemu_phys_ram_size;
-extern uint8_t *kqemu_phys_ram_base;
-
-static inline int kqemu_is_ok(CPUState *env)
-{
- return(env->kqemu_enabled &&
- (env->cr[0] & CR0_PE_MASK) &&
- !(env->hflags & HF_INHIBIT_IRQ_MASK) &&
- (env->eflags & IF_MASK) &&
- !(env->eflags & VM_MASK) &&
- (env->kqemu_enabled == 2 ||
- ((env->hflags & HF_CPL_MASK) == 3 &&
- (env->eflags & IOPL_MASK) != IOPL_MASK)));
-}
-
-#endif
typedef void (CPUDebugExcpHandler)(CPUState *env);
diff --git a/exec.c b/exec.c
index 113917e..285c950 100644
--- a/exec.c
+++ b/exec.c
@@ -115,21 +115,9 @@ uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
-uint8_t *phys_ram_dirty;
static int in_migration;
-typedef struct RAMBlock {
- uint8_t *host;
- ram_addr_t offset;
- ram_addr_t length;
- struct RAMBlock *next;
-} RAMBlock;
-
-static RAMBlock *ram_blocks;
-/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
- then we can no longer assume contiguous ram offsets, and external uses
- of this variable will break. */
-ram_addr_t last_ram_offset;
+RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif
CPUState *first_cpu;
@@ -479,7 +467,8 @@ static void code_gen_alloc(unsigned long tb_size)
exit(1);
}
}
-#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
+ || defined(__DragonFly__) || defined(__OpenBSD__)
{
int flags;
void *addr = NULL;
@@ -492,6 +481,13 @@ static void code_gen_alloc(unsigned long tb_size)
/* Cannot map more than that */
if (code_gen_buffer_size > (800 * 1024 * 1024))
code_gen_buffer_size = (800 * 1024 * 1024);
+#elif defined(__sparc_v9__)
+ // Map the buffer below 2G, so we can use direct calls and branches
+ flags |= MAP_FIXED;
+ addr = (void *) 0x60000000UL;
+ if (code_gen_buffer_size > (512 * 1024 * 1024)) {
+ code_gen_buffer_size = (512 * 1024 * 1024);
+ }
#endif
code_gen_buffer = mmap(addr, code_gen_buffer_size,
PROT_WRITE | PROT_READ | PROT_EXEC,
@@ -1861,7 +1857,7 @@ static void tlb_protect_code(ram_addr_t ram_addr)
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
target_ulong vaddr)
{
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
+ cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
@@ -1882,8 +1878,7 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
{
CPUState *env;
unsigned long length, start1;
- int i, mask, len;
- uint8_t *p;
+ int i;
start &= TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
@@ -1891,18 +1886,14 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
length = end - start;
if (length == 0)
return;
- len = length >> TARGET_PAGE_BITS;
- mask = ~dirty_flags;
- p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
- for(i = 0; i < len; i++)
- p[i] &= mask;
+ cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
/* we modify the TLB cache so that the dirty bit will be set again
when accessing the range */
- start1 = (unsigned long)qemu_get_ram_ptr(start);
+ start1 = (unsigned long)qemu_safe_ram_ptr(start);
/* Chek that we don't span multiple blocks - this breaks the
address comparisons below. */
- if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
+ if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
!= (end - 1) - start) {
abort();
}
@@ -1949,7 +1940,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
+ tlb_entry->addend);
- ram_addr = qemu_ram_addr_from_host(p);
+ ram_addr = qemu_ram_addr_from_host_nofail(p);
if (!cpu_physical_memory_is_dirty(ram_addr)) {
tlb_entry->addr_write |= TLB_NOTDIRTY;
}
@@ -2369,16 +2360,17 @@ static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
start_addr and region_offset are rounded down to a page boundary
before calculating this offset. This should not be a problem unless
the low bits of start_addr and region_offset differ. */
-void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
+void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
ram_addr_t size,
ram_addr_t phys_offset,
- ram_addr_t region_offset)
+ ram_addr_t region_offset,
+ bool log_dirty)
{
target_phys_addr_t addr, end_addr;
PhysPageDesc *p;
CPUState *env;
ram_addr_t orig_size = size;
- void *subpage;
+ subpage_t *subpage;
if (kvm_enabled())
kvm_set_phys_mem(start_addr, size, phys_offset);
@@ -2389,7 +2381,9 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
region_offset &= TARGET_PAGE_MASK;
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + (target_phys_addr_t)size;
- for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
+
+ addr = start_addr;
+ do {
p = phys_page_find(addr >> TARGET_PAGE_BITS);
if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
ram_addr_t orig_memory = p->phys_offset;
@@ -2398,7 +2392,7 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
need_subpage);
- if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
+ if (need_subpage) {
if (!(orig_memory & IO_MEM_SUBPAGE)) {
subpage = subpage_init((addr & TARGET_PAGE_MASK),
&p->phys_offset, orig_memory,
@@ -2430,7 +2424,7 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
end_addr2, need_subpage);
- if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
+ if (need_subpage) {
subpage = subpage_init((addr & TARGET_PAGE_MASK),
&p->phys_offset, IO_MEM_UNASSIGNED,
addr & TARGET_PAGE_MASK);
@@ -2441,7 +2435,8 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
}
}
region_offset += TARGET_PAGE_SIZE;
- }
+ addr += TARGET_PAGE_SIZE;
+ } while (addr != end_addr);
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
@@ -2474,46 +2469,216 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
kvm_uncoalesce_mmio_region(addr, size);
}
-ram_addr_t qemu_ram_alloc(ram_addr_t size)
+static ram_addr_t find_ram_offset(ram_addr_t size)
{
- RAMBlock *new_block;
+ RAMBlock *block, *next_block;
+ ram_addr_t offset = 0, mingap = ULONG_MAX;
+
+ if (QLIST_EMPTY(&ram_list.blocks))
+ return 0;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ ram_addr_t end, next = ULONG_MAX;
+
+ end = block->offset + block->length;
+
+ QLIST_FOREACH(next_block, &ram_list.blocks, next) {
+ if (next_block->offset >= end) {
+ next = MIN(next, next_block->offset);
+ }
+ }
+ if (next - end >= size && next - end < mingap) {
+ offset = end;
+ mingap = next - end;
+ }
+ }
+ return offset;
+}
+
+static ram_addr_t last_ram_offset(void)
+{
+ RAMBlock *block;
+ ram_addr_t last = 0;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next)
+ last = MAX(last, block->offset + block->length);
+
+ return last;
+}
+
+ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
+ ram_addr_t size, void *host)
+{
+ RAMBlock *new_block, *block;
size = TARGET_PAGE_ALIGN(size);
- new_block = qemu_malloc(sizeof(*new_block));
+ new_block = qemu_mallocz(sizeof(*new_block));
+#if 0
+ if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
+ char *id = dev->parent_bus->info->get_dev_path(dev);
+ if (id) {
+ snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
+ qemu_free(id);
+ }
+ }
+#endif
+ pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (!strcmp(block->idstr, new_block->idstr)) {
+ fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
+ new_block->idstr);
+ abort();
+ }
+ }
+
+ if (host) {
+ new_block->host = host;
+ new_block->flags |= RAM_PREALLOC_MASK;
+ } else {
+ if (mem_path) {
+#if 0 && defined (__linux__) && !defined(TARGET_S390X)
+ new_block->host = file_ram_alloc(new_block, size, mem_path);
+ if (!new_block->host) {
+ new_block->host = qemu_vmalloc(size);
+ qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+ }
+#else
+ fprintf(stderr, "-mem-path option unsupported\n");
+ exit(1);
+#endif
+ } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
- /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
- new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
+ new_block->host = mmap((void*)0x1000000, size,
+ PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
- new_block->host = qemu_vmalloc(size);
+ new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
- madvise(new_block->host, size, MADV_MERGEABLE);
+ madvise(new_block->host, size, MADV_MERGEABLE);
#endif
- new_block->offset = last_ram_offset;
+ }
+ }
+
+ new_block->offset = find_ram_offset(size);
new_block->length = size;
- new_block->next = ram_blocks;
- ram_blocks = new_block;
+ QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
- phys_ram_dirty = qemu_realloc(phys_ram_dirty,
- (last_ram_offset + size) >> TARGET_PAGE_BITS);
- memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
+ ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
+ last_ram_offset() >> TARGET_PAGE_BITS);
+ memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
0xff, size >> TARGET_PAGE_BITS);
- last_ram_offset += size;
-
if (kvm_enabled())
kvm_setup_guest_memory(new_block->host, size);
return new_block->offset;
}
+ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
+{
+ return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
+}
+
void qemu_ram_free(ram_addr_t addr)
{
- /* TODO: implement this. */
+ RAMBlock *block;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (addr == block->offset) {
+ QLIST_REMOVE(block, next);
+ if (block->flags & RAM_PREALLOC_MASK) {
+ ;
+ } else if (mem_path) {
+#if defined (__linux__) && !defined(TARGET_S390X)
+ if (block->fd) {
+ munmap(block->host, block->length);
+ close(block->fd);
+ } else {
+ qemu_vfree(block->host);
+ }
+#else
+ abort();
+#endif
+ } else {
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+ munmap(block->host, block->length);
+#else
+ qemu_vfree(block->host);
+#endif
+ }
+ qemu_free(block);
+ return;
+ }
+ }
+
+}
+
+#ifndef _WIN32
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
+{
+#ifndef CONFIG_ANDROID
+ RAMBlock *block;
+ ram_addr_t offset;
+ int flags;
+ void *area, *vaddr;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ offset = addr - block->offset;
+ if (offset < block->length) {
+ vaddr = block->host + offset;
+ if (block->flags & RAM_PREALLOC_MASK) {
+ ;
+ } else {
+ flags = MAP_FIXED;
+ munmap(vaddr, length);
+ if (mem_path) {
+#if defined(__linux__) && !defined(TARGET_S390X)
+ if (block->fd) {
+#ifdef MAP_POPULATE
+ flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
+ MAP_PRIVATE;
+#else
+ flags |= MAP_PRIVATE;
+#endif
+ area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+ flags, block->fd, offset);
+ } else {
+ flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+ area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+ flags, -1, 0);
+ }
+#else
+ abort();
+#endif
+ } else {
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+ flags |= MAP_SHARED | MAP_ANONYMOUS;
+ area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
+ flags, -1, 0);
+#else
+ flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+ area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+ flags, -1, 0);
+#endif
+ }
+ if (area != vaddr) {
+ fprintf(stderr, "Could not remap addr: %lx@%lx\n",
+ length, addr);
+ exit(1);
+ }
+ qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
+ }
+ return;
+ }
+ }
+#endif /* !CONFIG_ANDROID */
}
+#endif /* !_WIN32 */
/* Return a host pointer to ram allocated with qemu_ram_alloc.
With the exception of the softmmu code in this file, this should
@@ -2525,57 +2690,69 @@ void qemu_ram_free(ram_addr_t addr)
*/
void *qemu_get_ram_ptr(ram_addr_t addr)
{
- RAMBlock *prev;
- RAMBlock **prevp;
RAMBlock *block;
- prev = NULL;
- prevp = &ram_blocks;
- block = ram_blocks;
- while (block && (block->offset > addr
- || block->offset + block->length <= addr)) {
- if (prev)
- prevp = &prev->next;
- prev = block;
- block = block->next;
- }
- if (!block) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (addr - block->offset < block->length) {
+ /* Move this entry to to start of the list. */
+ if (block != QLIST_FIRST(&ram_list.blocks)) {
+ QLIST_REMOVE(block, next);
+ QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
+ }
+ return block->host + (addr - block->offset);
+ }
+ }
+
fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
abort();
+
+ return NULL;
}
- /* Move this entry to to start of the list. */
- if (prev) {
- prev->next = block->next;
- block->next = *prevp;
- *prevp = block;
- }
+
+/* Return a host pointer to ram allocated with qemu_ram_alloc.
+ * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
+ */
+void *qemu_safe_ram_ptr(ram_addr_t addr)
+{
+ RAMBlock *block;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (addr - block->offset < block->length) {
return block->host + (addr - block->offset);
}
+ }
-/* Some of the softmmu routines need to translate from a host pointer
- (typically a TLB entry) back to a ram offset. */
-ram_addr_t qemu_ram_addr_from_host(void *ptr)
+ fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+ abort();
+
+ return NULL;
+}
+
+int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
- RAMBlock *prev;
- RAMBlock **prevp;
RAMBlock *block;
uint8_t *host = ptr;
- prev = NULL;
- prevp = &ram_blocks;
- block = ram_blocks;
- while (block && (block->host > host
- || block->host + block->length <= host)) {
- if (prev)
- prevp = &prev->next;
- prev = block;
- block = block->next;
- }
- if (!block) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (host - block->host < block->length) {
+ *ram_addr = block->offset + (host - block->host);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/* Some of the softmmu routines need to translate from a host pointer
+ (typically a TLB entry) back to a ram offset. */
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
+{
+ ram_addr_t ram_addr;
+
+ if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
fprintf(stderr, "Bad ram pointer %p\n", ptr);
abort();
}
- return block->offset + (host - block->host);
+ return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
@@ -2657,16 +2834,16 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
int dirty_flags;
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 1);
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
stb_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+ cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
@@ -2677,16 +2854,16 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
int dirty_flags;
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 2);
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
stw_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+ cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
@@ -2697,16 +2874,16 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
int dirty_flags;
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 4);
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
stl_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+ cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
@@ -3158,8 +3335,8 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
}
}
} else {
@@ -3355,7 +3532,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
{
if (buffer != bounce.buffer) {
if (is_write) {
- ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
+ ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
while (access_len) {
unsigned l;
l = TARGET_PAGE_SIZE;
@@ -3365,8 +3542,8 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
}
addr1 += l;
access_len -= l;
@@ -3377,7 +3554,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
if (is_write) {
cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
}
- qemu_free(bounce.buffer);
+ qemu_vfree(bounce.buffer);
bounce.buffer = NULL;
cpu_notify_map_clients();
}
@@ -3500,8 +3677,8 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
}
}
}
@@ -3569,8 +3746,8 @@ void stl_phys(target_phys_addr_t addr, uint32_t val)
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(addr1,
+ (0xff & ~CODE_DIRTY_FLAG));
}
}
}
@@ -3690,8 +3867,7 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
#if !defined(CONFIG_USER_ONLY)
-void dump_exec_info(FILE *f,
- int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
+void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
int i, target_code_size, max_target_code_size;
int direct_jmp_count, direct_jmp2_count, cross_page;
@@ -3718,14 +3894,14 @@ void dump_exec_info(FILE *f,
}
/* XXX: avoid using doubles ? */
cpu_fprintf(f, "Translation buffer state:\n");
- cpu_fprintf(f, "gen code size %ld/%ld\n",
+ cpu_fprintf(f, "gen code size %td/%ld\n",
code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
cpu_fprintf(f, "TB count %d/%d\n",
nb_tbs, code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
nb_tbs ? target_code_size / nb_tbs : 0,
max_target_code_size);
- cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
+ cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
cpu_fprintf(f, "cross page TB count %d (%d%%)\n",
diff --git a/hw/android_arm.c b/hw/android_arm.c
index 3b9dc6d..188051b 100644
--- a/hw/android_arm.c
+++ b/hw/android_arm.c
@@ -79,7 +79,7 @@ static void android_arm_init_(ram_addr_t ram_size,
env = cpu_init(cpu_model);
register_savevm( "cpu", 0, ARM_CPU_SAVE_VERSION, cpu_save, cpu_load, env );
- ram_offset = qemu_ram_alloc(ram_size);
+ ram_offset = qemu_ram_alloc(NULL,"android_arm",ram_size);
cpu_register_physical_memory(0, ram_size, ram_offset | IO_MEM_RAM);
cpu_pic = arm_pic_init_cpu(env);
diff --git a/hw/armv7m.c b/hw/armv7m.c
index 297a3e1..f7636db 100644
--- a/hw/armv7m.c
+++ b/hw/armv7m.c
@@ -105,13 +105,13 @@ static void bitband_writel(void *opaque, target_phys_addr_t offset,
cpu_physical_memory_write(addr, (uint8_t *)&v, 4);
}
-static CPUReadMemoryFunc *bitband_readfn[] = {
+static CPUReadMemoryFunc * const bitband_readfn[] = {
bitband_readb,
bitband_readw,
bitband_readl
};
-static CPUWriteMemoryFunc *bitband_writefn[] = {
+static CPUWriteMemoryFunc * const bitband_writefn[] = {
bitband_writeb,
bitband_writew,
bitband_writel
@@ -192,9 +192,11 @@ qemu_irq *armv7m_init(int flash_size, int sram_size,
/* Flash programming is done via the SCU, so pretend it is ROM. */
cpu_register_physical_memory(0, flash_size,
- qemu_ram_alloc(flash_size) | IO_MEM_ROM);
+ qemu_ram_alloc(NULL, "armv7m.flash",
+ flash_size) | IO_MEM_ROM);
cpu_register_physical_memory(0x20000000, sram_size,
- qemu_ram_alloc(sram_size) | IO_MEM_RAM);
+ qemu_ram_alloc(NULL, "armv7m.sram",
+ sram_size) | IO_MEM_RAM);
armv7m_bitband_init();
nvic = qdev_create(NULL, "armv7m_nvic");
@@ -233,8 +235,8 @@ qemu_irq *armv7m_init(int flash_size, int sram_size,
space. This stops qemu complaining about executing code outside RAM
when returning from an exception. */
cpu_register_physical_memory(0xfffff000, 0x1000,
- qemu_ram_alloc(0x1000) | IO_MEM_RAM);
-
+ qemu_ram_alloc(NULL, "armv7m.hack",
+ 0x1000) | IO_MEM_RAM);
return pic;
}
diff --git a/hw/hw.h b/hw/hw.h
index ffee4ff..d230448 100644
--- a/hw/hw.h
+++ b/hw/hw.h
@@ -67,7 +67,9 @@ static inline void qemu_put_ubyte(QEMUFile *f, unsigned int v)
void qemu_put_be16(QEMUFile *f, unsigned int v);
void qemu_put_be32(QEMUFile *f, unsigned int v);
void qemu_put_be64(QEMUFile *f, uint64_t v);
+#ifdef CONFIG_ANDROID
void qemu_put_float(QEMUFile *f, float v);
+#endif
int qemu_get_buffer(QEMUFile *f, uint8_t *buf, int size);
int qemu_get_byte(QEMUFile *f);
@@ -81,7 +83,9 @@ static inline unsigned int qemu_get_ubyte(QEMUFile *f)
unsigned int qemu_get_be16(QEMUFile *f);
unsigned int qemu_get_be32(QEMUFile *f);
uint64_t qemu_get_be64(QEMUFile *f);
+#ifdef CONFIG_ANDROID
float qemu_get_float(QEMUFile *f);
+#endif
int qemu_file_rate_limit(QEMUFile *f);
int64_t qemu_file_set_rate_limit(QEMUFile *f, int64_t new_rate);
int64_t qemu_file_get_rate_limit(QEMUFile *f);
diff --git a/hw/pc.c b/hw/pc.c
index ff7670e..0114ff5 100644
--- a/hw/pc.c
+++ b/hw/pc.c
@@ -550,7 +550,7 @@ static void generate_bootsect(target_phys_addr_t option_rom,
*p++ = 0x1f; /* pop ds */
*p++ = 0x58; /* pop ax */
*p++ = 0xcb; /* lret */
-
+
/* Actual code */
*reloc = (p - rom);
@@ -910,7 +910,7 @@ static void pc_init1(ram_addr_t ram_size,
cpu_model = "qemu32";
#endif
}
-
+
for(i = 0; i < smp_cpus; i++) {
env = cpu_init(cpu_model);
if (!env) {
@@ -927,25 +927,23 @@ static void pc_init1(ram_addr_t ram_size,
vmport_init();
/* allocate RAM */
- ram_addr = qemu_ram_alloc(0xa0000);
+ ram_addr = qemu_ram_alloc(NULL, "pc.ram",
+ below_4g_mem_size + above_4g_mem_size);
cpu_register_physical_memory(0, 0xa0000, ram_addr);
-
- /* Allocate, even though we won't register, so we don't break the
- * phys_ram_base + PA assumption. This range includes vga (0xa0000 - 0xc0000),
- * and some bios areas, which will be registered later
- */
- ram_addr = qemu_ram_alloc(0x100000 - 0xa0000);
- ram_addr = qemu_ram_alloc(below_4g_mem_size - 0x100000);
cpu_register_physical_memory(0x100000,
below_4g_mem_size - 0x100000,
- ram_addr);
+ ram_addr + 0x100000);
+ if (above_4g_mem_size > 0) {
+ cpu_register_physical_memory(0x100000000ULL, above_4g_mem_size,
+ ram_addr + below_4g_mem_size);
+ }
#else
/*
* Allocate a single contiguous RAM so that the goldfish
* framebuffer can work well especially when the frame buffer is
* large.
*/
- ram_addr = qemu_ram_alloc(below_4g_mem_size);
+ ram_addr = qemu_ram_alloc(NULL, "pc.ram", below_4g_mem_size);
cpu_register_physical_memory(0, below_4g_mem_size, ram_addr);
#endif
@@ -975,7 +973,7 @@ static void pc_init1(ram_addr_t ram_size,
(bios_size % 65536) != 0) {
goto bios_error;
}
- bios_offset = qemu_ram_alloc(bios_size);
+ bios_offset = qemu_ram_alloc(NULL, "bios.bin", bios_size);
ret = load_image(filename, qemu_get_ram_ptr(bios_offset));
if (ret != bios_size) {
bios_error:
@@ -995,7 +993,7 @@ static void pc_init1(ram_addr_t ram_size,
- option_rom_offset = qemu_ram_alloc(0x20000);
+ option_rom_offset = qemu_ram_alloc(NULL, "pc.rom", 0x20000);
oprom_area_size = 0;
cpu_register_physical_memory(0xc0000, 0x20000, option_rom_offset);
diff --git a/migration.h b/migration.h
index 37c7f8e..9d145b0 100644
--- a/migration.h
+++ b/migration.h
@@ -102,4 +102,11 @@ static inline FdMigrationState *migrate_to_fms(MigrationState *mig_state)
return container_of(mig_state, FdMigrationState, mig_state);
}
+uint64_t ram_bytes_remaining(void);
+uint64_t ram_bytes_transferred(void);
+uint64_t ram_bytes_total(void);
+
+int ram_save_live(QEMUFile *f, int stage, void *opaque);
+int ram_load(QEMUFile *f, void *opaque, int version_id);
+
#endif
diff --git a/osdep.c b/osdep.c
index 5879eed..6c402d9 100644
--- a/osdep.c
+++ b/osdep.c
@@ -32,9 +32,16 @@
/* Needed early for CONFIG_BSD etc. */
#include "config-host.h"
+#if defined(CONFIG_MADVISE) || defined(CONFIG_POSIX_MADVISE)
+#include <sys/mman.h>
+#endif
+
#ifdef CONFIG_SOLARIS
#include <sys/types.h>
#include <sys/statvfs.h>
+/* See MySQL bug #7156 (http://bugs.mysql.com/bug.php?id=7156) for
+ discussion about Solaris header problems */
+extern int madvise(caddr_t, size_t, int);
#endif
#ifdef _WIN32
@@ -59,6 +66,21 @@ typedef int32_t socklen_t;
#include "sysemu.h"
#include "qemu_socket.h"
+int qemu_madvise(void *addr, size_t len, int advice)
+{
+ if (advice == QEMU_MADV_INVALID) {
+ errno = EINVAL;
+ return -1;
+ }
+#if defined(CONFIG_MADVISE)
+ return madvise(addr, len, advice);
+#elif defined(CONFIG_POSIX_MADVISE)
+ return posix_madvise(addr, len, advice);
+#else
+ errno = EINVAL;
+ return -1;
+#endif
+}
/*
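
Note: the qemu_madvise() wrapper added above selects madvise(), posix_madvise(), or an EINVAL failure depending on CONFIG_MADVISE / CONFIG_POSIX_MADVISE, and the RAM code uses it (and plain madvise) with the MERGEABLE hint so the Linux kernel's KSM can deduplicate identical guest pages. A minimal standalone sketch of that underlying call, Linux-only and not part of the patch:

    #define _DEFAULT_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = 16 * (size_t)sysconf(_SC_PAGESIZE);
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
    #ifdef MADV_MERGEABLE
        /* Hint that identical pages in this range may be merged by KSM,
         * as the allocator does for freshly allocated guest RAM. */
        if (madvise(p, len, MADV_MERGEABLE) != 0)
            perror("madvise");
    #endif
        munmap(p, len);
        return 0;
    }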
diff --git a/vl-android.c b/vl-android.c
index 0271f9c..374488a 100644
--- a/vl-android.c
+++ b/vl-android.c
@@ -261,6 +261,10 @@ DisplayType display_type = DT_DEFAULT;
const char* keyboard_layout = NULL;
int64_t ticks_per_sec;
ram_addr_t ram_size;
+const char *mem_path = NULL;
+#ifdef MAP_POPULATE
+int mem_prealloc = 0; /* force preallocation of physical target memory */
+#endif
int nb_nics;
NICInfo nd_table[MAX_NICS];
int vm_running;
@@ -1685,317 +1689,6 @@ void pcmcia_info(Monitor *mon)
}
/***********************************************************/
-/* ram save/restore */
-
-static int ram_get_page(QEMUFile *f, uint8_t *buf, int len)
-{
- int v;
-
- v = qemu_get_byte(f);
- switch(v) {
- case 0:
- if (qemu_get_buffer(f, buf, len) != len)
- return -EIO;
- break;
- case 1:
- v = qemu_get_byte(f);
- memset(buf, v, len);
- break;
- default:
- return -EINVAL;
- }
-
- if (qemu_file_has_error(f))
- return -EIO;
-
- return 0;
-}
-
-static int ram_load_v1(QEMUFile *f, void *opaque)
-{
- int ret;
- ram_addr_t i;
-
- if (qemu_get_be32(f) != last_ram_offset)
- return -EINVAL;
- for(i = 0; i < last_ram_offset; i+= TARGET_PAGE_SIZE) {
- ret = ram_get_page(f, qemu_get_ram_ptr(i), TARGET_PAGE_SIZE);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-#define BDRV_HASH_BLOCK_SIZE 1024
-#define IOBUF_SIZE 4096
-#define RAM_CBLOCK_MAGIC 0xfabe
-
-typedef struct RamDecompressState {
- z_stream zstream;
- QEMUFile *f;
- uint8_t buf[IOBUF_SIZE];
-} RamDecompressState;
-
-static int ram_decompress_open(RamDecompressState *s, QEMUFile *f)
-{
- int ret;
- memset(s, 0, sizeof(*s));
- s->f = f;
- ret = inflateInit(&s->zstream);
- if (ret != Z_OK)
- return -1;
- return 0;
-}
-
-static int ram_decompress_buf(RamDecompressState *s, uint8_t *buf, int len)
-{
- int ret, clen;
-
- s->zstream.avail_out = len;
- s->zstream.next_out = buf;
- while (s->zstream.avail_out > 0) {
- if (s->zstream.avail_in == 0) {
- if (qemu_get_be16(s->f) != RAM_CBLOCK_MAGIC)
- return -1;
- clen = qemu_get_be16(s->f);
- if (clen > IOBUF_SIZE)
- return -1;
- qemu_get_buffer(s->f, s->buf, clen);
- s->zstream.avail_in = clen;
- s->zstream.next_in = s->buf;
- }
- ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
- if (ret != Z_OK && ret != Z_STREAM_END) {
- return -1;
- }
- }
- return 0;
-}
-
-static void ram_decompress_close(RamDecompressState *s)
-{
- inflateEnd(&s->zstream);
-}
-
-#define RAM_SAVE_FLAG_FULL 0x01
-#define RAM_SAVE_FLAG_COMPRESS 0x02
-#define RAM_SAVE_FLAG_MEM_SIZE 0x04
-#define RAM_SAVE_FLAG_PAGE 0x08
-#define RAM_SAVE_FLAG_EOS 0x10
-
-static int is_dup_page(uint8_t *page, uint8_t ch)
-{
- uint32_t val = ch << 24 | ch << 16 | ch << 8 | ch;
- uint32_t *array = (uint32_t *)page;
- int i;
-
- for (i = 0; i < (TARGET_PAGE_SIZE / 4); i++) {
- if (array[i] != val)
- return 0;
- }
-
- return 1;
-}
-
-static int ram_save_block(QEMUFile *f)
-{
- static ram_addr_t current_addr = 0;
- ram_addr_t saved_addr = current_addr;
- ram_addr_t addr = 0;
- int found = 0;
-
- while (addr < last_ram_offset) {
- if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
- uint8_t *p;
-
- cpu_physical_memory_reset_dirty(current_addr,
- current_addr + TARGET_PAGE_SIZE,
- MIGRATION_DIRTY_FLAG);
-
- p = qemu_get_ram_ptr(current_addr);
-
- if (is_dup_page(p, *p)) {
- qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_COMPRESS);
- qemu_put_byte(f, *p);
- } else {
- qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_PAGE);
- qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
- }
-
- found = 1;
- break;
- }
- addr += TARGET_PAGE_SIZE;
- current_addr = (saved_addr + addr) % last_ram_offset;
- }
-
- return found;
-}
-
-static uint64_t bytes_transferred = 0;
-
-static ram_addr_t ram_save_remaining(void)
-{
- ram_addr_t addr;
- ram_addr_t count = 0;
-
- for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
- if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
- count++;
- }
-
- return count;
-}
-
-uint64_t ram_bytes_remaining(void)
-{
- return ram_save_remaining() * TARGET_PAGE_SIZE;
-}
-
-uint64_t ram_bytes_transferred(void)
-{
- return bytes_transferred;
-}
-
-uint64_t ram_bytes_total(void)
-{
- return last_ram_offset;
-}
-
-static int ram_save_live(QEMUFile *f, int stage, void *opaque)
-{
- ram_addr_t addr;
- uint64_t bytes_transferred_last;
- double bwidth = 0;
- uint64_t expected_time = 0;
-
- cpu_physical_sync_dirty_bitmap(0, TARGET_PHYS_ADDR_MAX);
-
- if (stage == 1) {
- bytes_transferred = 0;
-
- /* Make sure all dirty bits are set */
- for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
- if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
- cpu_physical_memory_set_dirty(addr);
- }
-
- /* Enable dirty memory tracking */
- cpu_physical_memory_set_dirty_tracking(1);
-
- qemu_put_be64(f, last_ram_offset | RAM_SAVE_FLAG_MEM_SIZE);
- }
-
- bytes_transferred_last = bytes_transferred;
- bwidth = qemu_get_clock_ns(rt_clock);
-
- while (!qemu_file_rate_limit(f)) {
- int ret;
-
- ret = ram_save_block(f);
- bytes_transferred += ret * TARGET_PAGE_SIZE;
- if (ret == 0) /* no more blocks */
- break;
- }
-
- bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
- bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;
-
- /* if we haven't transferred anything this round, force expected_time to a
- * a very high value, but without crashing */
- if (bwidth == 0)
- bwidth = 0.000001;
-
- /* try transferring iterative blocks of memory */
- if (stage == 3) {
- /* flush all remaining blocks regardless of rate limiting */
- while (ram_save_block(f) != 0) {
- bytes_transferred += TARGET_PAGE_SIZE;
- }
- cpu_physical_memory_set_dirty_tracking(0);
- }
-
- qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-
- expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
-
- return (stage == 2) && (expected_time <= migrate_max_downtime());
-}
-
-static int ram_load_dead(QEMUFile *f, void *opaque)
-{
- RamDecompressState s1, *s = &s1;
- uint8_t buf[10];
- ram_addr_t i;
-
- if (ram_decompress_open(s, f) < 0)
- return -EINVAL;
- for(i = 0; i < last_ram_offset; i+= BDRV_HASH_BLOCK_SIZE) {
- if (ram_decompress_buf(s, buf, 1) < 0) {
- fprintf(stderr, "Error while reading ram block header\n");
- goto error;
- }
- if (buf[0] == 0) {
- if (ram_decompress_buf(s, qemu_get_ram_ptr(i),
- BDRV_HASH_BLOCK_SIZE) < 0) {
- fprintf(stderr, "Error while reading ram block address=0x%08" PRIx64, (uint64_t)i);
- goto error;
- }
- } else {
- error:
- printf("Error block header\n");
- return -EINVAL;
- }
- }
- ram_decompress_close(s);
-
- return 0;
-}
-
-static int ram_load(QEMUFile *f, void *opaque, int version_id)
-{
- ram_addr_t addr;
- int flags;
-
- if (version_id == 1)
- return ram_load_v1(f, opaque);
-
- if (version_id == 2) {
- if (qemu_get_be32(f) != last_ram_offset)
- return -EINVAL;
- return ram_load_dead(f, opaque);
- }
-
- if (version_id != 3)
- return -EINVAL;
-
- do {
- addr = qemu_get_be64(f);
-
- flags = addr & ~TARGET_PAGE_MASK;
- addr &= TARGET_PAGE_MASK;
-
- if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
- if (addr != last_ram_offset)
- return -EINVAL;
- }
-
- if (flags & RAM_SAVE_FLAG_FULL) {
- if (ram_load_dead(f, opaque) < 0)
- return -EINVAL;
- }
-
- if (flags & RAM_SAVE_FLAG_COMPRESS) {
- uint8_t ch = qemu_get_byte(f);
- memset(qemu_get_ram_ptr(addr), ch, TARGET_PAGE_SIZE);
- } else if (flags & RAM_SAVE_FLAG_PAGE)
- qemu_get_buffer(f, qemu_get_ram_ptr(addr), TARGET_PAGE_SIZE);
- } while (!(flags & RAM_SAVE_FLAG_EOS));
-
- return 0;
-}
-
-/***********************************************************/
/* machine registration */
static QEMUMachine *first_machine = NULL;
diff --git a/vl.c b/vl.c
index c194260..3102679 100644
--- a/vl.c
+++ b/vl.c
@@ -197,6 +197,10 @@ enum vga_retrace_method vga_retrace_method = VGA_RETRACE_DUMB;
DisplayType display_type = DT_DEFAULT;
const char* keyboard_layout = NULL;
ram_addr_t ram_size;
+const char *mem_path = NULL;
+#ifdef MAP_POPULATE
+int mem_prealloc = 0; /* force preallocation of physical target memory */
+#endif
int nb_nics;
NICInfo nd_table[MAX_NICS];
int vm_running;
@@ -1487,320 +1491,6 @@ void pcmcia_info(Monitor *mon)
}
/***********************************************************/
-/* ram save/restore */
-
-static int ram_get_page(QEMUFile *f, uint8_t *buf, int len)
-{
- int v;
-
- v = qemu_get_byte(f);
- switch(v) {
- case 0:
- if (qemu_get_buffer(f, buf, len) != len)
- return -EIO;
- break;
- case 1:
- v = qemu_get_byte(f);
- memset(buf, v, len);
- break;
- default:
- return -EINVAL;
- }
-
- if (qemu_file_has_error(f))
- return -EIO;
-
- return 0;
-}
-
-static int ram_load_v1(QEMUFile *f, void *opaque)
-{
- int ret;
- ram_addr_t i;
-
- if (qemu_get_be32(f) != last_ram_offset)
- return -EINVAL;
- for(i = 0; i < last_ram_offset; i+= TARGET_PAGE_SIZE) {
- ret = ram_get_page(f, qemu_get_ram_ptr(i), TARGET_PAGE_SIZE);
- if (ret)
- return ret;
- }
- return 0;
-}
-
-#define BDRV_HASH_BLOCK_SIZE 1024
-#define IOBUF_SIZE 4096
-#define RAM_CBLOCK_MAGIC 0xfabe
-
-typedef struct RamDecompressState {
- z_stream zstream;
- QEMUFile *f;
- uint8_t buf[IOBUF_SIZE];
-} RamDecompressState;
-
-static int ram_decompress_open(RamDecompressState *s, QEMUFile *f)
-{
- int ret;
- memset(s, 0, sizeof(*s));
- s->f = f;
- ret = inflateInit(&s->zstream);
- if (ret != Z_OK)
- return -1;
- return 0;
-}
-
-static int ram_decompress_buf(RamDecompressState *s, uint8_t *buf, int len)
-{
- int ret, clen;
-
- s->zstream.avail_out = len;
- s->zstream.next_out = buf;
- while (s->zstream.avail_out > 0) {
- if (s->zstream.avail_in == 0) {
- if (qemu_get_be16(s->f) != RAM_CBLOCK_MAGIC)
- return -1;
- clen = qemu_get_be16(s->f);
- if (clen > IOBUF_SIZE)
- return -1;
- qemu_get_buffer(s->f, s->buf, clen);
- s->zstream.avail_in = clen;
- s->zstream.next_in = s->buf;
- }
- ret = inflate(&s->zstream, Z_PARTIAL_FLUSH);
- if (ret != Z_OK && ret != Z_STREAM_END) {
- return -1;
- }
- }
- return 0;
-}
-
-static void ram_decompress_close(RamDecompressState *s)
-{
- inflateEnd(&s->zstream);
-}
-
-#define RAM_SAVE_FLAG_FULL 0x01
-#define RAM_SAVE_FLAG_COMPRESS 0x02
-#define RAM_SAVE_FLAG_MEM_SIZE 0x04
-#define RAM_SAVE_FLAG_PAGE 0x08
-#define RAM_SAVE_FLAG_EOS 0x10
-
-static int is_dup_page(uint8_t *page, uint8_t ch)
-{
- uint32_t val = ch << 24 | ch << 16 | ch << 8 | ch;
- uint32_t *array = (uint32_t *)page;
- int i;
-
- for (i = 0; i < (TARGET_PAGE_SIZE / 4); i++) {
- if (array[i] != val)
- return 0;
- }
-
- return 1;
-}
-
-static int ram_save_block(QEMUFile *f)
-{
- static ram_addr_t current_addr = 0;
- ram_addr_t saved_addr = current_addr;
- ram_addr_t addr = 0;
- int found = 0;
-
- while (addr < last_ram_offset) {
- if (cpu_physical_memory_get_dirty(current_addr, MIGRATION_DIRTY_FLAG)) {
- uint8_t *p;
-
- cpu_physical_memory_reset_dirty(current_addr,
- current_addr + TARGET_PAGE_SIZE,
- MIGRATION_DIRTY_FLAG);
-
- p = qemu_get_ram_ptr(current_addr);
-
- if (is_dup_page(p, *p)) {
- qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_COMPRESS);
- qemu_put_byte(f, *p);
- } else {
- qemu_put_be64(f, current_addr | RAM_SAVE_FLAG_PAGE);
- qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
- }
-
- found = 1;
- break;
- }
- addr += TARGET_PAGE_SIZE;
- current_addr = (saved_addr + addr) % last_ram_offset;
- }
-
- return found;
-}
-
-static uint64_t bytes_transferred = 0;
-
-static ram_addr_t ram_save_remaining(void)
-{
- ram_addr_t addr;
- ram_addr_t count = 0;
-
- for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
- if (cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
- count++;
- }
-
- return count;
-}
-
-uint64_t ram_bytes_remaining(void)
-{
- return ram_save_remaining() * TARGET_PAGE_SIZE;
-}
-
-uint64_t ram_bytes_transferred(void)
-{
- return bytes_transferred;
-}
-
-uint64_t ram_bytes_total(void)
-{
- return last_ram_offset;
-}
-
-static int ram_save_live(QEMUFile *f, int stage, void *opaque)
-{
- ram_addr_t addr;
- uint64_t bytes_transferred_last;
- double bwidth = 0;
- uint64_t expected_time = 0;
-
- if (cpu_physical_sync_dirty_bitmap(0, TARGET_PHYS_ADDR_MAX) != 0) {
- qemu_file_set_error(f);
- return 0;
- }
-
- if (stage == 1) {
- bytes_transferred = 0;
-
- /* Make sure all dirty bits are set */
- for (addr = 0; addr < last_ram_offset; addr += TARGET_PAGE_SIZE) {
- if (!cpu_physical_memory_get_dirty(addr, MIGRATION_DIRTY_FLAG))
- cpu_physical_memory_set_dirty(addr);
- }
-
- /* Enable dirty memory tracking */
- cpu_physical_memory_set_dirty_tracking(1);
-
- qemu_put_be64(f, last_ram_offset | RAM_SAVE_FLAG_MEM_SIZE);
- }
-
- bytes_transferred_last = bytes_transferred;
- bwidth = qemu_get_clock_ns(rt_clock);
-
- while (!qemu_file_rate_limit(f)) {
- int ret;
-
- ret = ram_save_block(f);
- bytes_transferred += ret * TARGET_PAGE_SIZE;
- if (ret == 0) /* no more blocks */
- break;
- }
-
- bwidth = qemu_get_clock_ns(rt_clock) - bwidth;
- bwidth = (bytes_transferred - bytes_transferred_last) / bwidth;
-
- /* if we haven't transferred anything this round, force expected_time to a
- * a very high value, but without crashing */
- if (bwidth == 0)
- bwidth = 0.000001;
-
- /* try transferring iterative blocks of memory */
- if (stage == 3) {
- /* flush all remaining blocks regardless of rate limiting */
- while (ram_save_block(f) != 0) {
- bytes_transferred += TARGET_PAGE_SIZE;
- }
- cpu_physical_memory_set_dirty_tracking(0);
- }
-
- qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
-
- expected_time = ram_save_remaining() * TARGET_PAGE_SIZE / bwidth;
-
- return (stage == 2) && (expected_time <= migrate_max_downtime());
-}
-
-static int ram_load_dead(QEMUFile *f, void *opaque)
-{
- RamDecompressState s1, *s = &s1;
- uint8_t buf[10];
- ram_addr_t i;
-
- if (ram_decompress_open(s, f) < 0)
- return -EINVAL;
- for(i = 0; i < last_ram_offset; i+= BDRV_HASH_BLOCK_SIZE) {
- if (ram_decompress_buf(s, buf, 1) < 0) {
- fprintf(stderr, "Error while reading ram block header\n");
- goto error;
- }
- if (buf[0] == 0) {
- if (ram_decompress_buf(s, qemu_get_ram_ptr(i),
- BDRV_HASH_BLOCK_SIZE) < 0) {
- fprintf(stderr, "Error while reading ram block address=0x%08" PRIx64, (uint64_t)i);
- goto error;
- }
- } else {
- error:
- printf("Error block header\n");
- return -EINVAL;
- }
- }
- ram_decompress_close(s);
-
- return 0;
-}
-
-static int ram_load(QEMUFile *f, void *opaque, int version_id)
-{
- ram_addr_t addr;
- int flags;
-
- if (version_id == 1)
- return ram_load_v1(f, opaque);
-
- if (version_id == 2) {
- if (qemu_get_be32(f) != last_ram_offset)
- return -EINVAL;
- return ram_load_dead(f, opaque);
- }
-
- if (version_id != 3)
- return -EINVAL;
-
- do {
- addr = qemu_get_be64(f);
-
- flags = addr & ~TARGET_PAGE_MASK;
- addr &= TARGET_PAGE_MASK;
-
- if (flags & RAM_SAVE_FLAG_MEM_SIZE) {
- if (addr != last_ram_offset)
- return -EINVAL;
- }
-
- if (flags & RAM_SAVE_FLAG_FULL) {
- if (ram_load_dead(f, opaque) < 0)
- return -EINVAL;
- }
-
- if (flags & RAM_SAVE_FLAG_COMPRESS) {
- uint8_t ch = qemu_get_byte(f);
- memset(qemu_get_ram_ptr(addr), ch, TARGET_PAGE_SIZE);
- } else if (flags & RAM_SAVE_FLAG_PAGE)
- qemu_get_buffer(f, qemu_get_ram_ptr(addr), TARGET_PAGE_SIZE);
- } while (!(flags & RAM_SAVE_FLAG_EOS));
-
- return 0;
-}
-
-/***********************************************************/
/* machine registration */
static QEMUMachine *first_machine = NULL;