author    David 'Digit' Turner <digit@android.com>  2011-05-11 17:37:44 +0200
committer David 'Digit' Turner <digit@android.com>  2011-06-01 17:08:19 +0200
commit    280afa072a7b829e581d884c2b3276530a6014b7 (patch)
tree      6f1f213d66db8a5f89994bdfda7c9cd24140ffeb /exec.c
parent    ae3098a3bce898cf958a6c3334f3d62282b12d2a (diff)
ramblocks: integrate upstream implementation (sophisticated)
Change-Id: I49e96e2d5ae571849b0b6fef0a30b41ecdee8d23
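
For context: this change drops the emulator's private singly-linked RAMBlock list and the global phys_ram_dirty byte array, and adopts the upstream RAMList structure, which owns both the block list and the per-page dirty bitmap. The upstream declarations this file now relies on live in cpu-all.h; a sketch of what they presumably look like in this era of QEMU (the exact field set and flags are an assumption, not part of this diff):

    typedef struct RAMBlock {
        uint8_t *host;                  /* host virtual address of the block */
        ram_addr_t offset;              /* block's base in ram_addr_t space */
        ram_addr_t length;
        uint32_t flags;                 /* e.g. RAM_PREALLOC_MASK */
        char idstr[256];                /* "devpath/name" identifier */
        QLIST_ENTRY(RAMBlock) next;
    #if defined(__linux__) && !defined(TARGET_S390X)
        int fd;                         /* backing fd when -mem-path is used */
    #endif
    } RAMBlock;

    typedef struct RAMList {
        uint8_t *phys_dirty;            /* one dirty-flag byte per target page */
        QLIST_HEAD(, RAMBlock) blocks;
    } RAMList;

    extern RAMList ram_list;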
Diffstat (limited to 'exec.c')
-rw-r--r--  exec.c  392
1 file changed, 284 insertions(+), 108 deletions(-)
diff --git a/exec.c b/exec.c
index 113917e..285c950 100644
--- a/exec.c
+++ b/exec.c
@@ -115,21 +115,9 @@ uint8_t *code_gen_ptr;
#if !defined(CONFIG_USER_ONLY)
int phys_ram_fd;
-uint8_t *phys_ram_dirty;
static int in_migration;
-typedef struct RAMBlock {
- uint8_t *host;
- ram_addr_t offset;
- ram_addr_t length;
- struct RAMBlock *next;
-} RAMBlock;
-
-static RAMBlock *ram_blocks;
-/* TODO: When we implement (and use) ram deallocation (e.g. for hotplug)
- then we can no longer assume contiguous ram offsets, and external uses
- of this variable will break. */
-ram_addr_t last_ram_offset;
+RAMList ram_list = { .blocks = QLIST_HEAD_INITIALIZER(ram_list) };
#endif
CPUState *first_cpu;
@@ -479,7 +467,8 @@ static void code_gen_alloc(unsigned long tb_size)
exit(1);
}
}
-#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) || defined(__DragonFly__)
+#elif defined(__FreeBSD__) || defined(__FreeBSD_kernel__) \
+ || defined(__DragonFly__) || defined(__OpenBSD__)
{
int flags;
void *addr = NULL;
@@ -492,6 +481,13 @@ static void code_gen_alloc(unsigned long tb_size)
/* Cannot map more than that */
if (code_gen_buffer_size > (800 * 1024 * 1024))
code_gen_buffer_size = (800 * 1024 * 1024);
+#elif defined(__sparc_v9__)
+ // Map the buffer below 2G, so we can use direct calls and branches
+ flags |= MAP_FIXED;
+ addr = (void *) 0x60000000UL;
+ if (code_gen_buffer_size > (512 * 1024 * 1024)) {
+ code_gen_buffer_size = (512 * 1024 * 1024);
+ }
#endif
code_gen_buffer = mmap(addr, code_gen_buffer_size,
PROT_WRITE | PROT_READ | PROT_EXEC,
@@ -1861,7 +1857,7 @@ static void tlb_protect_code(ram_addr_t ram_addr)
static void tlb_unprotect_code_phys(CPUState *env, ram_addr_t ram_addr,
target_ulong vaddr)
{
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] |= CODE_DIRTY_FLAG;
+ cpu_physical_memory_set_dirty_flags(ram_addr, CODE_DIRTY_FLAG);
}
static inline void tlb_reset_dirty_range(CPUTLBEntry *tlb_entry,
@@ -1882,8 +1878,7 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
{
CPUState *env;
unsigned long length, start1;
- int i, mask, len;
- uint8_t *p;
+ int i;
start &= TARGET_PAGE_MASK;
end = TARGET_PAGE_ALIGN(end);
@@ -1891,18 +1886,14 @@ void cpu_physical_memory_reset_dirty(ram_addr_t start, ram_addr_t end,
length = end - start;
if (length == 0)
return;
- len = length >> TARGET_PAGE_BITS;
- mask = ~dirty_flags;
- p = phys_ram_dirty + (start >> TARGET_PAGE_BITS);
- for(i = 0; i < len; i++)
- p[i] &= mask;
+ cpu_physical_memory_mask_dirty_range(start, length, dirty_flags);
/* we modify the TLB cache so that the dirty bit will be set again
when accessing the range */
- start1 = (unsigned long)qemu_get_ram_ptr(start);
+ start1 = (unsigned long)qemu_safe_ram_ptr(start);
/* Check that we don't span multiple blocks - this breaks the
address comparisons below. */
- if ((unsigned long)qemu_get_ram_ptr(end - 1) - start1
+ if ((unsigned long)qemu_safe_ram_ptr(end - 1) - start1
!= (end - 1) - start) {
abort();
}
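
The dirty-bitmap accesses above now go through accessors instead of indexing phys_ram_dirty directly. Upstream defines these as small inlines over ram_list.phys_dirty; a sketch of what they presumably look like (modeled on upstream cpu-all.h, not shown in this diff):

    static inline int cpu_physical_memory_get_dirty_flags(ram_addr_t addr)
    {
        return ram_list.phys_dirty[addr >> TARGET_PAGE_BITS];
    }

    static inline void cpu_physical_memory_set_dirty_flags(ram_addr_t addr,
                                                           int dirty_flags)
    {
        ram_list.phys_dirty[addr >> TARGET_PAGE_BITS] |= dirty_flags;
    }

    static inline void cpu_physical_memory_mask_dirty_range(ram_addr_t start,
                                                            int length,
                                                            int dirty_flags)
    {
        int i, mask, len;
        uint8_t *p;

        len = length >> TARGET_PAGE_BITS;
        mask = ~dirty_flags;
        p = ram_list.phys_dirty + (start >> TARGET_PAGE_BITS);
        for (i = 0; i < len; i++) {
            p[i] &= mask;               /* clear the requested flags per page */
        }
    }

Note that cpu_physical_memory_mask_dirty_range is exactly the open-coded loop deleted from cpu_physical_memory_reset_dirty above.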
@@ -1949,7 +1940,7 @@ static inline void tlb_update_dirty(CPUTLBEntry *tlb_entry)
if ((tlb_entry->addr_write & ~TARGET_PAGE_MASK) == IO_MEM_RAM) {
p = (void *)(unsigned long)((tlb_entry->addr_write & TARGET_PAGE_MASK)
+ tlb_entry->addend);
- ram_addr = qemu_ram_addr_from_host(p);
+ ram_addr = qemu_ram_addr_from_host_nofail(p);
if (!cpu_physical_memory_is_dirty(ram_addr)) {
tlb_entry->addr_write |= TLB_NOTDIRTY;
}
@@ -2369,16 +2360,17 @@ static void *subpage_init (target_phys_addr_t base, ram_addr_t *phys,
start_addr and region_offset are rounded down to a page boundary
before calculating this offset. This should not be a problem unless
the low bits of start_addr and region_offset differ. */
-void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
+void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
ram_addr_t size,
ram_addr_t phys_offset,
- ram_addr_t region_offset)
+ ram_addr_t region_offset,
+ bool log_dirty)
{
target_phys_addr_t addr, end_addr;
PhysPageDesc *p;
CPUState *env;
ram_addr_t orig_size = size;
- void *subpage;
+ subpage_t *subpage;
if (kvm_enabled())
kvm_set_phys_mem(start_addr, size, phys_offset);
@@ -2389,7 +2381,9 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
region_offset &= TARGET_PAGE_MASK;
size = (size + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK;
end_addr = start_addr + (target_phys_addr_t)size;
- for(addr = start_addr; addr != end_addr; addr += TARGET_PAGE_SIZE) {
+
+ addr = start_addr;
+ do {
p = phys_page_find(addr >> TARGET_PAGE_BITS);
if (p && p->phys_offset != IO_MEM_UNASSIGNED) {
ram_addr_t orig_memory = p->phys_offset;
@@ -2398,7 +2392,7 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr, end_addr2,
need_subpage);
- if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
+ if (need_subpage) {
if (!(orig_memory & IO_MEM_SUBPAGE)) {
subpage = subpage_init((addr & TARGET_PAGE_MASK),
&p->phys_offset, orig_memory,
@@ -2430,7 +2424,7 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
CHECK_SUBPAGE(addr, start_addr, start_addr2, end_addr,
end_addr2, need_subpage);
- if (need_subpage || phys_offset & IO_MEM_SUBWIDTH) {
+ if (need_subpage) {
subpage = subpage_init((addr & TARGET_PAGE_MASK),
&p->phys_offset, IO_MEM_UNASSIGNED,
addr & TARGET_PAGE_MASK);
@@ -2441,7 +2435,8 @@ void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
}
}
region_offset += TARGET_PAGE_SIZE;
- }
+ addr += TARGET_PAGE_SIZE;
+ } while (addr != end_addr);
/* since each CPU stores ram addresses in its TLB cache, we must
reset the modified entries */
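
One subtlety in the hunks above: the registration loop changed from for to do/while. The guard addr != end_addr behaves identically except when end_addr == start_addr on entry, which happens when start_addr + size wraps around the top of the physical address space; the for loop would then register nothing, while the do/while walks every page once. A standalone illustration of that boundary case (hypothetical 32-bit addresses, 4 KiB pages):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* A "size" that wraps: end comes out equal to start. */
        uint32_t start = 0;
        uint32_t end = start + 0;       /* stands in for start + 2^32 */
        uint32_t addr = start;
        uint64_t pages = 0;

        do {                            /* a for loop would run 0 times */
            pages++;
            addr += 4096;
        } while (addr != end);          /* wraps back to 0 after 2^20 steps */

        printf("registered %llu pages\n", (unsigned long long)pages);
        return 0;
    }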
@@ -2474,46 +2469,216 @@ void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size)
kvm_uncoalesce_mmio_region(addr, size);
}
-ram_addr_t qemu_ram_alloc(ram_addr_t size)
+static ram_addr_t find_ram_offset(ram_addr_t size)
{
- RAMBlock *new_block;
+ RAMBlock *block, *next_block;
+ ram_addr_t offset = 0, mingap = ULONG_MAX;
+
+ if (QLIST_EMPTY(&ram_list.blocks))
+ return 0;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ ram_addr_t end, next = ULONG_MAX;
+
+ end = block->offset + block->length;
+
+ QLIST_FOREACH(next_block, &ram_list.blocks, next) {
+ if (next_block->offset >= end) {
+ next = MIN(next, next_block->offset);
+ }
+ }
+ if (next - end >= size && next - end < mingap) {
+ offset = end;
+ mingap = next - end;
+ }
+ }
+ return offset;
+}
+
+static ram_addr_t last_ram_offset(void)
+{
+ RAMBlock *block;
+ ram_addr_t last = 0;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next)
+ last = MAX(last, block->offset + block->length);
+
+ return last;
+}
+
+ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
+ ram_addr_t size, void *host)
+{
+ RAMBlock *new_block, *block;
size = TARGET_PAGE_ALIGN(size);
- new_block = qemu_malloc(sizeof(*new_block));
+ new_block = qemu_mallocz(sizeof(*new_block));
+#if 0
+ if (dev && dev->parent_bus && dev->parent_bus->info->get_dev_path) {
+ char *id = dev->parent_bus->info->get_dev_path(dev);
+ if (id) {
+ snprintf(new_block->idstr, sizeof(new_block->idstr), "%s/", id);
+ qemu_free(id);
+ }
+ }
+#endif
+ pstrcat(new_block->idstr, sizeof(new_block->idstr), name);
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (!strcmp(block->idstr, new_block->idstr)) {
+ fprintf(stderr, "RAMBlock \"%s\" already registered, abort!\n",
+ new_block->idstr);
+ abort();
+ }
+ }
+
+ if (host) {
+ new_block->host = host;
+ new_block->flags |= RAM_PREALLOC_MASK;
+ } else {
+ if (mem_path) {
+#if 0 && defined (__linux__) && !defined(TARGET_S390X)
+ new_block->host = file_ram_alloc(new_block, size, mem_path);
+ if (!new_block->host) {
+ new_block->host = qemu_vmalloc(size);
+ qemu_madvise(new_block->host, size, QEMU_MADV_MERGEABLE);
+ }
+#else
+ fprintf(stderr, "-mem-path option unsupported\n");
+ exit(1);
+#endif
+ } else {
#if defined(TARGET_S390X) && defined(CONFIG_KVM)
- /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
- new_block->host = mmap((void*)0x1000000, size, PROT_EXEC|PROT_READ|PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS, -1, 0);
+ /* XXX S390 KVM requires the topmost vma of the RAM to be < 256GB */
+ new_block->host = mmap((void*)0x1000000, size,
+ PROT_EXEC|PROT_READ|PROT_WRITE,
+ MAP_SHARED | MAP_ANONYMOUS, -1, 0);
#else
- new_block->host = qemu_vmalloc(size);
+ new_block->host = qemu_vmalloc(size);
#endif
#ifdef MADV_MERGEABLE
- madvise(new_block->host, size, MADV_MERGEABLE);
+ madvise(new_block->host, size, MADV_MERGEABLE);
#endif
- new_block->offset = last_ram_offset;
+ }
+ }
+
+ new_block->offset = find_ram_offset(size);
new_block->length = size;
- new_block->next = ram_blocks;
- ram_blocks = new_block;
+ QLIST_INSERT_HEAD(&ram_list.blocks, new_block, next);
- phys_ram_dirty = qemu_realloc(phys_ram_dirty,
- (last_ram_offset + size) >> TARGET_PAGE_BITS);
- memset(phys_ram_dirty + (last_ram_offset >> TARGET_PAGE_BITS),
+ ram_list.phys_dirty = qemu_realloc(ram_list.phys_dirty,
+ last_ram_offset() >> TARGET_PAGE_BITS);
+ memset(ram_list.phys_dirty + (new_block->offset >> TARGET_PAGE_BITS),
0xff, size >> TARGET_PAGE_BITS);
- last_ram_offset += size;
-
if (kvm_enabled())
kvm_setup_guest_memory(new_block->host, size);
return new_block->offset;
}
+ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size)
+{
+ return qemu_ram_alloc_from_ptr(dev, name, size, NULL);
+}
+
void qemu_ram_free(ram_addr_t addr)
{
- /* TODO: implement this. */
+ RAMBlock *block;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (addr == block->offset) {
+ QLIST_REMOVE(block, next);
+ if (block->flags & RAM_PREALLOC_MASK) {
+ ;
+ } else if (mem_path) {
+#if defined (__linux__) && !defined(TARGET_S390X)
+ if (block->fd) {
+ munmap(block->host, block->length);
+ close(block->fd);
+ } else {
+ qemu_vfree(block->host);
+ }
+#else
+ abort();
+#endif
+ } else {
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+ munmap(block->host, block->length);
+#else
+ qemu_vfree(block->host);
+#endif
+ }
+ qemu_free(block);
+ return;
+ }
+ }
+
+}
+
+#ifndef _WIN32
+void qemu_ram_remap(ram_addr_t addr, ram_addr_t length)
+{
+#ifndef CONFIG_ANDROID
+ RAMBlock *block;
+ ram_addr_t offset;
+ int flags;
+ void *area, *vaddr;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ offset = addr - block->offset;
+ if (offset < block->length) {
+ vaddr = block->host + offset;
+ if (block->flags & RAM_PREALLOC_MASK) {
+ ;
+ } else {
+ flags = MAP_FIXED;
+ munmap(vaddr, length);
+ if (mem_path) {
+#if defined(__linux__) && !defined(TARGET_S390X)
+ if (block->fd) {
+#ifdef MAP_POPULATE
+ flags |= mem_prealloc ? MAP_POPULATE | MAP_SHARED :
+ MAP_PRIVATE;
+#else
+ flags |= MAP_PRIVATE;
+#endif
+ area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+ flags, block->fd, offset);
+ } else {
+ flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+ area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+ flags, -1, 0);
+ }
+#else
+ abort();
+#endif
+ } else {
+#if defined(TARGET_S390X) && defined(CONFIG_KVM)
+ flags |= MAP_SHARED | MAP_ANONYMOUS;
+ area = mmap(vaddr, length, PROT_EXEC|PROT_READ|PROT_WRITE,
+ flags, -1, 0);
+#else
+ flags |= MAP_PRIVATE | MAP_ANONYMOUS;
+ area = mmap(vaddr, length, PROT_READ | PROT_WRITE,
+ flags, -1, 0);
+#endif
+ }
+ if (area != vaddr) {
+ fprintf(stderr, "Could not remap addr: %lx@%lx\n",
+ length, addr);
+ exit(1);
+ }
+ qemu_madvise(vaddr, length, QEMU_MADV_MERGEABLE);
+ }
+ return;
+ }
+ }
+#endif /* !CONFIG_ANDROID */
}
+#endif /* !_WIN32 */
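
The allocation entry points now take a device and a name, so every block carries an idstr (used above to reject duplicate registrations, and by migration upstream). A hypothetical caller, just to show the shape of the new API (the name and size are illustrative, not from this patch):

    /* Allocate a 64 MiB block; a NULL device means the name stands alone. */
    ram_addr_t base = qemu_ram_alloc(NULL, "test.ram", 64 * 1024 * 1024);

    /* Translate to a host pointer; this variant MRU-reorders the list. */
    void *host = qemu_get_ram_ptr(base);

    /* ... use the memory ... */

    qemu_ram_free(base);   /* no longer a TODO: the block really goes away */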
/* Return a host pointer to ram allocated with qemu_ram_alloc.
With the exception of the softmmu code in this file, this should
@@ -2525,57 +2690,69 @@ void qemu_ram_free(ram_addr_t addr)
*/
void *qemu_get_ram_ptr(ram_addr_t addr)
{
- RAMBlock *prev;
- RAMBlock **prevp;
RAMBlock *block;
- prev = NULL;
- prevp = &ram_blocks;
- block = ram_blocks;
- while (block && (block->offset > addr
- || block->offset + block->length <= addr)) {
- if (prev)
- prevp = &prev->next;
- prev = block;
- block = block->next;
- }
- if (!block) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (addr - block->offset < block->length) {
+            /* Move this entry to the start of the list. */
+ if (block != QLIST_FIRST(&ram_list.blocks)) {
+ QLIST_REMOVE(block, next);
+ QLIST_INSERT_HEAD(&ram_list.blocks, block, next);
+ }
+ return block->host + (addr - block->offset);
+ }
+ }
+
fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
abort();
+
+ return NULL;
}
- /* Move this entry to to start of the list. */
- if (prev) {
- prev->next = block->next;
- block->next = *prevp;
- *prevp = block;
- }
+
+/* Return a host pointer to ram allocated with qemu_ram_alloc.
+ * Same as qemu_get_ram_ptr but avoid reordering ramblocks.
+ */
+void *qemu_safe_ram_ptr(ram_addr_t addr)
+{
+ RAMBlock *block;
+
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (addr - block->offset < block->length) {
return block->host + (addr - block->offset);
}
+ }
-/* Some of the softmmu routines need to translate from a host pointer
- (typically a TLB entry) back to a ram offset. */
-ram_addr_t qemu_ram_addr_from_host(void *ptr)
+ fprintf(stderr, "Bad ram offset %" PRIx64 "\n", (uint64_t)addr);
+ abort();
+
+ return NULL;
+}
+
+int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr)
{
- RAMBlock *prev;
- RAMBlock **prevp;
RAMBlock *block;
uint8_t *host = ptr;
- prev = NULL;
- prevp = &ram_blocks;
- block = ram_blocks;
- while (block && (block->host > host
- || block->host + block->length <= host)) {
- if (prev)
- prevp = &prev->next;
- prev = block;
- block = block->next;
- }
- if (!block) {
+ QLIST_FOREACH(block, &ram_list.blocks, next) {
+ if (host - block->host < block->length) {
+ *ram_addr = block->offset + (host - block->host);
+ return 0;
+ }
+ }
+ return -1;
+}
+
+/* Some of the softmmu routines need to translate from a host pointer
+ (typically a TLB entry) back to a ram offset. */
+ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
+{
+ ram_addr_t ram_addr;
+
+ if (qemu_ram_addr_from_host(ptr, &ram_addr)) {
fprintf(stderr, "Bad ram pointer %p\n", ptr);
abort();
}
- return block->offset + (host - block->host);
+ return ram_addr;
}
static uint32_t unassigned_mem_readb(void *opaque, target_phys_addr_t addr)
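
qemu_ram_addr_from_host also changes shape here: rather than aborting on a pointer that is not guest RAM, it now reports failure through its return value and writes the translation to an out-parameter, with qemu_ram_addr_from_host_nofail preserving the old abort-on-failure behavior for callers like tlb_update_dirty. A hypothetical checked caller:

    ram_addr_t ram_addr;

    if (qemu_ram_addr_from_host(ptr, &ram_addr) == 0) {
        /* ptr falls inside a registered RAMBlock */
        use_ram_offset(ram_addr);       /* hypothetical consumer */
    } else {
        /* not guest RAM: fall back to a slow path, or report an error */
    }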
@@ -2657,16 +2834,16 @@ static void notdirty_mem_writeb(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
int dirty_flags;
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 1);
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
stb_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+ cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
@@ -2677,16 +2854,16 @@ static void notdirty_mem_writew(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
int dirty_flags;
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 2);
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
stw_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+ cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
@@ -2697,16 +2874,16 @@ static void notdirty_mem_writel(void *opaque, target_phys_addr_t ram_addr,
uint32_t val)
{
int dirty_flags;
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
if (!(dirty_flags & CODE_DIRTY_FLAG)) {
#if !defined(CONFIG_USER_ONLY)
tb_invalidate_phys_page_fast(ram_addr, 4);
- dirty_flags = phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS];
+ dirty_flags = cpu_physical_memory_get_dirty_flags(ram_addr);
#endif
}
stl_p(qemu_get_ram_ptr(ram_addr), val);
dirty_flags |= (0xff & ~CODE_DIRTY_FLAG);
- phys_ram_dirty[ram_addr >> TARGET_PAGE_BITS] = dirty_flags;
+ cpu_physical_memory_set_dirty_flags(ram_addr, dirty_flags);
/* we remove the notdirty callback only if the code has been
flushed */
if (dirty_flags == 0xff)
@@ -3158,8 +3335,8 @@ void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
}
}
} else {
@@ -3355,7 +3532,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
{
if (buffer != bounce.buffer) {
if (is_write) {
- ram_addr_t addr1 = qemu_ram_addr_from_host(buffer);
+ ram_addr_t addr1 = qemu_ram_addr_from_host_nofail(buffer);
while (access_len) {
unsigned l;
l = TARGET_PAGE_SIZE;
@@ -3365,8 +3542,8 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + l, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
}
addr1 += l;
access_len -= l;
@@ -3377,7 +3554,7 @@ void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
if (is_write) {
cpu_physical_memory_write(bounce.addr, bounce.buffer, access_len);
}
- qemu_free(bounce.buffer);
+ qemu_vfree(bounce.buffer);
bounce.buffer = NULL;
cpu_notify_map_clients();
}
@@ -3500,8 +3677,8 @@ void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val)
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(
+ addr1, (0xff & ~CODE_DIRTY_FLAG));
}
}
}
@@ -3569,8 +3746,8 @@ void stl_phys(target_phys_addr_t addr, uint32_t val)
/* invalidate code */
tb_invalidate_phys_page_range(addr1, addr1 + 4, 0);
/* set dirty bit */
- phys_ram_dirty[addr1 >> TARGET_PAGE_BITS] |=
- (0xff & ~CODE_DIRTY_FLAG);
+ cpu_physical_memory_set_dirty_flags(addr1,
+ (0xff & ~CODE_DIRTY_FLAG));
}
}
}
@@ -3690,8 +3867,7 @@ void cpu_io_recompile(CPUState *env, void *retaddr)
#if !defined(CONFIG_USER_ONLY)
-void dump_exec_info(FILE *f,
- int (*cpu_fprintf)(FILE *f, const char *fmt, ...))
+void dump_exec_info(FILE *f, fprintf_function cpu_fprintf)
{
int i, target_code_size, max_target_code_size;
int direct_jmp_count, direct_jmp2_count, cross_page;
@@ -3718,14 +3894,14 @@ void dump_exec_info(FILE *f,
}
/* XXX: avoid using doubles ? */
cpu_fprintf(f, "Translation buffer state:\n");
- cpu_fprintf(f, "gen code size %ld/%ld\n",
+ cpu_fprintf(f, "gen code size %td/%ld\n",
code_gen_ptr - code_gen_buffer, code_gen_buffer_max_size);
cpu_fprintf(f, "TB count %d/%d\n",
nb_tbs, code_gen_max_blocks);
cpu_fprintf(f, "TB avg target size %d max=%d bytes\n",
nb_tbs ? target_code_size / nb_tbs : 0,
max_target_code_size);
- cpu_fprintf(f, "TB avg host size %d bytes (expansion ratio: %0.1f)\n",
+ cpu_fprintf(f, "TB avg host size %td bytes (expansion ratio: %0.1f)\n",
nb_tbs ? (code_gen_ptr - code_gen_buffer) / nb_tbs : 0,
target_code_size ? (double) (code_gen_ptr - code_gen_buffer) / target_code_size : 0);
cpu_fprintf(f, "cross page TB count %d (%d%%)\n",