author     Jiang Yunhong <yunhong.jiang@intel.com>      2011-08-30 15:50:31 +0800
committer  David 'Digit' Turner <digit@google.com>      2011-09-13 12:08:51 +0200
commit     f1f1f5ff5b87d4f754572e0f05398842f0cde059
tree       197c2e76d3970c7c32ff20196aa07290b25455f6 /hw
parent     e07ec9adfc33c0cfb2056180960d7e62fc5ea7c7
Accelerate NAND device in virtualization environment
Currently the NAND device driver needs 8 MMIO accesses for each goldfish NAND device
operation. MMIO access is expensive in a virtualization environment because each access
causes a context switch from the in-kernel module to the QEMU user-space process.
By introducing a new batch command, these multiple MMIO accesses can be combined
into one, improving virtualization performance. The boot time in a KVM environment
drops from 25~26 seconds to 20~21 seconds.
A kernel patch is required to make use of the new batch command (a rough guest-side
usage sketch follows after the sign-offs). A patched kernel detects batch support
through a new flag on the goldfish NAND device, while existing kernels are not affected.
Change-Id: Ia460b1ba3c6fde5b1fc6101bb66f8f58e46e6a78
Signed-off-by: Xin Xiaohui <xiaohui.xin@intel.com>
Signed-off-by: Zhang Xiantao <xiantao.zhang@intel.com>
Signed-off-by: Jiang Yunhong <yunhong.jiang@intel.com>
Signed-off-by: Nakajima Jun <jun.nakajima@intel.com>
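
For context, here is a rough guest-side sketch of how a batch-aware kernel driver could
use this interface: it describes the whole transfer in a struct batch_data in guest
memory, points the device at that struct via the two new batch-address registers, and
then triggers the operation with a single write to NAND_COMMAND, i.e. one MMIO exit
instead of eight. The mmio_write32() helper, nand_base pointer and batch_phys address
are illustrative assumptions only; this change only touches the QEMU device model, and
the matching kernel patch is not shown here.

/*
 * Illustrative sketch only -- not part of this patch. It uses the registers,
 * commands and struct batch_data added in hw/goldfish_nand_reg.h below.
 * mmio_write32(), nand_base and batch_phys are assumed to be provided by the
 * (hypothetical) guest driver environment.
 */
#include <stdint.h>
#include <string.h>
#include "goldfish_nand_reg.h"

/* Assumed helper: 32-bit write to a goldfish NAND MMIO register. */
extern void mmio_write32(void *base, uint32_t reg, uint32_t val);

static uint32_t goldfish_nand_read_batched(void *nand_base,
                                           struct batch_data *bd,
                                           uint64_t batch_phys,
                                           uint32_t dev, uint64_t addr,
                                           uint32_t size, uint32_t data_gpa)
{
    /* Describe the whole transfer in guest memory instead of per-field
     * register writes, each of which would cost one MMIO exit. */
    memset(bd, 0, sizeof(*bd));
    bd->dev           = dev;
    bd->addr_low      = (uint32_t)addr;
    bd->addr_high     = (uint32_t)(addr >> 32);
    bd->transfer_size = size;
    bd->data          = data_gpa;   /* guest address of the data buffer */

    /* Tell the device where batch_data lives; this could be programmed once
     * at init time and reused for every later batch command. */
    mmio_write32(nand_base, NAND_BATCH_ADDR_LOW,  (uint32_t)batch_phys);
    mmio_write32(nand_base, NAND_BATCH_ADDR_HIGH, (uint32_t)(batch_phys >> 32));

    /* Single MMIO exit: QEMU reads batch_data, performs the read and stores
     * the byte count back into bd->result before returning. */
    mmio_write32(nand_base, NAND_COMMAND, NAND_CMD_READ_BATCH);

    return bd->result;
}

As the diff below shows, the device writes the whole struct batch_data back to the
batch address when reporting the result, so a guest should treat every field other
than result as clobbered once the command completes.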
Diffstat (limited to 'hw')
-rw-r--r--   hw/goldfish_nand.c       | 33
-rw-r--r--   hw/goldfish_nand_reg.h   | 19
2 files changed, 50 insertions(+), 2 deletions(-)
diff --git a/hw/goldfish_nand.c b/hw/goldfish_nand.c
index eb0c355..49d575a 100644
--- a/hw/goldfish_nand.c
+++ b/hw/goldfish_nand.c
@@ -130,6 +130,8 @@ typedef struct {
     uint32_t addr_high;
     uint32_t transfer_size;
     uint32_t data;
+    uint32_t batch_addr_low;
+    uint32_t batch_addr_high;
     uint32_t result;
 } nand_dev_controller_state;
@@ -457,6 +459,18 @@ uint32_t nand_dev_do_cmd(nand_dev_controller_state *s, uint32_t cmd)
     uint64_t addr;
     nand_dev *dev;
 
+    if (cmd == NAND_CMD_WRITE_BATCH || cmd == NAND_CMD_READ_BATCH ||
+        cmd == NAND_CMD_ERASE_BATCH) {
+        struct batch_data bd;
+        uint64_t bd_addr = ((uint64_t)s->batch_addr_high << 32) | s->batch_addr_low;
+
+        cpu_physical_memory_read(bd_addr, (void*)&bd, sizeof(struct batch_data));
+        s->dev = bd.dev;
+        s->addr_low = bd.addr_low;
+        s->addr_high = bd.addr_high;
+        s->transfer_size = bd.transfer_size;
+        s->data = bd.data;
+    }
     addr = s->addr_low | ((uint64_t)s->addr_high << 32);
     size = s->transfer_size;
     if(s->dev >= nand_dev_count)
@@ -473,6 +487,7 @@ uint32_t nand_dev_do_cmd(nand_dev_controller_state *s, uint32_t cmd)
 #endif
         cpu_memory_rw_debug(cpu_single_env, s->data, (uint8_t*)dev->devname, size, 1);
         return size;
+    case NAND_CMD_READ_BATCH:
     case NAND_CMD_READ:
         if(addr >= dev->max_size)
             return 0;
@@ -486,6 +501,7 @@ uint32_t nand_dev_do_cmd(nand_dev_controller_state *s, uint32_t cmd)
 #endif
         cpu_memory_rw_debug(cpu_single_env,s->data, &dev->data[addr], size, 1);
         return size;
+    case NAND_CMD_WRITE_BATCH:
     case NAND_CMD_WRITE:
         if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
             return 0;
@@ -501,6 +517,7 @@ uint32_t nand_dev_do_cmd(nand_dev_controller_state *s, uint32_t cmd)
 #endif
         cpu_memory_rw_debug(cpu_single_env,s->data, &dev->data[addr], size, 0);
         return size;
+    case NAND_CMD_ERASE_BATCH:
     case NAND_CMD_ERASE:
         if(dev->flags & NAND_DEV_FLAG_READ_ONLY)
             return 0;
@@ -542,6 +559,12 @@ static void nand_dev_write(void *opaque, target_phys_addr_t offset, uint32_t val
     case NAND_ADDR_LOW:
         s->addr_low = value;
         break;
+    case NAND_BATCH_ADDR_LOW:
+        s->batch_addr_low = value;
+        break;
+    case NAND_BATCH_ADDR_HIGH:
+        s->batch_addr_high = value;
+        break;
     case NAND_TRANSFER_SIZE:
         s->transfer_size = value;
         break;
@@ -550,6 +573,13 @@ static void nand_dev_write(void *opaque, target_phys_addr_t offset, uint32_t val
         break;
     case NAND_COMMAND:
         s->result = nand_dev_do_cmd(s, value);
+        if (value == NAND_CMD_WRITE_BATCH || value == NAND_CMD_READ_BATCH ||
+            value == NAND_CMD_ERASE_BATCH) {
+            struct batch_data bd;
+            uint64_t bd_addr = ((uint64_t)s->batch_addr_high << 32) | s->batch_addr_low;
+            bd.result = s->result;
+            cpu_physical_memory_write(bd_addr, (void*)&bd, sizeof(struct batch_data));
+        }
         break;
     default:
         cpu_abort(cpu_single_env, "nand_dev_write: Bad offset %x\n", offset);
@@ -810,6 +840,9 @@ void nand_add_dev(const char *arg)
     if(dev->data == NULL)
         goto out_of_memory;
     dev->flags = read_only ? NAND_DEV_FLAG_READ_ONLY : 0;
+#ifdef TARGET_I386
+    dev->flags |= NAND_DEV_FLAG_BATCH_CAP;
+#endif
 
     if (initfd >= 0) {
         do {
diff --git a/hw/goldfish_nand_reg.h b/hw/goldfish_nand_reg.h
index ea91461..34d7c44 100644
--- a/hw/goldfish_nand_reg.h
+++ b/hw/goldfish_nand_reg.h
@@ -18,11 +18,24 @@ enum nand_cmd {
     NAND_CMD_WRITE,
     NAND_CMD_ERASE,
     NAND_CMD_BLOCK_BAD_GET, // NAND_RESULT is 1 if block is bad, 0 if it is not
-    NAND_CMD_BLOCK_BAD_SET
+    NAND_CMD_BLOCK_BAD_SET,
+    NAND_CMD_READ_BATCH,   // BATCH OP extensions.
+    NAND_CMD_WRITE_BATCH,
+    NAND_CMD_ERASE_BATCH
+};
+
+struct batch_data{
+    uint32_t dev;
+    uint32_t addr_low;
+    uint32_t addr_high;
+    uint32_t transfer_size;
+    uint32_t data;
+    uint32_t result;
 };
 
 enum nand_dev_flags {
-    NAND_DEV_FLAG_READ_ONLY = 0x00000001
+    NAND_DEV_FLAG_READ_ONLY = 0x00000001,
+    NAND_DEV_FLAG_BATCH_CAP = 0x00000002
 };
 
 #define NAND_VERSION_CURRENT (1)
@@ -49,6 +62,8 @@ enum nand_reg {
     NAND_TRANSFER_SIZE  = 0x04c,
     NAND_ADDR_LOW       = 0x050,
     NAND_ADDR_HIGH      = 0x054,
+    NAND_BATCH_ADDR_LOW = 0x058,
+    NAND_BATCH_ADDR_HIGH= 0x05c,
 };
 
 #endif