author    David Schleef <ds@ti.com>      2012-01-10 21:43:59 -0800
committer Ziyann <jaraidaniel@gmail.com> 2014-10-01 12:58:31 +0200
commit    7b1e8017bbb06d0d04eb78e62596e364419b15db
tree      d14cfb666ed8513d6ea4f861452ca6f6deea2843
parent    efeba290447c54a1968c24d9094f729de1fc39f6
ion: add support for tiler reservation heap
The tiler reservation heap is for allocating TILER address space, but
not the underlying pages. The idea is to allocate TILER address space
and one or more ION buffers from the system heap, and map system heap
buffers into the allocated TILER address space.
Change-Id: Ibd53ab811259cabec87384cf2ab4f99c0cbdca23
Signed-off-by: David Schleef <ds@ti.com>
Conflicts:
drivers/gpu/ion/omap/omap_tiler_heap.c
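The interesting mechanism in the rewritten carveout path below is its lump-first strategy: try to satisfy all backing pages with a single contiguous gen_pool allocation, and fall back to page-by-page allocation (unwinding on failure) only when the pool is too fragmented. Here is a minimal, self-contained sketch of that pattern, assuming a kernel-module context; the function name `alloc_backing_pages` and its parameters are illustrative, not part of the patch:

```c
#include <linux/errno.h>
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/types.h>

/*
 * Sketch of the lump-first carveout pattern used by
 * omap_tiler_alloc_carveout() in the diff below. Names here are
 * illustrative, not part of the patch.
 */
static int alloc_backing_pages(struct gen_pool *pool, u32 *addrs,
			       u32 n_pages, bool *lump)
{
	unsigned long addr;
	int i;

	/* Fast path: one contiguous allocation covering every page. */
	addr = gen_pool_alloc(pool, n_pages * PAGE_SIZE);
	if (addr) {
		*lump = true;
		for (i = 0; i < n_pages; i++)
			addrs[i] = addr + i * PAGE_SIZE;
		return 0;
	}

	/* Slow path: the pool is fragmented, so take pages one by one. */
	*lump = false;
	for (i = 0; i < n_pages; i++) {
		addr = gen_pool_alloc(pool, PAGE_SIZE);
		if (!addr)
			goto unwind;
		addrs[i] = addr;
	}
	return 0;

unwind:
	/* Free only the pages this call managed to allocate. */
	while (--i >= 0)
		gen_pool_free(pool, addrs[i], PAGE_SIZE);
	return -ENOMEM;
}
```

Tracking the lump flag lets the matching free path release the whole run with a single gen_pool_free() rather than n_pages individual frees, which is what omap_tiler_free_carveout() does in the patch.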
 drivers/gpu/ion/omap/omap_ion.c        |   3
 drivers/gpu/ion/omap/omap_tiler_heap.c | 174
 include/linux/omap_ion.h               |   2
 3 files changed, 118 insertions(+), 61 deletions(-)
 mode change 100644 => 100755 drivers/gpu/ion/omap/omap_tiler_heap.c
diff --git a/drivers/gpu/ion/omap/omap_ion.c b/drivers/gpu/ion/omap/omap_ion.c
index a2e97e2..a2caea7 100644
--- a/drivers/gpu/ion/omap/omap_ion.c
+++ b/drivers/gpu/ion/omap/omap_ion.c
@@ -105,6 +105,9 @@ int omap_ion_probe(struct platform_device *pdev)
 				nonsecure_tiler_heap = heaps[i];
 			else
 				tiler_heap = heaps[i];
+		} else if (heap_data->type ==
+				OMAP_ION_HEAP_TYPE_TILER_RESERVATION) {
+			heaps[i] = omap_tiler_heap_create(heap_data);
 		} else {
 			heaps[i] = ion_heap_create(heap_data);
 		}
diff --git a/drivers/gpu/ion/omap/omap_tiler_heap.c b/drivers/gpu/ion/omap/omap_tiler_heap.c
old mode 100644
new mode 100755
index c3a28dd..0cf033e
--- a/drivers/gpu/ion/omap/omap_tiler_heap.c
+++ b/drivers/gpu/ion/omap/omap_tiler_heap.c
@@ -32,6 +32,25 @@
 
 #define TILER_ENABLE_NON_PAGE_ALIGNED_ALLOCATIONS 1
 
+struct omap_ion_heap {
+	struct ion_heap heap;
+	struct gen_pool *pool;
+	ion_phys_addr_t base;
+};
+
+struct omap_tiler_info {
+	tiler_blk_handle tiler_handle;	/* handle of the allocation intiler */
+	bool lump;			/* true for a single lump allocation */
+	u32 n_phys_pages;		/* number of physical pages */
+	u32 *phys_addrs;		/* array addrs of pages */
+	u32 n_tiler_pages;		/* number of tiler pages */
+	u32 *tiler_addrs;		/* array of addrs of tiler pages */
+	int fmt;			/* tiler buffer format */
+	u32 tiler_start;		/* start addr in tiler -- if not page
+					   aligned this may not equal the
+					   first entry onf tiler_addrs */
+};
+
 static int omap_tiler_heap_allocate(struct ion_heap *heap,
 				    struct ion_buffer *buffer,
 				    unsigned long size, unsigned long align,
@@ -46,17 +65,57 @@ static int omap_tiler_heap_allocate(struct ion_heap *heap,
 	return -EINVAL;
 }
 
-struct omap_tiler_info {
-	tiler_blk_handle tiler_handle;	/* handle of the allocation intiler */
-	bool lump;			/* true for a single lump allocation */
-	u32 n_phys_pages;		/* number of physical pages */
-	u32 *phys_addrs;		/* array addrs of pages */
-	u32 n_tiler_pages;		/* number of tiler pages */
-	u32 *tiler_addrs;		/* array of addrs of tiler pages */
-	u32 tiler_start;		/* start addr in tiler -- if not page
-					   aligned this may not equal the
-					   first entry onf tiler_addrs */
-};
+static int omap_tiler_alloc_carveout(struct ion_heap *heap,
+				     struct omap_tiler_info *info)
+{
+	struct omap_ion_heap *omap_heap = (struct omap_ion_heap *)heap;
+	int i;
+	int ret;
+	ion_phys_addr_t addr;
+
+	addr = gen_pool_alloc(omap_heap->pool, info->n_phys_pages * PAGE_SIZE);
+	if (addr) {
+		info->lump = true;
+		for (i = 0; i < info->n_phys_pages; i++)
+			info->phys_addrs[i] = addr + i * PAGE_SIZE;
+		return 0;
+	}
+
+	for (i = 0; i < info->n_phys_pages; i++) {
+		addr = gen_pool_alloc(omap_heap->pool, PAGE_SIZE);
+
+		if (addr == 0) {
+			ret = -ENOMEM;
+			pr_err("%s: failed to allocate pages to back "
+			       "tiler address space\n", __func__);
+			goto err;
+		}
+		info->phys_addrs[i] = addr;
+	}
+	return 0;
+
+err:
+	for (i -= 1; i >= 0; i--)
+		gen_pool_free(omap_heap->pool, info->phys_addrs[i], PAGE_SIZE);
+	return ret;
+}
+
+static void omap_tiler_free_carveout(struct ion_heap *heap,
+				     struct omap_tiler_info *info)
+{
+	struct omap_ion_heap *omap_heap = (struct omap_ion_heap *)heap;
+	int i;
+
+	if (info->lump) {
+		gen_pool_free(omap_heap->pool,
+			      info->phys_addrs[0],
+			      info->n_phys_pages * PAGE_SIZE);
+		return;
+	}
+
+	for (i = 0; i < info->n_phys_pages; i++)
+		gen_pool_free(omap_heap->pool, info->phys_addrs[i], PAGE_SIZE);
+}
 
 int omap_tiler_alloc(struct ion_heap *heap,
 		     struct ion_client *client,
@@ -70,8 +129,7 @@ int omap_tiler_alloc(struct ion_heap *heap,
 	u32 tiler_start = 0;
 	u32 v_size;
 	tiler_blk_handle tiler_handle;
-	ion_phys_addr_t addr = 0;
-	int i = 0, ret;
+	int ret;
 
 	if (data->fmt == TILER_PIXEL_FMT_PAGE && data->h != 1) {
 		pr_err("%s: Page mode (1D) allocations must have a height "
@@ -132,32 +190,20 @@ int omap_tiler_alloc(struct ion_heap *heap,
 	info->phys_addrs = (u32 *)(info + 1);
 	info->tiler_addrs = info->phys_addrs + n_phys_pages;
 
-	addr = ion_carveout_allocate(heap, n_phys_pages*PAGE_SIZE, 0);
-	if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
-		for (i = 0; i < n_phys_pages; i++) {
-			addr = ion_carveout_allocate(heap, PAGE_SIZE, 0);
-
-			if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
-				ret = -ENOMEM;
-				pr_err("%s: failed to allocate pages to back "
-					"tiler address space\n", __func__);
-				goto err_alloc;
-			}
-			info->phys_addrs[i] = addr;
+	if ((heap->id == OMAP_ION_HEAP_TILER) ||
+	    (heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
+		ret = omap_tiler_alloc_carveout(heap, info);
+		if (ret)
+			goto err_alloc;
+
+		ret = tiler_pin_block(info->tiler_handle, info->phys_addrs,
+				      info->n_phys_pages);
+		if (ret) {
+			pr_err("%s: failure to pin pages to tiler\n",
+			       __func__);
+			goto err_pin;
 		}
-	} else {
-		info->lump = true;
-		for (i = 0; i < n_phys_pages; i++)
-			info->phys_addrs[i] = addr + i*PAGE_SIZE;
-	}
-
-	ret = tiler_pin_block(info->tiler_handle, info->phys_addrs,
-			      info->n_phys_pages);
-	if (ret) {
-		pr_err("%s: failure to pin pages to tiler\n", __func__);
-		goto err_alloc;
 	}
-
 	data->stride = tiler_block_vstride(info->tiler_handle);
 
 	/* create an ion handle for the allocation */
@@ -186,16 +232,13 @@
 
 err:
 	tiler_unpin_block(info->tiler_handle);
-err_alloc:
-	tiler_free_block_area(info->tiler_handle);
-	if(info)
-	{
-		if (info->lump)
-			ion_carveout_free(heap, addr, n_phys_pages * PAGE_SIZE);
-		else
-			for (i -= 1; i >= 0; i--)
-				ion_carveout_free(heap, info->phys_addrs[i], PAGE_SIZE);
+err_pin:
+	if ((heap->id == OMAP_ION_HEAP_TILER) ||
+	    (heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
+		omap_tiler_free_carveout(heap, info);
 	}
+err_alloc:
+	tiler_free_block_area(tiler_handle);
 err_nomem:
 	kfree(info);
 	return ret;
@@ -208,14 +251,9 @@ void omap_tiler_heap_free(struct ion_buffer *buffer)
 	tiler_unpin_block(info->tiler_handle);
 	tiler_free_block_area(info->tiler_handle);
 
-	if (info->lump) {
-		ion_carveout_free(buffer->heap, info->phys_addrs[0],
-				  info->n_phys_pages*PAGE_SIZE);
-	} else {
-		int i;
-		for (i = 0; i < info->n_phys_pages; i++)
-			ion_carveout_free(buffer->heap,
-					  info->phys_addrs[i], PAGE_SIZE);
+	if ((buffer->heap->id == OMAP_ION_HEAP_TILER) ||
+	    (buffer->heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
+		omap_tiler_free_carveout(buffer->heap, info);
 	}
 
 	kfree(info);
@@ -290,19 +328,33 @@ static struct ion_heap_ops omap_tiler_ops = {
 
 struct ion_heap *omap_tiler_heap_create(struct ion_platform_heap *data)
 {
-	struct ion_heap *heap;
+	struct omap_ion_heap *heap;
 
-	heap = ion_carveout_heap_create(data);
+	heap = kzalloc(sizeof(struct omap_ion_heap), GFP_KERNEL);
 	if (!heap)
 		return ERR_PTR(-ENOMEM);
-	heap->ops = &omap_tiler_ops;
-	heap->type = OMAP_ION_HEAP_TYPE_TILER;
-	heap->name = data->name;
-	heap->id = data->id;
-	return heap;
+
+	if ((data->id == OMAP_ION_HEAP_TILER) ||
+	    (data->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
+		heap->pool = gen_pool_create(12, -1);
+		if (!heap->pool) {
+			kfree(heap);
+			return ERR_PTR(-ENOMEM);
+		}
+		heap->base = data->base;
+		gen_pool_add(heap->pool, heap->base, data->size, -1);
+	}
+	heap->heap.ops = &omap_tiler_ops;
+	heap->heap.type = OMAP_ION_HEAP_TYPE_TILER;
+	heap->heap.name = data->name;
+	heap->heap.id = data->id;
+	return &heap->heap;
 }
 
 void omap_tiler_heap_destroy(struct ion_heap *heap)
 {
+	struct omap_ion_heap *omap_ion_heap = (struct omap_ion_heap *)heap;
+	if (omap_ion_heap->pool)
+		gen_pool_destroy(omap_ion_heap->pool);
 	kfree(heap);
 }
diff --git a/include/linux/omap_ion.h b/include/linux/omap_ion.h
index 13a0d90..2209110 100644
--- a/include/linux/omap_ion.h
+++ b/include/linux/omap_ion.h
@@ -62,6 +62,7 @@ int omap_tiler_vinfo(struct ion_client *client,
 /* additional heaps used only on omap */
 enum {
 	OMAP_ION_HEAP_TYPE_TILER = ION_HEAP_TYPE_CUSTOM + 1,
+	OMAP_ION_HEAP_TYPE_TILER_RESERVATION,
 };
 
 #define OMAP_ION_HEAP_TILER_MASK (1 << OMAP_ION_HEAP_TYPE_TILER)
@@ -90,6 +91,7 @@ enum {
 	OMAP_ION_HEAP_TILER,
 	OMAP_ION_HEAP_SECURE_INPUT,
 	OMAP_ION_HEAP_NONSECURE_TILER,
+	OMAP_ION_HEAP_TILER_RESERVATION,
 };
 
 #endif /* _LINUX_ION_H */
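For context on how the two new enum values get used: omap_ion_probe() (first hunk) routes any platform heap entry carrying the new type to omap_tiler_heap_create(), so a board file only needs one extra entry in its ION heap table. A hedged sketch follows, assuming the struct ion_platform_heap layout of this kernel generation (type, id, name, base, size); the array name and the base/size values are purely illustrative:

```c
#include <linux/ion.h>
#include <linux/omap_ion.h>

/*
 * Hypothetical board-file heap table. The reservation heap carries no
 * base/size: it hands out TILER address space rather than backing
 * pages, so omap_tiler_heap_create() skips creating a gen_pool for it.
 */
static struct ion_platform_heap example_ion_heaps[] = {
	{
		.type = OMAP_ION_HEAP_TYPE_TILER,
		.id   = OMAP_ION_HEAP_TILER,
		.name = "tiler",
		.base = 0x9c000000,		/* illustrative carveout base */
		.size = 32 * 1024 * 1024,	/* illustrative carveout size */
	},
	{
		.type = OMAP_ION_HEAP_TYPE_TILER_RESERVATION,
		.id   = OMAP_ION_HEAP_TILER_RESERVATION,
		.name = "tiler_reservation",
	},
};
```

An allocation against the reservation heap then yields TILER address space with no pinned pages, and system-heap buffers can be mapped into that space afterwards, which is the flow the commit message describes.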