-rw-r--r--  drivers/gpu/ion/ion.c                     107
-rw-r--r--  drivers/gpu/ion/ion_carveout_heap.c        49
-rw-r--r--  drivers/gpu/ion/ion_priv.h                 20
-rwxr-xr-x  drivers/gpu/ion/omap/omap_tiler_heap.c     95
-rw-r--r--  include/linux/ion.h                        24
5 files changed, 287 insertions, 8 deletions
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index b35ff4b..bafcc37 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -83,6 +83,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
}
buffer->dev = dev;
buffer->size = len;
+ buffer->cached = false;
mutex_init(&buffer->lock);
ion_buffer_add(dev, buffer);
return buffer;
@@ -899,6 +900,60 @@ err:
return ret;
}
+static int ion_flush_cached(struct ion_handle *handle, size_t size,
+ unsigned long vaddr)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ if (!handle->buffer->heap->ops->flush_user) {
+ pr_err("%s: this heap does not define a method for flushing\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+ /* now flush buffer mapped to userspace */
+ ret = buffer->heap->ops->flush_user(buffer, size, vaddr);
+ mutex_unlock(&buffer->lock);
+ if (ret) {
+ pr_err("%s: failure flushing buffer\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int ion_inval_cached(struct ion_handle *handle, size_t size,
+ unsigned long vaddr)
+{
+ struct ion_buffer *buffer;
+ int ret;
+
+ if (!handle->buffer->heap->ops->inval_user) {
+ pr_err("%s: this heap does not define a method for invalidating\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ buffer = handle->buffer;
+
+ mutex_lock(&buffer->lock);
+ /* now invalidate buffer mapped to userspace */
+ ret = buffer->heap->ops->inval_user(buffer, size, vaddr);
+ mutex_unlock(&buffer->lock);
+ if (ret) {
+ pr_err("%s: failure invalidating buffer\n",
+ __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
static const struct file_operations ion_share_fops = {
.owner = THIS_MODULE,
.release = ion_share_release,
@@ -975,6 +1030,9 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
mutex_unlock(&client->lock);
return -EINVAL;
}
+
+ if (cmd == ION_IOC_MAP)
+ data.handle->buffer->cached = data.cacheable;
data.fd = ion_ioctl_share(filp, client, data.handle);
mutex_unlock(&client->lock);
if (copy_to_user((void __user *)arg, &data, sizeof(data)))
@@ -1008,6 +1066,55 @@ static long ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
return -EFAULT;
return dev->custom_ioctl(client, data.cmd, data.arg);
}
+
+ case ION_IOC_FLUSH_CACHED:
+ {
+ struct ion_cached_user_buf_data data;
+ int ret;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, data.handle)) {
+ pr_err("%s: invalid handle passed to cache flush ioctl.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ ret = ion_flush_cached(data.handle, data.size, data.vaddr);
+ mutex_unlock(&client->lock);
+ if (ret)
+ return ret;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ break;
+ }
+
+ case ION_IOC_INVAL_CACHED:
+ {
+ struct ion_cached_user_buf_data data;
+ int ret;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ mutex_lock(&client->lock);
+ if (!ion_handle_validate(client, data.handle)) {
+ pr_err("%s: invalid handle passed to cache inval ioctl.\n",
+ __func__);
+ mutex_unlock(&client->lock);
+ return -EINVAL;
+ }
+
+ ret = ion_inval_cached(data.handle, data.size, data.vaddr);
+ mutex_unlock(&client->lock);
+ if (ret)
+ return ret;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data)))
+ return -EFAULT;
+ break;
+ }
+
default:
return -ENOTTY;
}
diff --git a/drivers/gpu/ion/ion_carveout_heap.c b/drivers/gpu/ion/ion_carveout_heap.c
index 606adae..ce71368 100644
--- a/drivers/gpu/ion/ion_carveout_heap.c
+++ b/drivers/gpu/ion/ion_carveout_heap.c
@@ -26,6 +26,7 @@
#include "ion_priv.h"
#include <asm/mach/map.h>
+#include <asm/cacheflush.h>
struct ion_carveout_heap {
struct ion_heap heap;
@@ -117,14 +118,60 @@ int ion_carveout_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
return remap_pfn_range(vma, vma->vm_start,
__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
buffer->size,
- pgprot_noncached(vma->vm_page_prot));
+ (buffer->cached ? (vma->vm_page_prot)
+ : pgprot_writecombine(vma->vm_page_prot)));
}
+static void per_cpu_cache_flush_arm(void *arg)
+{
+ flush_cache_all();
+}
+
+int ion_carveout_heap_cache_operation(struct ion_buffer *buffer, size_t len,
+ unsigned long vaddr, enum cache_operation cacheop)
+{
+ if (!buffer || !buffer->cached) {
+ pr_err("%s(): buffer not mapped as cacheable\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (len > FULL_CACHE_FLUSH_THRESHOLD) {
+ on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
+ outer_flush_all();
+ return 0;
+ }
+
+ flush_cache_user_range(vaddr, (vaddr+len));
+
+ if (cacheop == CACHE_FLUSH)
+ outer_flush_range(buffer->priv_phys, buffer->priv_phys+len);
+ else
+ outer_inv_range(buffer->priv_phys, buffer->priv_phys+len);
+
+ return 0;
+}
+
+int ion_carveout_heap_flush_user(struct ion_buffer *buffer, size_t len,
+ unsigned long vaddr)
+{
+ return ion_carveout_heap_cache_operation(buffer, len,
+ vaddr, CACHE_FLUSH);
+}
+
+int ion_carveout_heap_inval_user(struct ion_buffer *buffer, size_t len,
+ unsigned long vaddr)
+{
+ return ion_carveout_heap_cache_operation(buffer, len,
+ vaddr, CACHE_INVALIDATE);
+}
static struct ion_heap_ops carveout_heap_ops = {
.allocate = ion_carveout_heap_allocate,
.free = ion_carveout_heap_free,
.phys = ion_carveout_heap_phys,
.map_user = ion_carveout_heap_map_user,
+ .flush_user = ion_carveout_heap_flush_user,
+ .inval_user = ion_carveout_heap_inval_user,
.map_kernel = ion_carveout_heap_map_kernel,
.unmap_kernel = ion_carveout_heap_unmap_kernel,
};
diff --git a/drivers/gpu/ion/ion_priv.h b/drivers/gpu/ion/ion_priv.h
index cf5fe60..ec51972 100644
--- a/drivers/gpu/ion/ion_priv.h
+++ b/drivers/gpu/ion/ion_priv.h
@@ -143,6 +143,7 @@ struct ion_buffer {
void *vaddr;
int dmap_cnt;
struct scatterlist *sglist;
+ bool cached;
};
/**
@@ -156,6 +157,8 @@ struct ion_buffer {
* @map_kernel map memory to the kernel
* @unmap_kernel unmap memory to the kernel
* @map_user map memory to userspace
+ * @flush_user flush memory if mapped as cacheable
+ * @inval_user invalidate memory if mapped as cacheable
*/
struct ion_heap_ops {
int (*allocate) (struct ion_heap *heap,
@@ -171,6 +174,10 @@ struct ion_heap_ops {
void (*unmap_kernel) (struct ion_heap *heap, struct ion_buffer *buffer);
int (*map_user) (struct ion_heap *mapper, struct ion_buffer *buffer,
struct vm_area_struct *vma);
+ int (*flush_user) (struct ion_buffer *buffer, size_t len,
+ unsigned long vaddr);
+ int (*inval_user) (struct ion_buffer *buffer, size_t len,
+ unsigned long vaddr);
};
/**
@@ -253,4 +260,17 @@ void ion_carveout_free(struct ion_heap *heap, ion_phys_addr_t addr,
*/
#define ION_CARVEOUT_ALLOCATE_FAIL -1
+/**
+ * Flushing the entire cache is more efficient than flushing the virtual
+ * address range of a buffer of 200 KB or more, since line-by-line
+ * operations on large buffers consume a lot of CPU cycles.
+ */
+#define FULL_CACHE_FLUSH_THRESHOLD 200000
+
+enum cache_operation {
+ CACHE_CLEAN = 0x0,
+ CACHE_INVALIDATE = 0x1,
+ CACHE_FLUSH = 0x2,
+};
+
#endif /* _ION_PRIV_H */
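
Both heaps touched by this patch implement the two new ops the same way, so the expected shape for any other heap follows directly from the kernel-doc above. Below is a minimal sketch of that pattern, assuming a physically contiguous buffer whose base address sits in buffer->priv_phys and the same FULL_CACHE_FLUSH_THRESHOLD policy; the my_heap_* names are hypothetical and not part of this patch.

/* Sketch only: mirrors the carveout/TILER pattern in this series and
 * assumes <asm/cacheflush.h> and "ion_priv.h" are included as in those
 * files. */
static void my_heap_flush_all(void *unused)
{
	flush_cache_all();		/* run on every CPU via on_each_cpu() */
}

static int my_heap_cache_op(struct ion_buffer *buffer, size_t len,
			    unsigned long vaddr, enum cache_operation cacheop)
{
	if (!buffer || !buffer->cached)
		return -EINVAL;		/* only cacheable mappings qualify */

	if (len > FULL_CACHE_FLUSH_THRESHOLD) {
		/* large buffer: a full flush beats walking it line by line */
		on_each_cpu(my_heap_flush_all, NULL, 1);
		outer_flush_all();
		return 0;
	}

	/* inner caches by user virtual address, outer (L2) by physical */
	flush_cache_user_range(vaddr, vaddr + len);
	if (cacheop == CACHE_FLUSH)
		outer_flush_range(buffer->priv_phys, buffer->priv_phys + len);
	else
		outer_inv_range(buffer->priv_phys, buffer->priv_phys + len);
	return 0;
}

static int my_heap_flush_user(struct ion_buffer *buffer, size_t len,
			      unsigned long vaddr)
{
	return my_heap_cache_op(buffer, len, vaddr, CACHE_FLUSH);
}

static int my_heap_inval_user(struct ion_buffer *buffer, size_t len,
			      unsigned long vaddr)
{
	return my_heap_cache_op(buffer, len, vaddr, CACHE_INVALIDATE);
}

The heap then points .flush_user and .inval_user in its ion_heap_ops at these two wrappers, exactly as the carveout and TILER hunks do.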
diff --git a/drivers/gpu/ion/omap/omap_tiler_heap.c b/drivers/gpu/ion/omap/omap_tiler_heap.c
index aaf5a50..8f46c10 100755
--- a/drivers/gpu/ion/omap/omap_tiler_heap.c
+++ b/drivers/gpu/ion/omap/omap_tiler_heap.c
@@ -234,6 +234,7 @@ int omap_tiler_alloc(struct ion_heap *heap,
info->n_tiler_pages = n_tiler_pages;
info->phys_addrs = (u32 *)(info + 1);
info->tiler_addrs = info->phys_addrs + n_phys_pages;
+ info->fmt = data->fmt;
if ((heap->id == OMAP_ION_HEAP_TILER) ||
(heap->id == OMAP_ION_HEAP_NONSECURE_TILER)) {
@@ -362,24 +363,104 @@ int omap_tiler_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
unsigned long addr = vma->vm_start;
u32 vma_pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
int n_pages = min(vma_pages, info->n_tiler_pages);
- int i, ret;
+ int i, ret = 0;
- for (i = vma->vm_pgoff; i < n_pages; i++, addr += PAGE_SIZE) {
+ if (TILER_PIXEL_FMT_PAGE == info->fmt) {
+ /* Since 1D buffer is linear, map whole buffer in one shot */
ret = remap_pfn_range(vma, addr,
- __phys_to_pfn(info->tiler_addrs[i]),
- PAGE_SIZE,
- pgprot_noncached(vma->vm_page_prot));
- if (ret)
- return ret;
+ __phys_to_pfn(info->tiler_addrs[0]),
+ (vma->vm_end - vma->vm_start),
+ (buffer->cached ?
+ (vma->vm_page_prot)
+ : pgprot_writecombine(vma->vm_page_prot)));
+ } else {
+ for (i = vma->vm_pgoff; i < n_pages; i++, addr += PAGE_SIZE) {
+ ret = remap_pfn_range(vma, addr,
+ __phys_to_pfn(info->tiler_addrs[i]),
+ PAGE_SIZE,
+ pgprot_writecombine(vma->vm_page_prot));
+ if (ret)
+ return ret;
+ }
+ }
+ return ret;
+}
+
+static void per_cpu_cache_flush_arm(void *arg)
+{
+ flush_cache_all();
+}
+
+int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
+ unsigned long vaddr, enum cache_operation cacheop)
+{
+ struct omap_tiler_info *info;
+ int n_pages;
+
+ if (!buffer) {
+ pr_err("%s(): buffer is NULL\n", __func__);
+ return -EINVAL;
+ }
+ if (!buffer->cached) {
+ pr_err("%s(): buffer not mapped as cacheable\n", __func__);
+ return -EINVAL;
+ }
+
+ info = buffer->priv_virt;
+ if (!info) {
+ pr_err("%s(): tiler info of buffer is NULL\n", __func__);
+ return -EINVAL;
}
+
+ n_pages = info->n_tiler_pages;
+ if (len > (n_pages * PAGE_SIZE)) {
+ pr_err("%s(): size to flush is greater than allocated size\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (TILER_PIXEL_FMT_PAGE != info->fmt) {
+ pr_err("%s(): only TILER 1D buffers can be cached\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ if (len > FULL_CACHE_FLUSH_THRESHOLD) {
+ on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
+ outer_flush_all();
+ return 0;
+ }
+
+ flush_cache_user_range(vaddr, vaddr + len);
+
+ if (cacheop == CACHE_FLUSH)
+ outer_flush_range(info->tiler_addrs[0],
+ info->tiler_addrs[0] + len);
+ else
+ outer_inv_range(info->tiler_addrs[0],
+ info->tiler_addrs[0] + len);
return 0;
}
+int omap_tiler_heap_flush_user(struct ion_buffer *buffer, size_t len,
+ unsigned long vaddr)
+{
+ return omap_tiler_cache_operation(buffer, len, vaddr, CACHE_FLUSH);
+}
+
+int omap_tiler_heap_inval_user(struct ion_buffer *buffer, size_t len,
+ unsigned long vaddr)
+{
+ return omap_tiler_cache_operation(buffer, len, vaddr, CACHE_INVALIDATE);
+}
+
static struct ion_heap_ops omap_tiler_ops = {
.allocate = omap_tiler_heap_allocate,
.free = omap_tiler_heap_free,
.phys = omap_tiler_phys,
.map_user = omap_tiler_heap_map_user,
+ .flush_user = omap_tiler_heap_flush_user,
+ .inval_user = omap_tiler_heap_inval_user,
};
struct ion_heap *omap_tiler_heap_create(struct ion_platform_heap *data)
diff --git a/include/linux/ion.h b/include/linux/ion.h
index 55fb82b..5ef711d 100644
--- a/include/linux/ion.h
+++ b/include/linux/ion.h
@@ -274,6 +274,7 @@ struct ion_allocation_data {
* struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
* @handle: a handle
* @fd: a file descriptor representing that handle
+ * @cacheable: flag indicating whether the buffer should be mapped cacheable
*
* For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
* the handle returned from ion alloc, and the kernel returns the file
@@ -283,6 +284,7 @@ struct ion_allocation_data {
struct ion_fd_data {
struct ion_handle *handle;
int fd;
+ unsigned char cacheable;
};
/**
@@ -306,6 +308,23 @@ struct ion_custom_data {
unsigned long arg;
};
+/**
+ * struct ion_cached_user_buf_data - metadata passed from userspace for
+ * flushing or invalidating a buffer that was mapped cacheable.
+ * @handle: a handle
+ * @vaddr: virtual address corresponding to the handle after mapping
+ * @size: size of the buffer which should be flushed or invalidated
+ *
+ * For ION_IOC_FLUSH_CACHED and ION_IOC_INVAL_CACHED, userspace populates
+ * handle with the ion handle, vaddr with the virtual address at which the
+ * buffer is mapped, and size with the number of bytes to flush or invalidate.
+ */
+struct ion_cached_user_buf_data {
+ struct ion_handle *handle;
+ unsigned long vaddr;
+ size_t size;
+};
+
#define ION_IOC_MAGIC 'I'
/**
@@ -362,4 +381,9 @@ struct ion_custom_data {
*/
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
+#define ION_IOC_FLUSH_CACHED _IOWR(ION_IOC_MAGIC, 7, \
+ struct ion_cached_user_buf_data)
+#define ION_IOC_INVAL_CACHED _IOWR(ION_IOC_MAGIC, 8, \
+ struct ion_cached_user_buf_data)
+
#endif /* _LINUX_ION_H */
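
For reference, here is a minimal userspace sketch of how the new ioctls combine with the existing ION_IOC_ALLOC/ION_IOC_MAP/ION_IOC_FREE flow on /dev/ion. The heap mask (passed through the allocation flags in this tree), the 4096-byte buffer, and the ion_cached_example name are illustrative only, and error handling is omitted, so treat this as a usage sketch rather than reference code.

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/ion.h>

int ion_cached_example(unsigned int heap_mask)
{
	int ion_fd = open("/dev/ion", O_RDWR);
	struct ion_allocation_data alloc = {
		.len   = 4096,
		.align = 4096,
		.flags = heap_mask,	/* heap selection mask, board-specific */
	};
	struct ion_fd_data map = { 0 };
	struct ion_cached_user_buf_data cache = { 0 };
	struct ion_handle_data free_data = { 0 };
	void *vaddr;

	ioctl(ion_fd, ION_IOC_ALLOC, &alloc);

	/* request a cacheable CPU mapping: ION_IOC_MAP now records
	 * data.cacheable in the buffer before the fd is handed back */
	map.handle = alloc.handle;
	map.cacheable = 1;
	ioctl(ion_fd, ION_IOC_MAP, &map);
	vaddr = mmap(NULL, alloc.len, PROT_READ | PROT_WRITE,
		     MAP_SHARED, map.fd, 0);

	cache.handle = alloc.handle;
	cache.vaddr  = (unsigned long)vaddr;
	cache.size   = alloc.len;

	/* CPU writes, then flush before a device reads the buffer ... */
	memset(vaddr, 0xa5, alloc.len);
	ioctl(ion_fd, ION_IOC_FLUSH_CACHED, &cache);

	/* ... and invalidate before the CPU reads data a device wrote */
	ioctl(ion_fd, ION_IOC_INVAL_CACHED, &cache);

	munmap(vaddr, alloc.len);
	close(map.fd);
	free_data.handle = alloc.handle;
	ioctl(ion_fd, ION_IOC_FREE, &free_data);
	close(ion_fd);
	return 0;
}

Both new ioctls return -EINVAL if the buffer was not mapped with cacheable set (or, for the TILER heap, if the buffer is not 1D), so the cacheable flag in ION_IOC_MAP and the cache maintenance calls have to be used together.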