Diffstat (limited to 'drivers/gpu/ion')
-rw-r--r--  drivers/gpu/ion/Kconfig                   6
-rw-r--r--  drivers/gpu/ion/Makefile                  1
-rw-r--r--  drivers/gpu/ion/ion.c                    13
-rw-r--r--  drivers/gpu/ion/omap/Makefile             1
-rw-r--r--  drivers/gpu/ion/omap/omap_ion.c         167
-rw-r--r--  drivers/gpu/ion/omap/omap_ion_priv.h     28
-rw-r--r--  drivers/gpu/ion/omap/omap_tiler_heap.c  260
7 files changed, 475 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/ion/Kconfig b/drivers/gpu/ion/Kconfig
index 5b48b4e..a68878a 100644
--- a/drivers/gpu/ion/Kconfig
+++ b/drivers/gpu/ion/Kconfig
@@ -10,3 +10,9 @@ config ION_TEGRA
help
Choose this option if you wish to use ion on an nVidia Tegra.
+config ION_OMAP
+ tristate "Ion for OMAP"
+ depends on ARCH_OMAP4 && ION && TI_TILER
+ help
+ Choose this option if you wish to use ion on OMAP4.
+
diff --git a/drivers/gpu/ion/Makefile b/drivers/gpu/ion/Makefile
index 73fe3fa..a81e0f3 100644
--- a/drivers/gpu/ion/Makefile
+++ b/drivers/gpu/ion/Makefile
@@ -1,2 +1,3 @@
obj-$(CONFIG_ION) += ion.o ion_heap.o ion_system_heap.o ion_carveout_heap.o
obj-$(CONFIG_ION_TEGRA) += tegra/
+obj-$(CONFIG_ION_OMAP) += omap/
diff --git a/drivers/gpu/ion/ion.c b/drivers/gpu/ion/ion.c
index 37b23af..688e7ed 100644
--- a/drivers/gpu/ion/ion.c
+++ b/drivers/gpu/ion/ion.c
@@ -332,6 +332,7 @@ end:
ion_buffer_put(buffer);
return handle;
}
+EXPORT_SYMBOL(ion_alloc);
void ion_free(struct ion_client *client, struct ion_handle *handle)
{
@@ -349,6 +350,7 @@ void ion_free(struct ion_client *client, struct ion_handle *handle)
}
ion_handle_put(handle);
}
+EXPORT_SYMBOL(ion_free);
static void ion_client_get(struct ion_client *client);
static int ion_client_put(struct ion_client *client);
@@ -406,6 +408,7 @@ int ion_phys(struct ion_client *client, struct ion_handle *handle,
ret = buffer->heap->ops->phys(buffer->heap, buffer, addr, len);
return ret;
}
+EXPORT_SYMBOL(ion_phys);
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
{
@@ -443,6 +446,7 @@ void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle)
mutex_unlock(&client->lock);
return vaddr;
}
+EXPORT_SYMBOL(ion_map_kernel);
struct scatterlist *ion_map_dma(struct ion_client *client,
struct ion_handle *handle)
@@ -479,6 +483,7 @@ struct scatterlist *ion_map_dma(struct ion_client *client,
mutex_unlock(&client->lock);
return sglist;
}
+EXPORT_SYMBOL(ion_map_dma);
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
{
@@ -494,6 +499,7 @@ void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle)
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
}
+EXPORT_SYMBOL(ion_unmap_kernel);
void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
{
@@ -509,7 +515,7 @@ void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle)
mutex_unlock(&buffer->lock);
mutex_unlock(&client->lock);
}
-
+EXPORT_SYMBOL(ion_unmap_dma);
struct ion_buffer *ion_share(struct ion_client *client,
struct ion_handle *handle)
@@ -531,6 +537,7 @@ struct ion_buffer *ion_share(struct ion_client *client,
*/
return handle->buffer;
}
+EXPORT_SYMBOL(ion_share);
struct ion_handle *ion_import(struct ion_client *client,
struct ion_buffer *buffer)
@@ -552,6 +559,7 @@ end:
mutex_unlock(&client->lock);
return handle;
}
+EXPORT_SYMBOL(ion_import);
static const struct file_operations ion_share_fops;
@@ -575,6 +583,7 @@ end:
fput(file);
return handle;
}
+EXPORT_SYMBOL(ion_import_fd);
static int ion_debug_client_show(struct seq_file *s, void *unused)
{
@@ -728,6 +737,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
return client;
}
+EXPORT_SYMBOL(ion_client_create);
static void _ion_client_destroy(struct kref *kref)
{
@@ -768,6 +778,7 @@ void ion_client_destroy(struct ion_client *client)
{
ion_client_put(client);
}
+EXPORT_SYMBOL(ion_client_destroy);
static int ion_share_release(struct inode *inode, struct file* file)
{
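The EXPORT_SYMBOL additions above make the in-kernel ion client API usable from modules such as the OMAP glue code added below. A minimal sketch of that kind of in-kernel user, based only on the signatures visible in this patch; the heap mask, client name, and error handling are illustrative assumptions, not part of the change:

#include <linux/err.h>
#include <linux/ion.h>
#include <linux/mm.h>

/* Illustrative module code exercising the newly exported ion calls. */
static int example_ion_user(struct ion_device *idev)
{
        struct ion_client *client;
        struct ion_handle *handle;
        ion_phys_addr_t phys;
        size_t len;
        int ret;

        /* ion_client_create(dev, heap_mask, name) is now exported */
        client = ion_client_create(idev, -1, "example");
        if (IS_ERR_OR_NULL(client))
                return -ENOMEM;

        /* allocate one page from any heap and query its physical address */
        handle = ion_alloc(client, PAGE_SIZE, PAGE_SIZE, -1);
        if (IS_ERR_OR_NULL(handle)) {
                ion_client_destroy(client);
                return -ENOMEM;
        }

        ret = ion_phys(client, handle, &phys, &len);

        ion_free(client, handle);
        ion_client_destroy(client);
        return ret;
}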
diff --git a/drivers/gpu/ion/omap/Makefile b/drivers/gpu/ion/omap/Makefile
new file mode 100644
index 0000000..9b93884
--- /dev/null
+++ b/drivers/gpu/ion/omap/Makefile
@@ -0,0 +1 @@
+obj-y += omap_tiler_heap.o omap_ion.o
diff --git a/drivers/gpu/ion/omap/omap_ion.c b/drivers/gpu/ion/omap/omap_ion.c
new file mode 100644
index 0000000..1ae3e53
--- /dev/null
+++ b/drivers/gpu/ion/omap/omap_ion.c
@@ -0,0 +1,167 @@
+/*
+ * drivers/gpu/ion/omap/omap_ion.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/err.h>
+#include <linux/ion.h>
+#include <linux/omap_ion.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include "../ion_priv.h"
+#include "omap_ion_priv.h"
+
+struct ion_device *omap_ion_device;
+EXPORT_SYMBOL(omap_ion_device);
+
+int num_heaps;
+struct ion_heap **heaps;
+struct ion_heap *tiler_heap;
+static struct ion_heap *nonsecure_tiler_heap;
+
+int omap_ion_tiler_alloc(struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data)
+{
+ return omap_tiler_alloc(tiler_heap, client, data);
+}
+
+int omap_ion_nonsecure_tiler_alloc(struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data)
+{
+ if (!nonsecure_tiler_heap)
+ return -ENOMEM;
+ return omap_tiler_alloc(nonsecure_tiler_heap, client, data);
+}
+
+long omap_ion_ioctl(struct ion_client *client, unsigned int cmd,
+ unsigned long arg)
+{
+ switch (cmd) {
+ case OMAP_ION_TILER_ALLOC:
+ {
+ struct omap_ion_tiler_alloc_data data;
+ int ret;
+
+ if (!tiler_heap) {
+ pr_err("%s: Tiler heap requested but no tiler "
+ "heap exists on this platform\n", __func__);
+ return -EINVAL;
+ }
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data)))
+ return -EFAULT;
+ ret = omap_ion_tiler_alloc(client, &data);
+ if (ret)
+ return ret;
+ if (copy_to_user((void __user *)arg, &data,
+ sizeof(data)))
+ return -EFAULT;
+ break;
+ }
+ default:
+ pr_err("%s: Unknown custom ioctl\n", __func__);
+ return -ENOTTY;
+ }
+ return 0;
+}
+
+int omap_ion_probe(struct platform_device *pdev)
+{
+ struct ion_platform_data *pdata = pdev->dev.platform_data;
+ int err;
+ int i;
+
+ num_heaps = pdata->nr;
+
+ heaps = kzalloc(sizeof(struct ion_heap *) * pdata->nr, GFP_KERNEL);
+
+ omap_ion_device = ion_device_create(omap_ion_ioctl);
+ if (IS_ERR_OR_NULL(omap_ion_device)) {
+ kfree(heaps);
+ return PTR_ERR(omap_ion_device);
+ }
+
+ /* create the heaps as specified in the board file */
+ for (i = 0; i < num_heaps; i++) {
+ struct ion_platform_heap *heap_data = &pdata->heaps[i];
+
+ if (heap_data->type == OMAP_ION_HEAP_TYPE_TILER) {
+ heaps[i] = omap_tiler_heap_create(heap_data);
+ if (heap_data->id == OMAP_ION_HEAP_NONSECURE_TILER)
+ nonsecure_tiler_heap = heaps[i];
+ else
+ tiler_heap = heaps[i];
+ } else {
+ heaps[i] = ion_heap_create(heap_data);
+ }
+ if (IS_ERR_OR_NULL(heaps[i])) {
+ err = PTR_ERR(heaps[i]);
+ goto err;
+ }
+ ion_device_add_heap(omap_ion_device, heaps[i]);
+ pr_info("%s: adding heap %s of type %d with %lx@%x\n",
+ __func__, heap_data->name, heap_data->type,
+ heap_data->base, heap_data->size);
+
+ }
+
+ platform_set_drvdata(pdev, omap_ion_device);
+ return 0;
+err:
+ for (i = 0; i < num_heaps; i++) {
+ if (heaps[i]) {
+ if (heaps[i]->type == OMAP_ION_HEAP_TYPE_TILER)
+ omap_tiler_heap_destroy(heaps[i]);
+ else
+ ion_heap_destroy(heaps[i]);
+ }
+ }
+ kfree(heaps);
+ return err;
+}
+
+int omap_ion_remove(struct platform_device *pdev)
+{
+ struct ion_device *idev = platform_get_drvdata(pdev);
+ int i;
+
+ ion_device_destroy(idev);
+ for (i = 0; i < num_heaps; i++)
+ if (heaps[i]->type == OMAP_ION_HEAP_TYPE_TILER)
+ omap_tiler_heap_destroy(heaps[i]);
+ else
+ ion_heap_destroy(heaps[i]);
+ kfree(heaps);
+ return 0;
+}
+
+static struct platform_driver ion_driver = {
+ .probe = omap_ion_probe,
+ .remove = omap_ion_remove,
+ .driver = { .name = "ion-omap4" }
+};
+
+static int __init ion_init(void)
+{
+ return platform_driver_register(&ion_driver);
+}
+
+static void __exit ion_exit(void)
+{
+ platform_driver_unregister(&ion_driver);
+}
+
+module_init(ion_init);
+module_exit(ion_exit);
+
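Because omap_tiler_heap_allocate() (in the tiler heap file below) rejects direct ION_IOC_ALLOC requests, userspace reaches this heap through the custom ioctl handled above. A rough sketch of such a caller, assuming the ion_custom_data wrapper from the ion UAPI header and only the omap_ion_tiler_alloc_data fields referenced in this patch (w, h, fmt, handle, stride); header paths and any other fields are assumptions:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/ion.h>
#include <linux/omap_ion.h>

/* Allocate a 1D (page mode) tiler buffer of 'len' bytes through ion. */
static int tiler_alloc_1d(int ion_fd, size_t len,
                          struct omap_ion_tiler_alloc_data *alloc)
{
        struct ion_custom_data custom;

        memset(alloc, 0, sizeof(*alloc));
        alloc->w = len;
        alloc->h = 1;                   /* 1D allocations must have height 1 */
        alloc->fmt = TILER_PIXEL_FMT_PAGE;

        custom.cmd = OMAP_ION_TILER_ALLOC;
        custom.arg = (unsigned long)alloc;

        /* the driver fills in alloc->handle and alloc->stride on success */
        return ioctl(ion_fd, ION_IOC_CUSTOM, &custom);
}

On success the returned handle behaves like any other ion handle, so it can then be shared or mapped with the standard ion ioctls.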
diff --git a/drivers/gpu/ion/omap/omap_ion_priv.h b/drivers/gpu/ion/omap/omap_ion_priv.h
new file mode 100644
index 0000000..2bb3bda
--- /dev/null
+++ b/drivers/gpu/ion/omap/omap_ion_priv.h
@@ -0,0 +1,28 @@
+/*
+ * drivers/gpu/ion/omap/omap_ion_priv.h
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_OMAP_ION_PRIV_H
+#define _LINUX_OMAP_ION_PRIV_H
+
+#include <linux/types.h>
+
+int omap_tiler_alloc(struct ion_heap *heap,
+ struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data);
+struct ion_heap *omap_tiler_heap_create(struct ion_platform_heap *heap_data);
+void omap_tiler_heap_destroy(struct ion_heap *heap);
+
+#endif /* _LINUX_OMAP_ION_PRIV_H */
diff --git a/drivers/gpu/ion/omap/omap_tiler_heap.c b/drivers/gpu/ion/omap/omap_tiler_heap.c
new file mode 100644
index 0000000..652bbf9
--- /dev/null
+++ b/drivers/gpu/ion/omap/omap_tiler_heap.c
@@ -0,0 +1,260 @@
+/*
+ * drivers/gpu/ion/omap/omap_tiler_heap.c
+ *
+ * Copyright (C) 2011 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/spinlock.h>
+
+#include <linux/err.h>
+#include <linux/genalloc.h>
+#include <linux/io.h>
+#include <linux/ion.h>
+#include <linux/mm.h>
+#include <linux/omap_ion.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <mach/tiler.h>
+#include <asm/mach/map.h>
+#include <asm/page.h>
+
+#include "../ion_priv.h"
+
+static int omap_tiler_heap_allocate(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ unsigned long size, unsigned long align,
+ unsigned long flags)
+{
+ if (size == 0)
+ return 0;
+
+ pr_err("%s: This should never be called directly -- use the "
+ "OMAP_ION_TILER_ALLOC flag to the ION_IOC_CUSTOM "
+ "instead\n", __func__);
+ return -EINVAL;
+}
+
+struct omap_tiler_info {
+ tiler_blk_handle tiler_handle; /* handle of the allocation in tiler */
+ bool lump; /* true for a single lump allocation */
+ u32 n_phys_pages; /* number of physical pages */
+ u32 *phys_addrs; /* array of addrs of pages */
+ u32 n_tiler_pages; /* number of tiler pages */
+ u32 *tiler_addrs; /* array of addrs of tiler pages */
+ u32 tiler_start; /* start addr in tiler -- if not page
+ aligned this may not equal the
+ first entry of tiler_addrs */
+};
+
+int omap_tiler_alloc(struct ion_heap *heap,
+ struct ion_client *client,
+ struct omap_ion_tiler_alloc_data *data)
+{
+ struct ion_handle *handle;
+ struct ion_buffer *buffer;
+ struct omap_tiler_info *info;
+ u32 n_phys_pages;
+ u32 n_tiler_pages;
+ ion_phys_addr_t addr;
+ int i, ret;
+
+ if (data->fmt == TILER_PIXEL_FMT_PAGE && data->h != 1) {
+ pr_err("%s: Page mode (1D) allocations must have a height "
+ "of one\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = tiler_memsize(data->fmt, data->w, data->h,
+ &n_phys_pages,
+ &n_tiler_pages);
+
+ if (ret) {
+ pr_err("%s: invalid tiler request w %u h %u fmt %u\n", __func__,
+ data->w, data->h, data->fmt);
+ return ret;
+ }
+
+ BUG_ON(!n_phys_pages || !n_tiler_pages);
+
+ info = kzalloc(sizeof(struct omap_tiler_info) +
+ sizeof(u32) * n_phys_pages +
+ sizeof(u32) * n_tiler_pages, GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->n_phys_pages = n_phys_pages;
+ info->n_tiler_pages = n_tiler_pages;
+ info->phys_addrs = (u32 *)(info + 1);
+ info->tiler_addrs = info->phys_addrs + n_phys_pages;
+
+ info->tiler_handle = tiler_alloc_block_area(data->fmt, data->w, data->h,
+ &info->tiler_start,
+ info->tiler_addrs);
+ if (IS_ERR_OR_NULL(info->tiler_handle)) {
+ ret = PTR_ERR(info->tiler_handle);
+ pr_err("%s: failure to allocate address space from tiler\n",
+ __func__);
+ goto err_nomem;
+ }
+
+ addr = ion_carveout_allocate(heap, n_phys_pages*PAGE_SIZE, 0);
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ for (i = 0; i < n_phys_pages; i++) {
+ addr = ion_carveout_allocate(heap, PAGE_SIZE, 0);
+
+ if (addr == ION_CARVEOUT_ALLOCATE_FAIL) {
+ ret = -ENOMEM;
+ pr_err("%s: failed to allocate pages to back "
+ "tiler address space\n", __func__);
+ goto err_alloc;
+ }
+ info->phys_addrs[i] = addr;
+ }
+ } else {
+ info->lump = true;
+ for (i = 0; i < n_phys_pages; i++)
+ info->phys_addrs[i] = addr + i*PAGE_SIZE;
+ }
+
+ ret = tiler_pin_block(info->tiler_handle, info->phys_addrs,
+ info->n_phys_pages);
+ if (ret) {
+ pr_err("%s: failure to pin pages to tiler\n", __func__);
+ goto err_alloc;
+ }
+
+ data->stride = tiler_block_vstride(info->tiler_handle);
+
+ /* create an ion handle for the allocation */
+ handle = ion_alloc(client, 0, 0, 1 << OMAP_ION_HEAP_TILER);
+ if (IS_ERR_OR_NULL(handle)) {
+ ret = PTR_ERR(handle);
+ pr_err("%s: failure to allocate handle to manage tiler"
+ " allocation\n", __func__);
+ goto err;
+ }
+
+ buffer = ion_handle_buffer(handle);
+ buffer->size = info->n_tiler_pages * PAGE_SIZE;
+ buffer->priv_virt = info;
+ data->handle = handle;
+ return 0;
+
+err:
+ tiler_unpin_block(info->tiler_handle);
+err_alloc:
+ tiler_free_block_area(info->tiler_handle);
+ if (info->lump)
+ ion_carveout_free(heap, addr, n_phys_pages * PAGE_SIZE);
+ else
+ for (i -= 1; i >= 0; i--)
+ ion_carveout_free(heap, info->phys_addrs[i], PAGE_SIZE);
+err_nomem:
+ kfree(info);
+ return ret;
+}
+
+void omap_tiler_heap_free(struct ion_buffer *buffer)
+{
+ struct omap_tiler_info *info = buffer->priv_virt;
+
+ tiler_unpin_block(info->tiler_handle);
+ tiler_free_block_area(info->tiler_handle);
+
+ if (info->lump) {
+ ion_carveout_free(buffer->heap, info->phys_addrs[0],
+ info->n_phys_pages*PAGE_SIZE);
+ } else {
+ int i;
+ for (i = 0; i < info->n_phys_pages; i++)
+ ion_carveout_free(buffer->heap,
+ info->phys_addrs[i], PAGE_SIZE);
+ }
+
+ kfree(info);
+}
+
+static int omap_tiler_phys(struct ion_heap *heap,
+ struct ion_buffer *buffer,
+ ion_phys_addr_t *addr, size_t *len)
+{
+ struct omap_tiler_info *info = buffer->priv_virt;
+
+ *addr = info->tiler_start;
+ *len = buffer->size;
+ return 0;
+}
+
+int omap_tiler_pages(struct ion_client *client, struct ion_handle *handle,
+ int *n, u32 **tiler_addrs)
+{
+ ion_phys_addr_t addr;
+ size_t len;
+ int ret;
+ struct omap_tiler_info *info = ion_handle_buffer(handle)->priv_virt;
+
+ /* validate that the handle exists in this client */
+ ret = ion_phys(client, handle, &addr, &len);
+ if (ret)
+ return ret;
+
+ *n = info->n_tiler_pages;
+ *tiler_addrs = info->tiler_addrs;
+ return 0;
+}
+
+int omap_tiler_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
+ struct vm_area_struct *vma)
+{
+ struct omap_tiler_info *info = buffer->priv_virt;
+ unsigned long addr = vma->vm_start;
+ u32 vma_pages = (vma->vm_end - vma->vm_start) / PAGE_SIZE;
+ int n_pages = min(vma_pages, info->n_tiler_pages);
+ int i, ret;
+
+ for (i = vma->vm_pgoff; i < n_pages; i++, addr += PAGE_SIZE) {
+ ret = remap_pfn_range(vma, addr,
+ __phys_to_pfn(info->tiler_addrs[i]),
+ PAGE_SIZE,
+ pgprot_noncached(vma->vm_page_prot));
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static struct ion_heap_ops omap_tiler_ops = {
+ .allocate = omap_tiler_heap_allocate,
+ .free = omap_tiler_heap_free,
+ .phys = omap_tiler_phys,
+ .map_user = omap_tiler_heap_map_user,
+};
+
+struct ion_heap *omap_tiler_heap_create(struct ion_platform_heap *data)
+{
+ struct ion_heap *heap;
+
+ heap = ion_carveout_heap_create(data);
+ if (!heap)
+ return ERR_PTR(-ENOMEM);
+ heap->ops = &omap_tiler_ops;
+ heap->type = OMAP_ION_HEAP_TYPE_TILER;
+ heap->name = data->name;
+ heap->id = data->id;
+ return heap;
+}
+
+void omap_tiler_heap_destroy(struct ion_heap *heap)
+{
+ kfree(heap);
+}