author     Thomas Hellstrom <thellstrom@vmware.com>  2009-07-24 09:57:34 +0200
committer  Dave Airlie <airlied@redhat.com>          2009-07-29 15:56:13 +1000
commit     542c6f6df51327dbb180cf4d9b34827e147efe17 (patch)
tree       04400fb93788c542e1222ba8f33f975090e59e3a
parent     4677f15c60421d48566c48c3149474e64977f071 (diff)
drm/ttm: Fix ttm in-kernel copying of pages with non-standard caching attributes.
For x86 this affected highmem pages only, since they were always kmapped
cache-coherent, and this is fixed using kmap_atomic_prot().

For other architectures that may not modify the linear kernel map we resort
to vmap() for now, since kmap_atomic_prot() generally uses the linear kernel
map for lowmem pages. This of course comes with a performance impact and
should be optimized when possible.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
drivers/gpu/drm/ttm/ttm_bo_util.c | 63 ++++++++++++++++++++++++++++---------
1 file changed, 52 insertions(+), 11 deletions(-)
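The core of the fix is a small mapping pattern: on x86, map the page through
a temporary atomic kmap slot with the caller-supplied pgprot; elsewhere, fall
back to vmap() whenever the requested protection differs from the default
cached PAGE_KERNEL. Below is a minimal, self-contained sketch of that
pattern, assuming a 2009-era kernel where kmap_atomic_prot() and
kunmap_atomic() still take a KM_USERx slot argument. ttm_page_map() and
ttm_page_unmap() are hypothetical helper names used for illustration only,
not functions from this patch; the sketch also compares protections via
pgprot_val(), which stays portable on architectures where pgprot_t is a
struct.

#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

/* Hypothetical helper: map one page with an explicit caching attribute. */
static void *ttm_page_map(struct page *p, pgprot_t prot)
{
#ifdef CONFIG_X86
	/* x86 can install an arbitrary pgprot in a temporary kmap slot. */
	return kmap_atomic_prot(p, KM_USER0, prot);
#else
	/*
	 * Other architectures may not modify the linear kernel map, so a
	 * non-default pgprot needs a fresh vmap()ed alias; plain kmap()
	 * suffices for the cached PAGE_KERNEL case.
	 */
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		return vmap(&p, 1, 0, prot);
	else
		return kmap(p);
#endif
}

/* Hypothetical helper: undo whichever mapping ttm_page_map() created. */
static void ttm_page_unmap(struct page *p, void *virtual, pgprot_t prot)
{
#ifdef CONFIG_X86
	kunmap_atomic(virtual, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(virtual);
	else
		kunmap(p);
#endif
}

The patch inlines this pattern into ttm_copy_io_ttm_page() and
ttm_copy_ttm_io_page() rather than factoring it out, as the diff below shows.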
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 3e5d0c4..ce2e6f3 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -136,7 +136,8 @@ static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
}
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
- unsigned long page)
+ unsigned long page,
+ pgprot_t prot)
{
struct page *d = ttm_tt_get_page(ttm, page);
void *dst;
@@ -145,17 +146,35 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
return -ENOMEM;
src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
- dst = kmap(d);
+
+#ifdef CONFIG_X86
+ dst = kmap_atomic_prot(d, KM_USER0, prot);
+#else
+ if (prot != PAGE_KERNEL)
+ dst = vmap(&d, 1, 0, prot);
+ else
+ dst = kmap(d);
+#endif
if (!dst)
return -ENOMEM;
memcpy_fromio(dst, src, PAGE_SIZE);
- kunmap(d);
+
+#ifdef CONFIG_X86
+ kunmap_atomic(dst, KM_USER0);
+#else
+ if (prot != PAGE_KERNEL)
+ vunmap(dst);
+ else
+ kunmap(d);
+#endif
+
return 0;
}
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
- unsigned long page)
+ unsigned long page,
+ pgprot_t prot)
{
struct page *s = ttm_tt_get_page(ttm, page);
void *src;
@@ -164,12 +183,28 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
return -ENOMEM;
dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
- src = kmap(s);
+#ifdef CONFIG_X86
+ src = kmap_atomic_prot(s, KM_USER0, prot);
+#else
+ if (prot != PAGE_KERNEL)
+ src = vmap(&s, 1, 0, prot);
+ else
+ src = kmap(s);
+#endif
if (!src)
return -ENOMEM;
memcpy_toio(dst, src, PAGE_SIZE);
- kunmap(s);
+
+#ifdef CONFIG_X86
+ kunmap_atomic(src, KM_USER0);
+#else
+ if (prot != PAGE_KERNEL)
+ vunmap(src);
+ else
+ kunmap(s);
+#endif
+
return 0;
}
@@ -214,11 +249,17 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
for (i = 0; i < new_mem->num_pages; ++i) {
page = i * dir + add;
- if (old_iomap == NULL)
- ret = ttm_copy_ttm_io_page(ttm, new_iomap, page);
- else if (new_iomap == NULL)
- ret = ttm_copy_io_ttm_page(ttm, old_iomap, page);
- else
+ if (old_iomap == NULL) {
+ pgprot_t prot = ttm_io_prot(old_mem->placement,
+ PAGE_KERNEL);
+ ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
+ prot);
+ } else if (new_iomap == NULL) {
+ pgprot_t prot = ttm_io_prot(new_mem->placement,
+ PAGE_KERNEL);
+ ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
+ prot);
+ } else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
if (ret)
goto out1;
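The callers in ttm_bo_move_memcpy() derive the pgprot by passing the
placement flags of the memory region through ttm_io_prot(), seeded with
PAGE_KERNEL so that the plain cached case degenerates to kmap() in the copy
helpers above. The body of ttm_io_prot() is not part of this diff; as a
rough illustration of the idea only, a helper of this shape could translate
the TTM caching flags of that era (TTM_PL_FLAG_WC, TTM_PL_FLAG_CACHED) into
a pgprot. example_io_prot is a hypothetical name and the real ttm_io_prot
may differ in detail:

/* Sketch, not the actual ttm_io_prot(): map caching flags to a pgprot. */
static pgprot_t example_io_prot(uint32_t placement, pgprot_t tmp)
{
	if (placement & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);	/* write-combined mapping */
	else if (!(placement & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);	/* uncached mapping */
	return tmp;	/* cached: leave the PAGE_KERNEL seed untouched */
}

With this shape, only buffers placed with non-standard caching attributes
pay the cost of the vmap() fallback on non-x86 architectures, which is the
performance impact the commit message flags for later optimization.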