author		Christoph Lameter <clameter@sgi.com>	2007-10-16 01:26:06 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 09:43:01 -0700
commit		8e65d24c7caf2a4c69b3ae0ce170bf3082ba359f (patch)
tree		4f690448c1363bf02f74abd9293126c3e3a9e4c9 /include
parent		dfb4f09609827301740ef0a11b37530d190f1681 (diff)
download	kernel_samsung_aries-8e65d24c7caf2a4c69b3ae0ce170bf3082ba359f.zip
		kernel_samsung_aries-8e65d24c7caf2a4c69b3ae0ce170bf3082ba359f.tar.gz
		kernel_samsung_aries-8e65d24c7caf2a4c69b3ae0ce170bf3082ba359f.tar.bz2
SLUB: Do not use page->mapping
After moving the lockless_freelist to kmem_cache_cpu we no longer need page->lockless_freelist. Restructure the use of the struct page fields in such a way that we never touch the mapping field. This in turn allows us to remove the special casing of SLUB when determining the mapping of a page (needed for the corner case of machines with virtual caches, which must flush the caches of all processors mapping a page).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
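For context, the per-CPU structure the commit message refers to is roughly the one below. This is a sketch of include/linux/slub_def.h in this era, not part of this patch; the exact field set and comments are an assumption, and the only point that matters here is that the lockless freelist now lives per CPU instead of in struct page.

struct kmem_cache_cpu {
	void **freelist;	/* per-CPU lockless freelist, formerly page->lockless_freelist */
	struct page *page;	/* slab page we are currently allocating from */
	int node;		/* NUMA node of that page */
	/* ... further per-CPU allocation state ... */
};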
Diffstat (limited to 'include')
-rw-r--r--	include/linux/mm.h		4
-rw-r--r--	include/linux/mm_types.h	9
2 files changed, 2 insertions, 11 deletions
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6a68d41..292c686 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -568,10 +568,6 @@ static inline struct address_space *page_mapping(struct page *page)
 	VM_BUG_ON(PageSlab(page));
 	if (unlikely(PageSwapCache(page)))
 		mapping = &swapper_space;
-#ifdef CONFIG_SLUB
-	else if (unlikely(PageSlab(page)))
-		mapping = NULL;
-#endif
 	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
 		mapping = NULL;
 	return mapping;
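With the SLUB special case removed, page_mapping() reads as below. The body is reconstructed from the context lines of this hunk; the opening assignment from page->mapping is not shown in the diff and is filled in from the surrounding function.

static inline struct address_space *page_mapping(struct page *page)
{
	struct address_space *mapping = page->mapping;	/* not in the hunk; from surrounding code */

	VM_BUG_ON(PageSlab(page));	/* slab pages must never be passed in here */
	if (unlikely(PageSwapCache(page)))
		mapping = &swapper_space;
	else if (unlikely((unsigned long)mapping & PAGE_MAPPING_ANON))
		mapping = NULL;		/* anonymous pages carry anon_vma, not an address_space */
	return mapping;
}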
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 145b3d05..0cdc8fb 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -62,13 +62,8 @@ struct page {
 #if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
 		spinlock_t ptl;
 #endif
-		struct {			/* SLUB uses */
-			void **lockless_freelist;
-			struct kmem_cache *slab;	/* Pointer to slab */
-		};
-		struct {
-			struct page *first_page;	/* Compound pages */
-		};
+		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
+		struct page *first_page;	/* Compound tail pages */
 	};
 	union {
 		pgoff_t index;		/* Our offset within mapping. */
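After this hunk is applied, the slab pointer and the compound-page back pointer sit directly in the union, so SLUB never overlays the mapping field. The sketch below shows the resulting layout; the private/mapping pair comes from just above the hunk and is not part of this diff, and its comments are approximate.

	union {
		struct {
			unsigned long private;		/* mapping-private data (buffer_heads, swap entry, ...) */
			struct address_space *mapping;	/* no longer touched by SLUB */
		};
#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
		spinlock_t ptl;
#endif
		struct kmem_cache *slab;	/* SLUB: Pointer to slab */
		struct page *first_page;	/* Compound tail pages */
	};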