author     Hugh Dickins <hugh@veritas.com>                        2008-02-04 22:28:50 -0800
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>   2008-02-05 09:44:15 -0800
commit     73b1262fa43a778b1e154deea632cdef5009d6a1
tree       409f14e78e23cd999e8c3d1e0945f0fa9b67048f /mm
parent     f000944d03a5b74ab3c92b2fcdf0e944cc898065
tmpfs: move swap swizzling into shmem
move_to_swap_cache and move_from_swap_cache functions (which swizzle a page
between tmpfs page cache and swap cache, to avoid page copying) are only used
by shmem.c; and our subsequent fix for unionfs needs different treatments in
the two instances of move_from_swap_cache. Move them from swap_state.c into
their callsites shmem_writepage, shmem_unuse_inode and shmem_getpage, making
add_to_swap_cache externally visible.
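For reference, the open-coded swizzle at those callsites boils down to the pattern below (condensed from the hunks that follow; locking, shmem bookkeeping and error paths omitted):

	/* swap cache -> tmpfs page cache (shmem_unuse_inode, shmem_getpage) */
	if (add_to_page_cache(page, inode->i_mapping, idx, GFP_ATOMIC) == 0) {
		delete_from_swap_cache(page);
		set_page_dirty(page);
		/* ... shmem_swp_set(), swap_free(), etc. ... */
	}

	/* tmpfs page cache -> swap cache (shmem_writepage) */
	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
		remove_from_page_cache(page);
		/* ... record swap entry, update shmem_swaplist ... */
		swap_duplicate(swap);
		page_cache_release(page);	/* drop the pagecache ref */
		set_page_dirty(page);
	}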
shmem.c likes to say set_page_dirty, whereas swap_state.c liked to say SetPageDirty:
respect that diversity, which __set_page_dirty_no_writeback makes moot (and implies
we should lose that "shift page from clean_pages to dirty_pages list" comment: it's
on neither list).
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/shmem.c       |  16
-rw-r--r--   mm/swap_state.c  |  35
2 files changed, 13 insertions(+), 38 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -884,7 +884,9 @@ lost2:
 found:
 	idx += offset;
 	inode = &info->vfs_inode;
-	if (move_from_swap_cache(page, idx, inode->i_mapping) == 0) {
+	if (add_to_page_cache(page, inode->i_mapping, idx, GFP_ATOMIC) == 0) {
+		delete_from_swap_cache(page);
+		set_page_dirty(page);
 		info->flags |= SHMEM_PAGEIN;
 		shmem_swp_set(info, ptr + offset, 0);
 	}
@@ -972,7 +974,8 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 	BUG_ON(!entry);
 	BUG_ON(entry->val);
 
-	if (move_to_swap_cache(page, swap) == 0) {
+	if (add_to_swap_cache(page, swap, GFP_ATOMIC) == 0) {
+		remove_from_page_cache(page);
 		shmem_swp_set(info, entry, swap.val);
 		shmem_swp_unmap(entry);
 		spin_unlock(&info->lock);
@@ -982,6 +985,9 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
 			list_move_tail(&info->swaplist, &shmem_swaplist);
 			spin_unlock(&shmem_swaplist_lock);
 		}
+		swap_duplicate(swap);
+		page_cache_release(page);	/* pagecache ref */
+		set_page_dirty(page);
 		unlock_page(page);
 		return 0;
 	}
@@ -1217,13 +1223,15 @@ repeat:
 			SetPageUptodate(filepage);
 			set_page_dirty(filepage);
 			swap_free(swap);
-		} else if (!(error = move_from_swap_cache(
-				swappage, idx, mapping))) {
+		} else if (!(error = add_to_page_cache(
+				swappage, mapping, idx, GFP_ATOMIC))) {
 			info->flags |= SHMEM_PAGEIN;
 			shmem_swp_set(info, entry, 0);
 			shmem_swp_unmap(entry);
+			delete_from_swap_cache(swappage);
 			spin_unlock(&info->lock);
 			filepage = swappage;
+			set_page_dirty(filepage);
 			swap_free(swap);
 		} else {
 			shmem_swp_unmap(entry);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index c75eda2..65b81c9 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -67,8 +67,7 @@ void show_swap_cache_info(void)
  * add_to_swap_cache resembles add_to_page_cache on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
  */
-static int add_to_swap_cache(struct page *page, swp_entry_t entry,
-			gfp_t gfp_mask)
+int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
 {
 	int error;
 
@@ -183,38 +182,6 @@ void delete_from_swap_cache(struct page *page)
 	page_cache_release(page);
 }
 
-/*
- * Strange swizzling function only for use by shmem_writepage
- */
-int move_to_swap_cache(struct page *page, swp_entry_t entry)
-{
-	int err = add_to_swap_cache(page, entry, GFP_ATOMIC);
-	if (!err) {
-		remove_from_page_cache(page);
-		page_cache_release(page);	/* pagecache ref */
-		if (!swap_duplicate(entry))
-			BUG();
-		SetPageDirty(page);
-	}
-	return err;
-}
-
-/*
- * Strange swizzling function for shmem_getpage (and shmem_unuse)
- */
-int move_from_swap_cache(struct page *page, unsigned long index,
-		struct address_space *mapping)
-{
-	int err = add_to_page_cache(page, mapping, index, GFP_ATOMIC);
-	if (!err) {
-		delete_from_swap_cache(page);
-		/* shift page from clean_pages to dirty_pages list */
-		ClearPageDirty(page);
-		set_page_dirty(page);
-	}
-	return err;
-}
-
 /*
  * If we are the only user, then try to free up the swap cache.
  *