authorVinayak Menon <vinmenon@codeaurora.org>2015-02-25 19:43:59 +0530
committerAndreas Blaesius <skate4life@gmx.de>2016-06-05 21:21:55 +0200
commit0a9de80ece1bf937de0363afe9016551c47c6c04 (patch)
treead88b9cf79f87693174bd462e5eaef4510cd9a76 /mm
parent581aaecd9b6c31b19a740db0c81983c468197a88 (diff)
mm: swap: don't delay swap free for fast swap devices
There are a couple of issues with swapcache usage when ZRAM is used as the swap device:

1) The kernel does swap readahead, which can be around 6 to 8 pages
   depending on total RAM; this is not required for zram, since accesses
   are fast.
2) The kernel delays the freeing of swapcache, expecting a later hit,
   which again is useless in the case of zram.
3) This one is not about swapcache but about zram usage itself: as
   mentioned in (2), the kernel delays freeing of swapcache, but along
   with it the zram compressed page is also kept around, i.e. there can
   be two copies of the page, though one is compressed.

This patch addresses these issues with two new flags, QUEUE_FLAG_FAST and SWP_FAST, which indicate that accesses to the device are fast and cheap, and instruct the swap layer to free up swap space aggressively and not to do readahead.

Change-Id: I5d2d5176a5f9420300bb2f843f6ecbdb25ea80e4
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Signed-off-by: D. Andrei Măceș <dmaces@nd.edu>

Conflicts:
	include/linux/blkdev.h
	include/linux/swap.h
	mm/swap_state.c
	mm/swapfile.c
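The include/linux/swap.h half of the change is not shown in this 'mm'-limited diff, but the call sites below imply its shape: vm_swap_full() now takes the struct swap_info_struct of the device in question (obtained via page_swap_info() for swapcache pages). A minimal sketch, assuming SWP_FAST simply short-circuits the usual half-full heuristic:

/* Hypothetical reconstruction of the swap.h side (listed under
 * Conflicts above): on a fast device the swap layer behaves as if
 * swap were always full, so slots and swapcache copies are freed
 * immediately rather than kept for a possible later hit. */
#define vm_swap_full(si) (unlikely((si)->flags & SWP_FAST) || \
			  nr_swap_pages * 2 < total_swap_pages)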
Diffstat (limited to 'mm')
-rw-r--r--mm/memory.c3
-rw-r--r--mm/swapfile.c33
-rw-r--r--mm/vmscan.c2
3 files changed, 32 insertions, 6 deletions
diff --git a/mm/memory.c b/mm/memory.c
index ac8d3a1..682fca1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3055,7 +3055,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
mem_cgroup_commit_charge_swapin(page, ptr);
swap_free(entry);
- if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+ if ((PageSwapCache(page) && vm_swap_full(page_swap_info(page))) ||
+ (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
try_to_free_swap(page);
unlock_page(page);
if (swapcache) {
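Note the PageSwapCache() test added in front of vm_swap_full(): since the heuristic is now per device, page_swap_info(page) has to be called, and that lookup is only valid while the page is still in the swap cache.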
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c8f4338..7197864 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -69,6 +69,26 @@ static inline unsigned char swap_count(unsigned char ent)
return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
}
+bool is_swap_fast(swp_entry_t entry)
+{
+ struct swap_info_struct *p;
+ unsigned long type;
+
+ if (non_swap_entry(entry))
+ return false;
+
+ type = swp_type(entry);
+ if (type >= nr_swapfiles)
+ return false;
+
+ p = swap_info[type];
+
+ if (p->flags & SWP_FAST)
+ return true;
+
+ return false;
+}
+
/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
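is_swap_fast() has no caller in the hunks shown here; its consumer is the readahead path in mm/swap_state.c, which is listed under Conflicts above but absent from this diffstat. A plausible sketch of that gating in swapin_readahead() — hypothetical, since the resolved swap_state.c hunk isn't shown:

/* Hypothetical sketch: collapse the readahead window to a single
 * page when the backing device is fast (e.g. zram). With mask == 0,
 * start_offset == end_offset == offset, so only the faulting page
 * is read. */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long offset = swp_offset(entry);
	unsigned long start_offset, end_offset;
	unsigned long mask = is_swap_fast(entry) ? 0 :
				(1UL << page_cluster) - 1;

	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)
		start_offset++;	/* First page is swap header. */

	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now. */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		page_cache_release(page);
	}
	lru_add_drain();	/* Push any new pages onto the LRU now. */
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}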
@@ -289,7 +309,7 @@ checks:
scan_base = offset = si->lowest_bit;
/* reuse swap entry of cache-only swap if not busy. */
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) && si->swap_map[offset] == SWAP_HAS_CACHE) {
int swap_was_freed;
spin_unlock(&swap_lock);
swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -378,7 +398,8 @@ scan:
spin_lock(&swap_lock);
goto checks;
}
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) &&
+ si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&swap_lock);
goto checks;
}
@@ -393,7 +414,8 @@ scan:
spin_lock(&swap_lock);
goto checks;
}
- if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+ if (vm_swap_full(si) &&
+ si->swap_map[offset] == SWAP_HAS_CACHE) {
spin_lock(&swap_lock);
goto checks;
}
@@ -708,7 +730,8 @@ int free_swap_and_cache(swp_entry_t entry)
* Also recheck PageSwapCache now page is locked (above).
*/
if (PageSwapCache(page) && !PageWriteback(page) &&
- (!page_mapped(page) || vm_swap_full())) {
+ (!page_mapped(page) ||
+ vm_swap_full(page_swap_info(page)))) {
delete_from_swap_cache(page);
SetPageDirty(page);
}
@@ -2114,6 +2137,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
}
if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
p->flags |= SWP_DISCARDABLE;
+ if (blk_queue_fast(bdev_get_queue(p->bdev)))
+ p->flags |= SWP_FAST;
}
mutex_lock(&swapon_mutex);
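blk_queue_fast() belongs to the include/linux/blkdev.h half of the patch (also under Conflicts). By analogy with the existing QUEUE_FLAG_* test macros, a plausible definition — the bit value is purely illustrative — plus the one-liner a driver such as zram would use to opt in:

/* Hypothetical blkdev.h side: mark and test a fast, low-latency queue. */
#define QUEUE_FLAG_FAST		20	/* illustrative bit number */
#define blk_queue_fast(q)	test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)

/* Driver side, e.g. when zram creates its request queue: */
queue_flag_set_unlocked(QUEUE_FLAG_FAST, zram->queue);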
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a648d8e..5bd0dcb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -984,7 +984,7 @@ cull_mlocked:
activate_locked:
/* Not a candidate for swapping, so reclaim swap space. */
- if (PageSwapCache(page) && vm_swap_full())
+ if (PageSwapCache(page) && vm_swap_full(page_swap_info(page)))
try_to_free_swap(page);
VM_BUG_ON(PageActive(page));
SetPageActive(page);