Diffstat (limited to 'mm')
-rw-r--r--	mm/memory.c	3
-rw-r--r--	mm/swapfile.c	33
-rw-r--r--	mm/vmscan.c	2
3 files changed, 32 insertions(+), 6 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index ac8d3a1..682fca1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3055,7 +3055,8 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	mem_cgroup_commit_charge_swapin(page, ptr);
 
 	swap_free(entry);
-	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
+	if ((PageSwapCache(page) && vm_swap_full(page_swap_info(page))) ||
+	    (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
 	unlock_page(page);
 	if (swapcache) {
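Note: the per-device vm_swap_full(si) helper that this hunk starts using is declared outside mm/ (most likely include/linux/swap.h), so it does not appear in this mm-limited diff. A minimal sketch of what it is assumed to look like, mirroring the old global vm_swap_full() heuristic on a single swap_info_struct (whose pages/inuse_pages fields are real) and treating SWP_FAST devices as always worth freeing eagerly:

/* Hypothetical sketch, not part of this patch: per-device "swap is getting full" test. */
static inline bool vm_swap_full(struct swap_info_struct *si)
{
	/* Low-latency device: reclaiming a swap slot is cheap, so free it eagerly. */
	if (si->flags & SWP_FAST)
		return true;

	/* Same heuristic as the old global check, applied per device. */
	return si->inuse_pages * 2 > si->pages;
}

The added PageSwapCache(page) test in the hunk above guards the page_swap_info(page) lookup, which is only meaningful while the page is still in the swap cache.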
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c8f4338..7197864 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -69,6 +69,26 @@ static inline unsigned char swap_count(unsigned char ent)
 	return ent & ~SWAP_HAS_CACHE; /* may include SWAP_HAS_CONT flag */
 }
 
+bool is_swap_fast(swp_entry_t entry)
+{
+	struct swap_info_struct *p;
+	unsigned long type;
+
+	if (non_swap_entry(entry))
+		return false;
+
+	type = swp_type(entry);
+	if (type >= nr_swapfiles)
+		return false;
+
+	p = swap_info[type];
+
+	if (p->flags & SWP_FAST)
+		return true;
+
+	return false;
+}
+
 /* returns 1 if swap entry is freed */
 static int
 __try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
@@ -289,7 +309,7 @@ checks:
 		scan_base = offset = si->lowest_bit;
 
 	/* reuse swap entry of cache-only swap if not busy. */
-	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+	if (vm_swap_full(si) && si->swap_map[offset] == SWAP_HAS_CACHE) {
 		int swap_was_freed;
 		spin_unlock(&swap_lock);
 		swap_was_freed = __try_to_reclaim_swap(si, offset);
@@ -378,7 +398,8 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
-		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		if (vm_swap_full(si) &&
+		    si->swap_map[offset] == SWAP_HAS_CACHE) {
 			spin_lock(&swap_lock);
 			goto checks;
 		}
@@ -393,7 +414,8 @@ scan:
 			spin_lock(&swap_lock);
 			goto checks;
 		}
-		if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
+		if (vm_swap_full(si) &&
+		    si->swap_map[offset] == SWAP_HAS_CACHE) {
 			spin_lock(&swap_lock);
 			goto checks;
 		}
@@ -708,7 +730,8 @@ int free_swap_and_cache(swp_entry_t entry)
 		 * Also recheck PageSwapCache now page is locked (above).
 		 */
 		if (PageSwapCache(page) && !PageWriteback(page) &&
-				(!page_mapped(page) || vm_swap_full())) {
+				(!page_mapped(page) ||
+				 vm_swap_full(page_swap_info(page)))) {
 			delete_from_swap_cache(page);
 			SetPageDirty(page);
 		}
@@ -2114,6 +2137,8 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
 		}
 		if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD))
 			p->flags |= SWP_DISCARDABLE;
+		if (blk_queue_fast(bdev_get_queue(p->bdev)))
+			p->flags |= SWP_FAST;
 	}
 
 	mutex_lock(&swapon_mutex);
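Note: blk_queue_fast() is a block-layer helper, so its definition also falls outside this mm-only diff. Assuming it follows the pattern of the existing blk_queue_nonrot()/QUEUE_FLAG_NONROT pair that swapon() already uses for SWP_SOLIDSTATE, it would reduce to a simple queue-flag test; the flag name and bit number below are illustrative, not confirmed by this patch:

/* Hypothetical sketch (include/linux/blkdev.h): mark and test low-latency queues. */
#define QUEUE_FLAG_FAST		20	/* example bit number, chosen arbitrarily here */
#define blk_queue_fast(q)	test_bit(QUEUE_FLAG_FAST, &(q)->queue_flags)

A low-latency swap backend (for example a compressed-RAM block driver) would set the flag on its request queue, e.g. with queue_flag_set_unlocked(QUEUE_FLAG_FAST, queue), so that the swapon() hunk above marks the device SWP_FAST.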
diff --git a/mm/vmscan.c b/mm/vmscan.c
index a648d8e..5bd0dcb 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -984,7 +984,7 @@ cull_mlocked:
 
 activate_locked:
 		/* Not a candidate for swapping, so reclaim swap space. */
-		if (PageSwapCache(page) && vm_swap_full())
+		if (PageSwapCache(page) && vm_swap_full(page_swap_info(page)))
 			try_to_free_swap(page);
 		VM_BUG_ON(PageActive(page));
 		SetPageActive(page);
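Note: nothing in this mm/ diff actually calls the newly added is_swap_fast(); its consumer presumably arrives in a follow-up change. A purely illustrative use, consistent with the SWP_FAST idea of skipping work that only pays off on slow media, would be bypassing swap-in readahead for low-latency devices (the pages_to_read variable is hypothetical; page_cluster is the existing readahead knob):

	/* Hypothetical caller, not part of this patch. */
	if (is_swap_fast(entry))
		pages_to_read = 1;		/* readahead buys little on a fast device */
	else
		pages_to_read = 1 << page_cluster;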