author    KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>  2009-06-17 16:27:17 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>      2009-06-18 13:03:47 -0700
commit    8a9478ca7f4bcb8945cec7f95d52dae2d5e50cbd (patch)
tree      935e6ee3340ebe999374b54967cc2fa14e8d0060 /mm
parent    20ebcdda78a282d1d5266887ddf8a2d670182576 (diff)
memcg: fix swap accounting
This patch fixes mis-accounting of swap usage in memcg.

In the current implementation, memcg's swap account is uncharged only when
the swap entry is completely freed.  But there are several cases where the
swap entry cannot be freed cleanly.  To handle them, this patch changes memcg
to uncharge the swap account as soon as the swap entry has no references
other than the swap cache.

With this, memcg's swap entry accounting can be fully synchronous with the
application's behavior.

This patch also changes memcg's hooks for swap-out.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Balbir Singh <balbir@in.ibm.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Dhaval Giani <dhaval@linux.vnet.ibm.com>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
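To make the accounting change concrete, here is a minimal user-space C sketch of the policy difference. It is not kernel code: the struct and the put_ref_old()/put_ref_new() helpers are hypothetical stand-ins. Under the old policy the memcg swap charge survives as long as the swap-cache reference exists; under the new one it is dropped as soon as the cache is the only remaining user of the entry.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a charged swap entry. */
struct swap_entry {
	int refs;		/* references held by page tables      */
	bool in_cache;		/* still referenced by the swap cache? */
	bool charged;		/* still accounted to the memcg?       */
};

/* Old policy: uncharge only when the entry is completely freed. */
static void put_ref_old(struct swap_entry *e)
{
	if (--e->refs == 0 && !e->in_cache)
		e->charged = false;
}

/* New policy: uncharge as soon as no user other than the cache remains. */
static void put_ref_new(struct swap_entry *e)
{
	if (--e->refs == 0)
		e->charged = false;	/* a cache-only entry is no longer charged */
}

int main(void)
{
	struct swap_entry a = { .refs = 1, .in_cache = true, .charged = true };
	struct swap_entry b = a;

	put_ref_old(&a);	/* last pte reference gone, page stays in swap cache */
	put_ref_new(&b);
	printf("old policy still charged: %d, new policy still charged: %d\n",
	       a.charged, b.charged);
	return 0;
}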
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c | 17
-rw-r--r--  mm/swapfile.c   | 16
2 files changed, 24 insertions(+), 9 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 3468c38..a83e039 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -189,6 +189,7 @@ enum charge_type {
 	MEM_CGROUP_CHARGE_TYPE_SHMEM,	/* used by page migration of shmem */
 	MEM_CGROUP_CHARGE_TYPE_FORCE,	/* used by force_empty */
 	MEM_CGROUP_CHARGE_TYPE_SWAPOUT,	/* for accounting swapcache */
+	MEM_CGROUP_CHARGE_TYPE_DROP,	/* a page was unused swap cache */
 	NR_CHARGE_TYPE,
 };
 
@@ -1493,6 +1494,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 
 	switch (ctype) {
 	case MEM_CGROUP_CHARGE_TYPE_MAPPED:
+	case MEM_CGROUP_CHARGE_TYPE_DROP:
 		if (page_mapped(page))
 			goto unlock_out;
 		break;
@@ -1556,18 +1558,23 @@ void mem_cgroup_uncharge_cache_page(struct page *page)
  * called after __delete_from_swap_cache() and drop "page" account.
  * memcg information is recorded to swap_cgroup of "ent"
  */
-void mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
 {
 	struct mem_cgroup *memcg;
+	int ctype = MEM_CGROUP_CHARGE_TYPE_SWAPOUT;
+
+	if (!swapout) /* this was a swap cache but the swap is unused ! */
+		ctype = MEM_CGROUP_CHARGE_TYPE_DROP;
+
+	memcg = __mem_cgroup_uncharge_common(page, ctype);
 
-	memcg = __mem_cgroup_uncharge_common(page,
-					MEM_CGROUP_CHARGE_TYPE_SWAPOUT);
 	/* record memcg information */
-	if (do_swap_account && memcg) {
+	if (do_swap_account && swapout && memcg) {
 		swap_cgroup_record(ent, css_id(&memcg->css));
 		mem_cgroup_get(memcg);
 	}
-	if (memcg)
+	if (swapout && memcg)
 		css_put(&memcg->css);
 }
 #endif
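The memcontrol.c hunk above boils down to two decisions driven by the new swapout flag: which charge type to pass to __mem_cgroup_uncharge_common(), and whether to record memcg ownership in the swap_cgroup at all. A small stand-alone sketch of that control flow; the enum values and helper names here are illustrative, not the kernel's:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the two charge types used above. */
enum charge_type { CHARGE_TYPE_SWAPOUT, CHARGE_TYPE_DROP };

/* A real swap-out keeps the SWAPOUT type; an unused swap cache is a DROP. */
static enum charge_type pick_ctype(bool swapout)
{
	return swapout ? CHARGE_TYPE_SWAPOUT : CHARGE_TYPE_DROP;
}

/* memcg ownership is handed to the swap entry only for a real swap-out. */
static bool record_swap_owner(bool do_swap_account, bool swapout, bool have_memcg)
{
	return do_swap_account && swapout && have_memcg;
}

int main(void)
{
	printf("swap-out:     ctype=%d record=%d\n",
	       pick_ctype(true), record_swap_owner(true, true, true));
	printf("unused cache: ctype=%d record=%d\n",
	       pick_ctype(false), record_swap_owner(true, false, true));
	return 0;
}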
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 28faa01..d1ade1a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -583,8 +583,9 @@ static int swap_entry_free(struct swap_info_struct *p,
 			swap_list.next = p - swap_info;
 		nr_swap_pages++;
 		p->inuse_pages--;
-		mem_cgroup_uncharge_swap(ent);
 	}
+	if (!swap_count(count))
+		mem_cgroup_uncharge_swap(ent);
 	return count;
 }
 
@@ -609,12 +610,19 @@ void swap_free(swp_entry_t entry)
 void swapcache_free(swp_entry_t entry, struct page *page)
 {
 	struct swap_info_struct *p;
+	int ret;
 
-	if (page)
-		mem_cgroup_uncharge_swapcache(page, entry);
 	p = swap_info_get(entry);
 	if (p) {
-		swap_entry_free(p, entry, SWAP_CACHE);
+		ret = swap_entry_free(p, entry, SWAP_CACHE);
+		if (page) {
+			bool swapout;
+			if (ret)
+				swapout = true; /* the end of swap out */
+			else
+				swapout = false; /* no more swap users! */
+			mem_cgroup_uncharge_swapcache(page, entry, swapout);
+		}
 		spin_unlock(&swap_lock);
 	}
 	return;
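Both swapfile.c hunks rest on the same distinction: the swap-cache reference is not a real user of the entry. swap_entry_free() now uncharges the memcg swap account as soon as swap_count() reaches zero, even if the cache bit is still set, and swapcache_free() passes swapout = (ret != 0) so memcg ownership is recorded in the swap_cgroup only for a genuine swap-out. Below is a minimal sketch of the masking idea, assuming a simplified swap_map encoding; the macro names mirror the kernel's but the constant and layout are illustrative.

#include <stdio.h>

/* Assumed, simplified encoding: the swap-cache reference is a flag bit and
 * swap_count() is the remaining use count with that bit cleared. */
#define SWAP_HAS_CACHE	0x8000u
#define swap_count(ent)	((ent) & ~SWAP_HAS_CACHE)

int main(void)
{
	unsigned int map = SWAP_HAS_CACHE | 1;	/* one pte reference + swap cache */

	map--;					/* the last pte reference goes away */
	if (swap_count(map))
		printf("entry still has real users: keep the memcg swap charge\n");
	else
		printf("only the swap cache holds the entry: uncharge memcg swap now\n");
	return 0;
}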