author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>  2011-03-23 16:42:19 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>      2011-03-23 19:46:22 -0700
commit     56039efa18f2530fc23e8ef19e716b65ee2a1d1e (patch)
tree       a61cbd2f760e93363657622de2cd1591db028458 /mm
parent     6c191cd01a935e5b53ef43c9403c771bb7a32b60 (diff)
memcg: fix ugly initialization of return value in caller
Remove the initialization of a variable in the callers of memory cgroup
functions.  The variable is really a return value of the memcg function,
yet it is initialized by the caller.

Some memory cgroup code uses the following style to carry the result of a
start function to the matching end function, in order to avoid races:

	mem_cgroup_start_A(&(*ptr))
	/* Something very complicated can happen here. */
	mem_cgroup_end_A(*ptr)

In some of these calls, *ptr had to be initialized to NULL by the caller,
which is ugly.  This patch makes the _start function initialize *ptr
instead.

Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
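As an illustration only: a minimal, self-contained C sketch of the ownership
change described above, using the hypothetical mem_cgroup_start_A() /
mem_cgroup_end_A() names from the changelog as stand-ins for the real
try_charge/commit/cancel helpers touched by the diff below.

	struct mem_cgroup;

	/* After this patch, the _start helper owns the initialization of *ptr. */
	int mem_cgroup_start_A(struct mem_cgroup **ptr)
	{
		*ptr = NULL;		/* moved here from every caller */
		/* ... charge and record the charged memcg in *ptr, or leave it NULL ... */
		return 0;
	}

	void mem_cgroup_end_A(struct mem_cgroup *ptr)
	{
		if (!ptr)		/* nothing was charged; end is a no-op */
			return;
		/* ... commit or cancel the charge recorded by the _start call ... */
	}

	/* The caller no longer needs the ugly "= NULL" initializer. */
	static int caller_example(void)
	{
		struct mem_cgroup *mem;	/* was: struct mem_cgroup *mem = NULL; */
		int ret;

		ret = mem_cgroup_start_A(&mem);
		if (ret)
			return ret;
		/* Something very complicated can happen here. */
		mem_cgroup_end_A(mem);
		return 0;
	}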
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  8
-rw-r--r--  mm/memory.c      2
-rw-r--r--  mm/migrate.c     2
-rw-r--r--  mm/swapfile.c    2
4 files changed, 9 insertions(+), 5 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e1ee6ad..b56bd74 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2475,7 +2475,7 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
/* shmem */
if (PageSwapCache(page)) {
- struct mem_cgroup *mem = NULL;
+ struct mem_cgroup *mem;
ret = mem_cgroup_try_charge_swapin(mm, page, gfp_mask, &mem);
if (!ret)
@@ -2501,6 +2501,8 @@ int mem_cgroup_try_charge_swapin(struct mm_struct *mm,
struct mem_cgroup *mem;
int ret;
+ *ptr = NULL;
+
if (mem_cgroup_disabled())
return 0;
@@ -2916,6 +2918,8 @@ int mem_cgroup_prepare_migration(struct page *page,
enum charge_type ctype;
int ret = 0;
+ *ptr = NULL;
+
VM_BUG_ON(PageTransHuge(page));
if (mem_cgroup_disabled())
return 0;
@@ -3058,7 +3062,7 @@ int mem_cgroup_shmem_charge_fallback(struct page *page,
struct mm_struct *mm,
gfp_t gfp_mask)
{
- struct mem_cgroup *mem = NULL;
+ struct mem_cgroup *mem;
int ret;
if (mem_cgroup_disabled())
diff --git a/mm/memory.c b/mm/memory.c
index 615be51..20d5f74 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2767,7 +2767,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
swp_entry_t entry;
pte_t pte;
int locked;
- struct mem_cgroup *ptr = NULL;
+ struct mem_cgroup *ptr;
int exclusive = 0;
int ret = 0;
diff --git a/mm/migrate.c b/mm/migrate.c
index 89e5c3f..b0406d7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -633,7 +633,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
struct page *newpage = get_new_page(page, private, &result);
int remap_swapcache = 1;
int charge = 0;
- struct mem_cgroup *mem = NULL;
+ struct mem_cgroup *mem;
struct anon_vma *anon_vma = NULL;
if (!newpage)
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 71b42ec..039e616 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -880,7 +880,7 @@ unsigned int count_swap_pages(int type, int free)
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, swp_entry_t entry, struct page *page)
{
- struct mem_cgroup *ptr = NULL;
+ struct mem_cgroup *ptr;
spinlock_t *ptl;
pte_t *pte;
int ret = 1;
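For context, here is roughly how unuse_pte() above (and do_swap_page() in
mm/memory.c) pairs these calls after the change.  This is a condensed,
non-verbatim sketch with locking and most error handling left out;
"pte_changed" is a hypothetical flag standing in for the pte re-check.
The point is that mem_cgroup_try_charge_swapin() now sets ptr, so the
caller declares it uninitialized.

	struct mem_cgroup *ptr;		/* no caller-side "= NULL" any more */

	if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr))
		return -ENOMEM;		/* charge failed, nothing to undo */

	/* ... re-check that the pte still holds the expected swap entry ... */

	if (pte_changed)
		mem_cgroup_cancel_charge_swapin(ptr);		/* drop the charge */
	else
		mem_cgroup_commit_charge_swapin(page, ptr);	/* keep it */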