Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     3
-rw-r--r--  mm/hugetlb.c     2
-rw-r--r--  mm/memory.c      3
-rw-r--r--  mm/mempolicy.c   5
-rw-r--r--  mm/migrate.c     3
-rw-r--r--  mm/mmap.c       11
-rw-r--r--  mm/mprotect.c    6
-rw-r--r--  mm/mremap.c      4
-rw-r--r--  mm/oom_kill.c   15
-rw-r--r--  mm/slab.c      307
-rw-r--r--  mm/vmscan.c      2
11 files changed, 188 insertions(+), 173 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 79f24a9..5209e47 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -25,6 +25,7 @@
#include <linux/uio.h>
#include <linux/hash.h>
#include <linux/writeback.h>
+#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
#include <linux/security.h>
@@ -840,7 +841,7 @@ static void shrink_readahead_size_eio(struct file *filp,
/**
* do_generic_mapping_read - generic file read routine
* @mapping: address_space to be read
- * @_ra: file's readahead state
+ * @ra: file's readahead state
* @filp: the file to read
* @ppos: current file position
* @desc: read_descriptor
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ae2959b..034617f 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1020,7 +1020,7 @@ static long region_chg(struct list_head *head, long f, long t)
* size such that we can guarentee to record the reservation. */
if (&rg->link == head || t < rg->from) {
nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
- if (nrg == 0)
+ if (!nrg)
return -ENOMEM;
nrg->from = f;
nrg->to = f;
diff --git a/mm/memory.c b/mm/memory.c
index bd16dca..142683d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -259,9 +259,6 @@ void free_pgd_range(struct mmu_gather **tlb,
continue;
free_pud_range(*tlb, pgd, addr, next, floor, ceiling);
} while (pgd++, addr = next, addr != end);
-
- if (!(*tlb)->fullmm)
- flush_tlb_pgtables((*tlb)->mm, start, end);
}
void free_pgtables(struct mmu_gather **tlb, struct vm_area_struct *vma,
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 568152a..c1592a9 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -78,6 +78,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
+#include <linux/nsproxy.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compat.h>
@@ -940,7 +941,7 @@ asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
/* Find the mm_struct */
read_lock(&tasklist_lock);
- task = pid ? find_task_by_pid(pid) : current;
+ task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
read_unlock(&tasklist_lock);
return -ESRCH;
@@ -1388,7 +1389,6 @@ EXPORT_SYMBOL(alloc_pages_current);
* keeps mempolicies cpuset relative after its cpuset moves. See
* further kernel/cpuset.c update_nodemask().
*/
-void *cpuset_being_rebound;
/* Slow path of a mempolicy copy */
struct mempolicy *__mpol_copy(struct mempolicy *old)
@@ -2019,4 +2019,3 @@ out:
m->version = (vma != priv->tail_vma) ? vma->vm_start : 0;
return 0;
}
-
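Both sys_migrate_pages() above and sys_move_pages() in the following migrate.c hunk switch from find_task_by_pid() to find_task_by_vpid(), so a user-supplied pid is now resolved in the caller's own pid namespace. A minimal sketch of the resulting lookup pattern, using a hypothetical helper name and assuming the caller releases the mm with mmput():

	static struct mm_struct *lookup_target_mm(pid_t pid)
	{
		struct task_struct *task;
		struct mm_struct *mm;

		read_lock(&tasklist_lock);
		/* Resolve the pid in the caller's namespace; pid 0 means "current". */
		task = pid ? find_task_by_vpid(pid) : current;
		if (!task) {
			read_unlock(&tasklist_lock);
			return NULL;
		}
		/* Pin the mm so it cannot disappear once tasklist_lock is dropped. */
		mm = get_task_mm(task);
		read_unlock(&tasklist_lock);
		return mm;	/* may be NULL for kernel threads; caller must mmput() */
	}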
diff --git a/mm/migrate.c b/mm/migrate.c
index 06d0877..4d6ee03 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -19,6 +19,7 @@
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include <linux/mm_inline.h>
+#include <linux/nsproxy.h>
#include <linux/pagevec.h>
#include <linux/rmap.h>
#include <linux/topology.h>
@@ -924,7 +925,7 @@ asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
/* Find the mm_struct */
read_lock(&tasklist_lock);
- task = pid ? find_task_by_pid(pid) : current;
+ task = pid ? find_task_by_vpid(pid) : current;
if (!task) {
read_unlock(&tasklist_lock);
return -ESRCH;
diff --git a/mm/mmap.c b/mm/mmap.c
index 4275e81..7a30c49 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1048,8 +1048,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
/* The open routine did something to the protections already? */
if (pgprot_val(vma->vm_page_prot) !=
- pgprot_val(protection_map[vm_flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]))
+ pgprot_val(vm_get_page_prot(vm_flags)))
return 0;
/* Specialty mapping? */
@@ -1130,8 +1129,7 @@ munmap_back:
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags;
- vma->vm_page_prot = protection_map[vm_flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+ vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma->vm_pgoff = pgoff;
if (file) {
@@ -2002,8 +2000,7 @@ unsigned long do_brk(unsigned long addr, unsigned long len)
vma->vm_end = addr + len;
vma->vm_pgoff = pgoff;
vma->vm_flags = flags;
- vma->vm_page_prot = protection_map[flags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+ vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent);
out:
mm->total_vm += len >> PAGE_SHIFT;
@@ -2209,7 +2206,7 @@ int install_special_mapping(struct mm_struct *mm,
vma->vm_end = addr + len;
vma->vm_flags = vm_flags | mm->def_flags;
- vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
vma->vm_ops = &special_mapping_vmops;
vma->vm_private_data = pages;
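All four mmap.c sites above replace the open-coded protection_map[] lookup with vm_get_page_prot(). Assuming the standard definition in this kernel series, the helper is the same table lookup keyed on the four protection-relevant flag bits, roughly:

	pgprot_t vm_get_page_prot(unsigned long vm_flags)
	{
		/* Same lookup the call sites used to open-code. */
		return protection_map[vm_flags &
				(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
	}

One behavioural note: the old install_special_mapping() variant masked with 7 (VM_READ|VM_WRITE|VM_EXEC only), dropping VM_SHARED from the index; the helper consistently includes it.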
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 1d4d697..5522784 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -192,11 +192,9 @@ success:
* held in write mode.
*/
vma->vm_flags = newflags;
- vma->vm_page_prot = protection_map[newflags &
- (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
+ vma->vm_page_prot = vm_get_page_prot(newflags);
if (vma_wants_writenotify(vma)) {
- vma->vm_page_prot = protection_map[newflags &
- (VM_READ|VM_WRITE|VM_EXEC)];
+ vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
dirty_accountable = 1;
}
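The write-notify branch intentionally clears VM_SHARED before the lookup: on most architectures the "private" row of protection_map is write-protected, so a shared, writable mapping that wants write notification gets a vm_page_prot that makes the first store fault, which is what allows dirty accounting to see it. A rough illustration, assuming the usual protection_map layout:

	unsigned long newflags = VM_READ | VM_WRITE | VM_SHARED;

	pgprot_t shared = vm_get_page_prot(newflags);			/* writable */
	pgprot_t notify = vm_get_page_prot(newflags & ~VM_SHARED);	/* write-protected;
									   first write traps */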
diff --git a/mm/mremap.c b/mm/mremap.c
index 8ea5c24..08e3c7f 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -291,7 +291,7 @@ unsigned long do_mremap(unsigned long addr,
if ((addr <= new_addr) && (addr+old_len) > new_addr)
goto out;
- ret = security_file_mmap(0, 0, 0, 0, new_addr, 1);
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
if (ret)
goto out;
@@ -399,7 +399,7 @@ unsigned long do_mremap(unsigned long addr,
goto out;
}
- ret = security_file_mmap(0, 0, 0, 0, new_addr, 1);
+ ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
if (ret)
goto out;
}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a64decb..824cade 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -212,7 +212,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
if (!p->mm)
continue;
/* skip the init task */
- if (is_init(p))
+ if (is_global_init(p))
continue;
/*
@@ -265,7 +265,7 @@ static struct task_struct *select_bad_process(unsigned long *ppoints)
*/
static void __oom_kill_task(struct task_struct *p, int verbose)
{
- if (is_init(p)) {
+ if (is_global_init(p)) {
WARN_ON(1);
printk(KERN_WARNING "tried to kill init!\n");
return;
@@ -278,7 +278,8 @@ static void __oom_kill_task(struct task_struct *p, int verbose)
}
if (verbose)
- printk(KERN_ERR "Killed process %d (%s)\n", p->pid, p->comm);
+ printk(KERN_ERR "Killed process %d (%s)\n",
+ task_pid_nr(p), p->comm);
/*
* We give our sacrificial lamb high priority and access to
@@ -326,7 +327,7 @@ static int oom_kill_task(struct task_struct *p)
* to memory reserves though, otherwise we might deplete all memory.
*/
do_each_thread(g, q) {
- if (q->mm == mm && q->tgid != p->tgid)
+ if (q->mm == mm && !same_thread_group(q, p))
force_sig(SIGKILL, q);
} while_each_thread(g, q);
@@ -337,7 +338,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
unsigned long points, const char *message)
{
struct task_struct *c;
- struct list_head *tsk;
if (printk_ratelimit()) {
printk(KERN_WARNING "%s invoked oom-killer: "
@@ -357,11 +357,10 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
}
printk(KERN_ERR "%s: kill process %d (%s) score %li or a child\n",
- message, p->pid, p->comm, points);
+ message, task_pid_nr(p), p->comm, points);
/* Try to kill a child first */
- list_for_each(tsk, &p->children) {
- c = list_entry(tsk, struct task_struct, sibling);
+ list_for_each_entry(c, &p->children, sibling) {
if (c->mm == p->mm)
continue;
if (!oom_kill_task(c))
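oom_kill_process() now iterates its children with list_for_each_entry(), which folds the list_entry() conversion into the iterator and drops the separate list_head cursor; the two forms are equivalent. The conversion pattern in isolation (illustration only, not part of the patch):

	struct task_struct *c;
	struct list_head *tsk;

	/* old: walk the list heads, then recover the containing task_struct */
	list_for_each(tsk, &p->children) {
		c = list_entry(tsk, struct task_struct, sibling);
		/* ... use c ... */
	}

	/* new: the iterator hands back the containing task_struct directly */
	list_for_each_entry(c, &p->children, sibling) {
		/* ... use c ... */
	}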
diff --git a/mm/slab.c b/mm/slab.c
index 3ce9bc0..54eb555 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1156,105 +1156,187 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
}
#endif
-static int __cpuinit cpuup_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static void __cpuinit cpuup_canceled(long cpu)
+{
+ struct kmem_cache *cachep;
+ struct kmem_list3 *l3 = NULL;
+ int node = cpu_to_node(cpu);
+
+ list_for_each_entry(cachep, &cache_chain, next) {
+ struct array_cache *nc;
+ struct array_cache *shared;
+ struct array_cache **alien;
+ cpumask_t mask;
+
+ mask = node_to_cpumask(node);
+ /* cpu is dead; no one can alloc from it. */
+ nc = cachep->array[cpu];
+ cachep->array[cpu] = NULL;
+ l3 = cachep->nodelists[node];
+
+ if (!l3)
+ goto free_array_cache;
+
+ spin_lock_irq(&l3->list_lock);
+
+ /* Free limit for this kmem_list3 */
+ l3->free_limit -= cachep->batchcount;
+ if (nc)
+ free_block(cachep, nc->entry, nc->avail, node);
+
+ if (!cpus_empty(mask)) {
+ spin_unlock_irq(&l3->list_lock);
+ goto free_array_cache;
+ }
+
+ shared = l3->shared;
+ if (shared) {
+ free_block(cachep, shared->entry,
+ shared->avail, node);
+ l3->shared = NULL;
+ }
+
+ alien = l3->alien;
+ l3->alien = NULL;
+
+ spin_unlock_irq(&l3->list_lock);
+
+ kfree(shared);
+ if (alien) {
+ drain_alien_cache(cachep, alien);
+ free_alien_cache(alien);
+ }
+free_array_cache:
+ kfree(nc);
+ }
+ /*
+ * In the previous loop, all the objects were freed to
+ * the respective cache's slabs, now we can go ahead and
+ * shrink each nodelist to its limit.
+ */
+ list_for_each_entry(cachep, &cache_chain, next) {
+ l3 = cachep->nodelists[node];
+ if (!l3)
+ continue;
+ drain_freelist(cachep, l3, l3->free_objects);
+ }
+}
+
+static int __cpuinit cpuup_prepare(long cpu)
{
- long cpu = (long)hcpu;
struct kmem_cache *cachep;
struct kmem_list3 *l3 = NULL;
int node = cpu_to_node(cpu);
const int memsize = sizeof(struct kmem_list3);
- switch (action) {
- case CPU_LOCK_ACQUIRE:
- mutex_lock(&cache_chain_mutex);
- break;
- case CPU_UP_PREPARE:
- case CPU_UP_PREPARE_FROZEN:
+ /*
+ * We need to do this right in the beginning since
+ * alloc_arraycache's are going to use this list.
+ * kmalloc_node allows us to add the slab to the right
+ * kmem_list3 and not this cpu's kmem_list3
+ */
+
+ list_for_each_entry(cachep, &cache_chain, next) {
/*
- * We need to do this right in the beginning since
- * alloc_arraycache's are going to use this list.
- * kmalloc_node allows us to add the slab to the right
- * kmem_list3 and not this cpu's kmem_list3
+ * Set up the size64 kmemlist for cpu before we can
+ * begin anything. Make sure some other cpu on this
+ * node has not already allocated this
*/
+ if (!cachep->nodelists[node]) {
+ l3 = kmalloc_node(memsize, GFP_KERNEL, node);
+ if (!l3)
+ goto bad;
+ kmem_list3_init(l3);
+ l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
+ ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
- list_for_each_entry(cachep, &cache_chain, next) {
/*
- * Set up the size64 kmemlist for cpu before we can
- * begin anything. Make sure some other cpu on this
- * node has not already allocated this
+ * The l3s don't come and go as CPUs come and
+ * go. cache_chain_mutex is sufficient
+ * protection here.
*/
- if (!cachep->nodelists[node]) {
- l3 = kmalloc_node(memsize, GFP_KERNEL, node);
- if (!l3)
- goto bad;
- kmem_list3_init(l3);
- l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
- ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
-
- /*
- * The l3s don't come and go as CPUs come and
- * go. cache_chain_mutex is sufficient
- * protection here.
- */
- cachep->nodelists[node] = l3;
- }
-
- spin_lock_irq(&cachep->nodelists[node]->list_lock);
- cachep->nodelists[node]->free_limit =
- (1 + nr_cpus_node(node)) *
- cachep->batchcount + cachep->num;
- spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+ cachep->nodelists[node] = l3;
}
- /*
- * Now we can go ahead with allocating the shared arrays and
- * array caches
- */
- list_for_each_entry(cachep, &cache_chain, next) {
- struct array_cache *nc;
- struct array_cache *shared = NULL;
- struct array_cache **alien = NULL;
-
- nc = alloc_arraycache(node, cachep->limit,
- cachep->batchcount);
- if (!nc)
+ spin_lock_irq(&cachep->nodelists[node]->list_lock);
+ cachep->nodelists[node]->free_limit =
+ (1 + nr_cpus_node(node)) *
+ cachep->batchcount + cachep->num;
+ spin_unlock_irq(&cachep->nodelists[node]->list_lock);
+ }
+
+ /*
+ * Now we can go ahead with allocating the shared arrays and
+ * array caches
+ */
+ list_for_each_entry(cachep, &cache_chain, next) {
+ struct array_cache *nc;
+ struct array_cache *shared = NULL;
+ struct array_cache **alien = NULL;
+
+ nc = alloc_arraycache(node, cachep->limit,
+ cachep->batchcount);
+ if (!nc)
+ goto bad;
+ if (cachep->shared) {
+ shared = alloc_arraycache(node,
+ cachep->shared * cachep->batchcount,
+ 0xbaadf00d);
+ if (!shared) {
+ kfree(nc);
goto bad;
- if (cachep->shared) {
- shared = alloc_arraycache(node,
- cachep->shared * cachep->batchcount,
- 0xbaadf00d);
- if (!shared)
- goto bad;
}
- if (use_alien_caches) {
- alien = alloc_alien_cache(node, cachep->limit);
- if (!alien)
- goto bad;
- }
- cachep->array[cpu] = nc;
- l3 = cachep->nodelists[node];
- BUG_ON(!l3);
-
- spin_lock_irq(&l3->list_lock);
- if (!l3->shared) {
- /*
- * We are serialised from CPU_DEAD or
- * CPU_UP_CANCELLED by the cpucontrol lock
- */
- l3->shared = shared;
- shared = NULL;
+ }
+ if (use_alien_caches) {
+ alien = alloc_alien_cache(node, cachep->limit);
+ if (!alien) {
+ kfree(shared);
+ kfree(nc);
+ goto bad;
}
+ }
+ cachep->array[cpu] = nc;
+ l3 = cachep->nodelists[node];
+ BUG_ON(!l3);
+
+ spin_lock_irq(&l3->list_lock);
+ if (!l3->shared) {
+ /*
+ * We are serialised from CPU_DEAD or
+ * CPU_UP_CANCELLED by the cpucontrol lock
+ */
+ l3->shared = shared;
+ shared = NULL;
+ }
#ifdef CONFIG_NUMA
- if (!l3->alien) {
- l3->alien = alien;
- alien = NULL;
- }
-#endif
- spin_unlock_irq(&l3->list_lock);
- kfree(shared);
- free_alien_cache(alien);
+ if (!l3->alien) {
+ l3->alien = alien;
+ alien = NULL;
}
+#endif
+ spin_unlock_irq(&l3->list_lock);
+ kfree(shared);
+ free_alien_cache(alien);
+ }
+ return 0;
+bad:
+ cpuup_canceled(cpu);
+ return -ENOMEM;
+}
+
+static int __cpuinit cpuup_callback(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ long cpu = (long)hcpu;
+ int err = 0;
+
+ switch (action) {
+ case CPU_LOCK_ACQUIRE:
+ mutex_lock(&cache_chain_mutex);
+ break;
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ err = cpuup_prepare(cpu);
break;
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
@@ -1291,72 +1373,13 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
#endif
case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN:
- list_for_each_entry(cachep, &cache_chain, next) {
- struct array_cache *nc;
- struct array_cache *shared;
- struct array_cache **alien;
- cpumask_t mask;
-
- mask = node_to_cpumask(node);
- /* cpu is dead; no one can alloc from it. */
- nc = cachep->array[cpu];
- cachep->array[cpu] = NULL;
- l3 = cachep->nodelists[node];
-
- if (!l3)
- goto free_array_cache;
-
- spin_lock_irq(&l3->list_lock);
-
- /* Free limit for this kmem_list3 */
- l3->free_limit -= cachep->batchcount;
- if (nc)
- free_block(cachep, nc->entry, nc->avail, node);
-
- if (!cpus_empty(mask)) {
- spin_unlock_irq(&l3->list_lock);
- goto free_array_cache;
- }
-
- shared = l3->shared;
- if (shared) {
- free_block(cachep, shared->entry,
- shared->avail, node);
- l3->shared = NULL;
- }
-
- alien = l3->alien;
- l3->alien = NULL;
-
- spin_unlock_irq(&l3->list_lock);
-
- kfree(shared);
- if (alien) {
- drain_alien_cache(cachep, alien);
- free_alien_cache(alien);
- }
-free_array_cache:
- kfree(nc);
- }
- /*
- * In the previous loop, all the objects were freed to
- * the respective cache's slabs, now we can go ahead and
- * shrink each nodelist to its limit.
- */
- list_for_each_entry(cachep, &cache_chain, next) {
- l3 = cachep->nodelists[node];
- if (!l3)
- continue;
- drain_freelist(cachep, l3, l3->free_objects);
- }
+ cpuup_canceled(cpu);
break;
case CPU_LOCK_RELEASE:
mutex_unlock(&cache_chain_mutex);
break;
}
- return NOTIFY_OK;
-bad:
- return NOTIFY_BAD;
+ return err ? NOTIFY_BAD : NOTIFY_OK;
}
static struct notifier_block __cpuinitdata cpucache_notifier = {
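The slab CPU-hotplug notifier is split so that the CPU_UP_PREPARE and CPU_UP_CANCELED paths live in cpuup_prepare() and cpuup_canceled(); on allocation failure cpuup_prepare() now unwinds through cpuup_canceled() and the callback turns the -ENOMEM into NOTIFY_BAD. Reduced to that dispatch (sketch only; the lock, online and dead cases are unchanged and omitted here):

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = cpuup_prepare(cpu);	/* -ENOMEM rolls back via cpuup_canceled() */
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		cpuup_canceled(cpu);
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;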
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e147138..cb474cc 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1282,7 +1282,7 @@ out:
*/
if (priority < 0)
priority = 0;
- for (i = 0; zones[i] != 0; i++) {
+ for (i = 0; zones[i] != NULL; i++) {
struct zone *zone = zones[i];
if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))