-rw-r--r--  drivers/mtd/maps/autcpu12-nvram.c  19
-rw-r--r--  drivers/mtd/nand/nandsim.c          1
-rw-r--r--  kernel/cpuset.c                     3
-rw-r--r--  kernel/sched.c                     40
-rw-r--r--  mm/mempolicy.c                     12
5 files changed, 62 insertions, 13 deletions
diff --git a/drivers/mtd/maps/autcpu12-nvram.c b/drivers/mtd/maps/autcpu12-nvram.c
index e5bfd0e..0598d52 100644
--- a/drivers/mtd/maps/autcpu12-nvram.c
+++ b/drivers/mtd/maps/autcpu12-nvram.c
@@ -43,7 +43,8 @@ struct map_info autcpu12_sram_map = {
static int __init init_autcpu12_sram (void)
{
- int err, save0, save1;
+ map_word tmp, save0, save1;
+ int err;
autcpu12_sram_map.virt = ioremap(0x12000000, SZ_128K);
if (!autcpu12_sram_map.virt) {
@@ -51,7 +52,7 @@ static int __init init_autcpu12_sram (void)
err = -EIO;
goto out;
}
- simple_map_init(&autcpu_sram_map);
+ simple_map_init(&autcpu12_sram_map);
/*
* Check for 32K/128K
@@ -61,20 +62,22 @@ static int __init init_autcpu12_sram (void)
* Read and check result on ofs 0x0
* Restore contents
*/
- save0 = map_read32(&autcpu12_sram_map,0);
- save1 = map_read32(&autcpu12_sram_map,0x10000);
- map_write32(&autcpu12_sram_map,~save0,0x10000);
+ save0 = map_read(&autcpu12_sram_map, 0);
+ save1 = map_read(&autcpu12_sram_map, 0x10000);
+ tmp.x[0] = ~save0.x[0];
+ map_write(&autcpu12_sram_map, tmp, 0x10000);
/* If we find this pattern at 0x0, we have a 32K part:
 * restore the contents and exit.
 */
- if ( map_read32(&autcpu12_sram_map,0) != save0) {
- map_write32(&autcpu12_sram_map,save0,0x0);
+ tmp = map_read(&autcpu12_sram_map, 0);
+ if (!map_word_equal(&autcpu12_sram_map, tmp, save0)) {
+ map_write(&autcpu12_sram_map, save0, 0x0);
goto map;
}
/* We found a 128K part: restore offset 0x10000 and set the size
 * to 128K.
 */
- map_write32(&autcpu12_sram_map,save1,0x10000);
+ map_write(&autcpu12_sram_map, save1, 0x10000);
autcpu12_sram_map.size = SZ_128K;
map:
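
The probe logic this conversion preserves is worth spelling out: the
complement of the word at offset 0x0 is written to offset 0x10000, and if
offset 0x0 then reads back changed, the two offsets alias the same 32K of
SRAM. A minimal sketch of that probe in isolation, assuming a map_info
already set up with simple_map_init() (the helper name probe_sram_size()
is hypothetical):

	/* Hypothetical helper distilling the probe above: on a 32K part,
	 * offset 0x10000 wraps around onto 0x0, so writing the complement
	 * there corrupts offset 0x0; on a 128K part the cells are distinct.
	 */
	static unsigned long probe_sram_size(struct map_info *map)
	{
		map_word save0, save1, tmp;

		save0 = map_read(map, 0);
		save1 = map_read(map, 0x10000);

		tmp.x[0] = ~save0.x[0];
		map_write(map, tmp, 0x10000);

		if (!map_word_equal(map, map_read(map, 0), save0)) {
			/* offset 0x0 changed: aliased, 32K part; undo the write */
			map_write(map, save0, 0x0);
			return SZ_32K;
		}

		/* distinct cells: 128K part; restore the saved word */
		map_write(map, save1, 0x10000);
		return SZ_128K;
	}

Saving both words before the destructive write keeps the probe
non-destructive on either outcome.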
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 63c8048..1f2b880 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -2355,6 +2355,7 @@ static int __init ns_init_module(void)
uint64_t new_size = (uint64_t)nsmtd->erasesize << overridesize;
if (new_size >> overridesize != nsmtd->erasesize) {
NS_ERR("overridesize is too big\n");
+ retval = -EINVAL;
goto err_exit;
}
/* N.B. This relies on nand_scan not doing anything with the size before we change it */
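
The one-line nandsim fix closes a silent-failure hole: ns_init_module()
funnels all failures through a single err_exit label that returns retval,
so any branch that jumps there without setting retval makes the module
load look successful. A condensed sketch of the pattern, with hypothetical
stand-ins for the real checks:

	/* Sketch of the goto-based unwind used by ns_init_module(): every
	 * failing branch must set retval before jumping to err_exit, or
	 * the function falls out with its initial value of 0 (success).
	 */
	static int __init sketch_init_module(void)
	{
		int retval = 0;

		if (allocation_fails()) {		/* hypothetical check */
			retval = -ENOMEM;
			goto err_exit;
		}
		if (overridesize_too_big()) {		/* hypothetical check */
			NS_ERR("overridesize is too big\n");
			retval = -EINVAL;	/* the assignment this patch adds */
			goto err_exit;
		}
		return 0;

	err_exit:
		/* common cleanup; reports whatever retval was set to */
		return retval;
	}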
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index b2e84bd..6cbe033 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2080,6 +2080,9 @@ static void scan_for_empty_cpusets(struct cpuset *root)
* (of no effect) on systems that are actively using CPU hotplug
* but making no active use of cpusets.
*
+ * The only exception to this is suspend/resume, where we don't
+ * modify cpusets at all.
+ *
* This routine ensures that top_cpuset.cpus_allowed tracks
* cpu_active_mask on each CPU hotplug (cpuhp) event.
*
diff --git a/kernel/sched.c b/kernel/sched.c
index d759d33..e788b66 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7778,34 +7778,66 @@ int __init sched_create_sysfs_power_savings_entries(struct sysdev_class *cls)
}
#endif /* CONFIG_SCHED_MC || CONFIG_SCHED_SMT */
+static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */
+
/*
* Update cpusets according to cpu_active mask. If cpusets are
* disabled, cpuset_update_active_cpus() becomes a simple wrapper
* around partition_sched_domains().
+ *
+ * If we come here as part of a suspend/resume, don't touch cpusets because we
+ * want to restore them to their original state upon resume anyway.
*/
static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
+ switch (action) {
+ case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED_FROZEN:
+
+ /*
+ * num_cpus_frozen tracks how many CPUs are involved in the
+ * suspend/resume sequence. As long as this is not the last online
+ * operation in the resume sequence, just build a single sched
+ * domain, ignoring cpusets.
+ */
+ num_cpus_frozen--;
+ if (likely(num_cpus_frozen)) {
+ partition_sched_domains(1, NULL, NULL);
+ break;
+ }
+
+ /*
+ * This is the last CPU online operation. So fall through and
+ * restore the original sched domains by considering the
+ * cpuset configurations.
+ */
+
case CPU_ONLINE:
case CPU_DOWN_FAILED:
cpuset_update_active_cpus();
- return NOTIFY_OK;
+ break;
default:
return NOTIFY_DONE;
}
+ return NOTIFY_OK;
}
static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
void *hcpu)
{
- switch (action & ~CPU_TASKS_FROZEN) {
+ switch (action) {
case CPU_DOWN_PREPARE:
cpuset_update_active_cpus();
- return NOTIFY_OK;
+ break;
+ case CPU_DOWN_PREPARE_FROZEN:
+ num_cpus_frozen++;
+ partition_sched_domains(1, NULL, NULL);
+ break;
default:
return NOTIFY_DONE;
}
+ return NOTIFY_OK;
}
static int update_runtime(struct notifier_block *nfb,
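
Taken together, the two notifiers form a small counter-based state
machine: each CPU_DOWN_PREPARE_FROZEN during suspend increments
num_cpus_frozen, each CPU_ONLINE_FROZEN during resume decrements it, and
only the decrement that reaches zero re-reads the cpuset configuration. A
condensed sketch of just that counting logic, with the notifier plumbing
stripped away (the two wrapper names are hypothetical):

	static int num_cpus_frozen;	/* CPUs taken down by suspend, not yet back */

	/* Suspend path (CPU_DOWN_PREPARE_FROZEN): count the CPU and collapse
	 * to one flat sched domain, ignoring cpusets.
	 */
	static void sketch_cpu_down_frozen(void)
	{
		num_cpus_frozen++;
		partition_sched_domains(1, NULL, NULL);
	}

	/* Resume path (CPU_ONLINE_FROZEN): keep the flat domain until the
	 * last frozen CPU is back, then rebuild domains from the cpusets.
	 */
	static void sketch_cpu_online_frozen(void)
	{
		num_cpus_frozen--;
		if (num_cpus_frozen)
			partition_sched_domains(1, NULL, NULL);
		else
			cpuset_update_active_cpus();
	}

Because partition_sched_domains(1, NULL, NULL) just builds a single
domain spanning the active CPUs, the intermediate resume steps stay
cheap and the potentially expensive cpuset-driven rebuild runs exactly
once, at the end.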
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 6a569cc..5dce7d4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1511,8 +1511,18 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
addr);
if (vpol)
pol = vpol;
- } else if (vma->vm_policy)
+ } else if (vma->vm_policy) {
pol = vma->vm_policy;
+
+ /*
+ * shmem_alloc_page() passes MPOL_F_SHARED policy with
+ * a pseudo vma whose vma->vm_ops=NULL. Take a reference
+ * count on these policies, which will be dropped by
+ * mpol_cond_put() later.
+ */
+ if (mpol_needs_cond_ref(pol))
+ mpol_get(pol);
+ }
}
if (!pol)
pol = &default_policy;
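
The point of the conditional reference is symmetry: mpol_needs_cond_ref()
is true exactly for MPOL_F_SHARED policies, and mpol_cond_put() drops a
reference under the same condition, so a caller can release whatever
get_vma_policy() returned without caring which case it hit. A minimal
sketch of that caller-side pairing, assuming an alloc_page_vma()-style
path (the allocation helper is hypothetical):

	/* Sketch of the get/put pairing the hunk above completes:
	 * get_vma_policy() now takes a reference iff the policy is shared,
	 * and mpol_cond_put() drops one iff the policy is shared.
	 */
	static struct page *sketch_alloc_page_vma(gfp_t gfp,
						  struct vm_area_struct *vma,
						  unsigned long addr)
	{
		struct mempolicy *pol = get_vma_policy(current, vma, addr);
		struct page *page;

		page = allocate_by_policy(gfp, pol, addr); /* hypothetical */
		mpol_cond_put(pol);	/* no-op unless MPOL_F_SHARED is set */
		return page;
	}

Unshared VMA policies and the static default_policy are never refcounted
on this path, which is why the reference must be conditional rather than
a plain mpol_get().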