Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpu.c               83
-rw-r--r--  kernel/fork.c               5
-rw-r--r--  kernel/posix-cpu-timers.c   2
-rw-r--r--  kernel/power/main.c        21
-rw-r--r--  kernel/ptrace.c             3
-rw-r--r--  kernel/workqueue.c         12
6 files changed, 76 insertions(+), 50 deletions(-)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index d61ba88..e882c6b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -16,47 +16,76 @@
#include <asm/semaphore.h>
/* This protects CPUs going up and down... */
-DECLARE_MUTEX(cpucontrol);
-EXPORT_SYMBOL_GPL(cpucontrol);
+static DECLARE_MUTEX(cpucontrol);
static struct notifier_block *cpu_chain;
-/*
- * Used to check by callers if they need to acquire the cpucontrol
- * or not to protect a cpu from being removed. Its sometimes required to
- * call these functions both for normal operations, and in response to
- * a cpu being added/removed. If the context of the call is in the same
- * thread context as a CPU hotplug thread, we dont need to take the lock
- * since its already protected
- * check drivers/cpufreq/cpufreq.c for its usage - Ashok Raj
- */
+#ifdef CONFIG_HOTPLUG_CPU
+static struct task_struct *lock_cpu_hotplug_owner;
+static int lock_cpu_hotplug_depth;
-int current_in_cpu_hotplug(void)
+static int __lock_cpu_hotplug(int interruptible)
{
- return (current->flags & PF_HOTPLUG_CPU);
+ int ret = 0;
+
+ if (lock_cpu_hotplug_owner != current) {
+ if (interruptible)
+ ret = down_interruptible(&cpucontrol);
+ else
+ down(&cpucontrol);
+ }
+
+ /*
+ * Set only if we succeed in locking
+ */
+ if (!ret) {
+ lock_cpu_hotplug_depth++;
+ lock_cpu_hotplug_owner = current;
+ }
+
+ return ret;
}
-EXPORT_SYMBOL_GPL(current_in_cpu_hotplug);
+void lock_cpu_hotplug(void)
+{
+ __lock_cpu_hotplug(0);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug);
+void unlock_cpu_hotplug(void)
+{
+ if (--lock_cpu_hotplug_depth == 0) {
+ lock_cpu_hotplug_owner = NULL;
+ up(&cpucontrol);
+ }
+}
+EXPORT_SYMBOL_GPL(unlock_cpu_hotplug);
+
+int lock_cpu_hotplug_interruptible(void)
+{
+ return __lock_cpu_hotplug(1);
+}
+EXPORT_SYMBOL_GPL(lock_cpu_hotplug_interruptible);
+#endif /* CONFIG_HOTPLUG_CPU */
/* Need to know about CPUs going up/down? */
int register_cpu_notifier(struct notifier_block *nb)
{
int ret;
- if ((ret = down_interruptible(&cpucontrol)) != 0)
+ if ((ret = lock_cpu_hotplug_interruptible()) != 0)
return ret;
ret = notifier_chain_register(&cpu_chain, nb);
- up(&cpucontrol);
+ unlock_cpu_hotplug();
return ret;
}
EXPORT_SYMBOL(register_cpu_notifier);
void unregister_cpu_notifier(struct notifier_block *nb)
{
- down(&cpucontrol);
+ lock_cpu_hotplug();
notifier_chain_unregister(&cpu_chain, nb);
- up(&cpucontrol);
+ unlock_cpu_hotplug();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
@@ -112,13 +141,6 @@ int cpu_down(unsigned int cpu)
goto out;
}
- /*
- * Leave a trace in current->flags indicating we are already in
- * process of performing CPU hotplug. Callers can check if cpucontrol
- * is already acquired by current thread, and if so not cause
- * a dead lock by not acquiring the lock
- */
- current->flags |= PF_HOTPLUG_CPU;
err = notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE,
(void *)(long)cpu);
if (err == NOTIFY_BAD) {
@@ -171,7 +193,6 @@ out_thread:
out_allowed:
set_cpus_allowed(current, old_allowed);
out:
- current->flags &= ~PF_HOTPLUG_CPU;
unlock_cpu_hotplug();
return err;
}
@@ -182,7 +203,7 @@ int __devinit cpu_up(unsigned int cpu)
int ret;
void *hcpu = (void *)(long)cpu;
- if ((ret = down_interruptible(&cpucontrol)) != 0)
+ if ((ret = lock_cpu_hotplug_interruptible()) != 0)
return ret;
if (cpu_online(cpu) || !cpu_present(cpu)) {
@@ -190,11 +211,6 @@ int __devinit cpu_up(unsigned int cpu)
goto out;
}
- /*
- * Leave a trace in current->flags indicating we are already in
- * process of performing CPU hotplug.
- */
- current->flags |= PF_HOTPLUG_CPU;
ret = notifier_call_chain(&cpu_chain, CPU_UP_PREPARE, hcpu);
if (ret == NOTIFY_BAD) {
printk("%s: attempt to bring up CPU %u failed\n",
@@ -217,7 +233,6 @@ out_notify:
if (ret != 0)
notifier_call_chain(&cpu_chain, CPU_UP_CANCELED, hcpu);
out:
- current->flags &= ~PF_HOTPLUG_CPU;
- up(&cpucontrol);
+ unlock_cpu_hotplug();
return ret;
}
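
Taken together, the cpu.c hunks retire the PF_HOTPLUG_CPU flag in current->flags and make cpucontrol static, reachable only through lock_cpu_hotplug()/unlock_cpu_hotplug(). Recursion is handled with an owner/depth pair: a thread that already owns the semaphore merely bumps the depth rather than blocking on itself. Below is a minimal userspace sketch of that idiom built on pthreads; struct rlock and its functions are illustrative names, not kernel API (compile with -lpthread).

#include <pthread.h>

struct rlock {
	pthread_mutex_t sem;	/* plays the role of cpucontrol */
	pthread_t owner;	/* plays lock_cpu_hotplug_owner */
	int have_owner;		/* pthread_t has no designated "nobody" value */
	int depth;		/* plays lock_cpu_hotplug_depth */
};

static void rlock_acquire(struct rlock *l)
{
	/*
	 * The unlocked read of owner is safe for the same reason as in
	 * the patch: a thread can only see itself recorded as owner if
	 * it stored that value itself, so non-owners always take the
	 * blocking path.
	 */
	if (!l->have_owner || !pthread_equal(l->owner, pthread_self()))
		pthread_mutex_lock(&l->sem);
	l->owner = pthread_self();
	l->have_owner = 1;
	l->depth++;
}

static void rlock_release(struct rlock *l)
{
	if (--l->depth == 0) {
		l->have_owner = 0;
		pthread_mutex_unlock(&l->sem);
	}
}

int main(void)
{
	struct rlock l = { PTHREAD_MUTEX_INITIALIZER };

	rlock_acquire(&l);
	rlock_acquire(&l);	/* recursion from the owner: depth bump only */
	rlock_release(&l);
	rlock_release(&l);	/* final release drops the mutex */
	return 0;
}
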
diff --git a/kernel/fork.c b/kernel/fork.c
index 1c1cf8d..fb8572a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1124,8 +1124,6 @@ static task_t *copy_process(unsigned long clone_flags,
if (unlikely(p->ptrace & PT_PTRACED))
__ptrace_link(p, current->parent);
- cpuset_fork(p);
-
attach_pid(p, PIDTYPE_PID, p->pid);
attach_pid(p, PIDTYPE_TGID, p->tgid);
if (thread_group_leader(p)) {
@@ -1135,13 +1133,14 @@ static task_t *copy_process(unsigned long clone_flags,
__get_cpu_var(process_counts)++;
}
- proc_fork_connector(p);
if (!current->signal->tty && p->signal->tty)
p->signal->tty = NULL;
nr_threads++;
total_forks++;
write_unlock_irq(&tasklist_lock);
+ proc_fork_connector(p);
+ cpuset_fork(p);
retval = 0;
fork_out:
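
The fork.c change moves proc_fork_connector() and cpuset_fork() out from under write_lock_irq(&tasklist_lock), presumably because both can sleep (they allocate memory and take semaphores), which is not permitted while a spinning rwlock is held with interrupts disabled. A stub sketch of the resulting ordering; the puts() calls merely stand in for the real primitives:

#include <stdio.h>

/* Stand-ins for the kernel primitives; only the ordering matters here. */
static void write_lock_irq(void)	{ puts("tasklist_lock taken, IRQs off"); }
static void write_unlock_irq(void)	{ puts("tasklist_lock dropped"); }
static void attach_pid(void)		{ puts("attach_pid: atomic bookkeeping"); }
static void proc_fork_connector(void)	{ puts("proc_fork_connector: may sleep"); }
static void cpuset_fork(void)		{ puts("cpuset_fork: may sleep"); }

int main(void)
{
	write_lock_irq();
	attach_pid();		/* only non-sleeping work under the lock */
	write_unlock_irq();

	/* anything that can sleep runs strictly after the unlock */
	proc_fork_connector();
	cpuset_fork();
	return 0;
}
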
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 84af54c..cae4f57 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -36,7 +36,7 @@ timespec_to_sample(clockid_t which_clock, const struct timespec *tp)
union cpu_time_count ret;
ret.sched = 0; /* high half always zero when .cpu used */
if (CPUCLOCK_WHICH(which_clock) == CPUCLOCK_SCHED) {
- ret.sched = tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
+ ret.sched = (unsigned long long)tp->tv_sec * NSEC_PER_SEC + tp->tv_nsec;
} else {
ret.cpu = timespec_to_cputime(tp);
}
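
This is a 32-bit overflow fix: tv_sec is a long, so tv_sec * NSEC_PER_SEC was evaluated in 32-bit arithmetic on ILP32 targets and wrapped for anything beyond about 2.1 seconds, before being widened into the 64-bit sched field. A small self-contained demonstration:

#include <stdio.h>

#define NSEC_PER_SEC 1000000000L

int main(void)
{
	long tv_sec = 5, tv_nsec = 0;

	/*
	 * Evaluated entirely in 'long': where long is 32 bits this wraps
	 * (strictly speaking, signed overflow is undefined) before the
	 * result is widened for the assignment.
	 */
	unsigned long long bad = tv_sec * NSEC_PER_SEC + tv_nsec;

	/* The patched form: the cast forces a 64-bit multiply. */
	unsigned long long good =
		(unsigned long long)tv_sec * NSEC_PER_SEC + tv_nsec;

	printf("unpatched: %llu\npatched:   %llu\n", bad, good);
	return 0;
}

On an LP64 machine both values print as 5000000000; compile with -m32 to watch the unpatched expression wrap.
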
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 6ee2cad..d253f3a 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -24,7 +24,7 @@
DECLARE_MUTEX(pm_sem);
-struct pm_ops * pm_ops = NULL;
+struct pm_ops *pm_ops;
suspend_disk_method_t pm_disk_mode = PM_DISK_SHUTDOWN;
/**
@@ -151,6 +151,18 @@ static char *pm_states[PM_SUSPEND_MAX] = {
#endif
};
+static inline int valid_state(suspend_state_t state)
+{
+ /* Suspend-to-disk does not really need low-level support.
+ * It can work with reboot if needed. */
+ if (state == PM_SUSPEND_DISK)
+ return 1;
+
+ if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
+ return 0;
+ return 1;
+}
+
/**
* enter_state - Do common work of entering low-power state.
@@ -167,7 +179,7 @@ static int enter_state(suspend_state_t state)
{
int error;
- if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
+ if (!valid_state(state))
return -ENODEV;
if (down_trylock(&pm_sem))
return -EBUSY;
@@ -238,9 +250,8 @@ static ssize_t state_show(struct subsystem * subsys, char * buf)
char * s = buf;
for (i = 0; i < PM_SUSPEND_MAX; i++) {
- if (pm_states[i] && pm_ops && (!pm_ops->valid
- ||(pm_ops->valid && pm_ops->valid(i))))
- s += sprintf(s,"%s ",pm_states[i]);
+ if (pm_states[i] && valid_state(i))
+ s += sprintf(s,"%s ", pm_states[i]);
}
s += sprintf(s,"\n");
return (s - buf);
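
The two open-coded validity checks in enter_state() and state_show() are folded into one valid_state() helper, which also encodes the rule that suspend-to-disk needs no low-level platform support. Note one subtle semantic shift: state_show() previously listed nothing when pm_ops was NULL, while valid_state() treats a missing pm_ops (or a missing ->valid hook) as everything-allowed. A standalone sketch of the helper's behaviour, with a cut-down suspend_state_t and a stub pm_ops:

#include <stdio.h>

typedef enum {
	PM_SUSPEND_STANDBY,
	PM_SUSPEND_MEM,
	PM_SUSPEND_DISK,
} suspend_state_t;

struct pm_ops { int (*valid)(suspend_state_t state); };
static struct pm_ops *pm_ops;	/* NULL when no platform hooks exist */

static int valid_state(suspend_state_t state)
{
	/* Suspend-to-disk can fall back to reboot; always allowed. */
	if (state == PM_SUSPEND_DISK)
		return 1;
	/* Otherwise allowed unless the platform explicitly rejects it. */
	if (pm_ops && pm_ops->valid && !pm_ops->valid(state))
		return 0;
	return 1;
}

static int mem_only(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM;
}

int main(void)
{
	struct pm_ops ops = { .valid = mem_only };

	pm_ops = &ops;
	printf("standby=%d mem=%d disk=%d\n",
	       valid_state(PM_SUSPEND_STANDBY),
	       valid_state(PM_SUSPEND_MEM),
	       valid_state(PM_SUSPEND_DISK));	/* prints 0 1 1 */
	return 0;
}
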
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 17ee7e5..656476e 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -241,7 +241,8 @@ int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, in
if (write) {
copy_to_user_page(vma, page, addr,
maddr + offset, buf, bytes);
- set_page_dirty_lock(page);
+ if (!PageCompound(page))
+ set_page_dirty_lock(page);
} else {
copy_from_user_page(vma, page, addr,
buf, maddr + offset, bytes);
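
get_user_pages() can hand access_process_vm() a compound page (a hugetlb page, for instance), and calling set_page_dirty_lock() on one of those is unsafe, so the write path now skips the dirtying for compound pages. A toy model of the guard; the real struct page, PageCompound() and the locking are considerably more involved:

#include <stdio.h>

/* Toy model only: plain ints standing in for page->flags tests. */
struct page { int compound; int dirty; };

static int PageCompound(const struct page *p)	{ return p->compound; }
static void set_page_dirty_lock(struct page *p)	{ p->dirty = 1; }

/* The pattern the hunk introduces: dirty only non-compound pages. */
static void dirty_if_safe(struct page *p)
{
	if (!PageCompound(p))
		set_page_dirty_lock(p);
}

int main(void)
{
	struct page normal = { 0, 0 }, huge_tail = { 1, 0 };

	dirty_if_safe(&normal);
	dirty_if_safe(&huge_tail);
	printf("normal dirty=%d, compound dirty=%d\n",
	       normal.dirty, huge_tail.dirty);
	return 0;
}
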
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 42df83d..2bd5aee 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -102,7 +102,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
if (!test_and_set_bit(0, &work->pending)) {
if (unlikely(is_single_threaded(wq)))
- cpu = 0;
+ cpu = any_online_cpu(cpu_online_map);
BUG_ON(!list_empty(&work->entry));
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
ret = 1;
@@ -118,7 +118,7 @@ static void delayed_work_timer_fn(unsigned long __data)
int cpu = smp_processor_id();
if (unlikely(is_single_threaded(wq)))
- cpu = 0;
+ cpu = any_online_cpu(cpu_online_map);
__queue_work(per_cpu_ptr(wq->cpu_wq, cpu), work);
}
@@ -266,8 +266,8 @@ void fastcall flush_workqueue(struct workqueue_struct *wq)
might_sleep();
if (is_single_threaded(wq)) {
- /* Always use cpu 0's area. */
- flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, 0));
+ /* Always use first cpu's area. */
+ flush_cpu_workqueue(per_cpu_ptr(wq->cpu_wq, any_online_cpu(cpu_online_map)));
} else {
int cpu;
@@ -320,7 +320,7 @@ struct workqueue_struct *__create_workqueue(const char *name,
lock_cpu_hotplug();
if (singlethread) {
INIT_LIST_HEAD(&wq->list);
- p = create_workqueue_thread(wq, 0);
+ p = create_workqueue_thread(wq, any_online_cpu(cpu_online_map));
if (!p)
destroy = 1;
else
@@ -374,7 +374,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
/* We don't need the distraction of CPUs appearing and vanishing. */
lock_cpu_hotplug();
if (is_single_threaded(wq))
- cleanup_workqueue_thread(wq, 0);
+ cleanup_workqueue_thread(wq, any_online_cpu(cpu_online_map));
else {
for_each_online_cpu(cpu)
cleanup_workqueue_thread(wq, cpu);
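
Single-threaded workqueues used to pin all their state to CPU 0, but CPU 0 is not guaranteed to be online everywhere (some architectures allow it to be hot-removed). Each hard-coded 0 therefore becomes any_online_cpu(cpu_online_map), the first CPU currently set in the online mask. A toy re-implementation of that selection over a plain bitmask; the real kernel uses cpumask_t and find_first_bit():

#include <stdio.h>

/* Toy cpumask: bit n set means CPU n is online. */
typedef unsigned long cpumask_t;

/* Behaves like any_online_cpu(): index of the first online CPU. */
static int any_online_cpu(cpumask_t map)
{
	int cpu;

	for (cpu = 0; cpu < (int)(8 * sizeof(map)); cpu++)
		if (map & (1UL << cpu))
			return cpu;
	return -1;	/* empty mask: cannot happen on a live system */
}

int main(void)
{
	cpumask_t online = 0xcUL;	/* CPUs 2 and 3 online, CPU 0 offline */

	printf("single-threaded work runs on CPU %d\n", any_online_cpu(online));
	return 0;
}
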