Diffstat (limited to 'kernel')
-rw-r--r--	kernel/fork.c      |  6 ++++++
-rw-r--r--	kernel/sched.c     |  6 ------
-rw-r--r--	kernel/softirq.c   | 20 ++++++++++++++++++++
-rw-r--r--	kernel/workqueue.c | 29 +++++++++++++++++++++++++++++
4 files changed, 55 insertions, 6 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index b373322..9bd7b65 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1534,6 +1534,12 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 
 	check_unshare_flags(&unshare_flags);
 
+	/* Return -EINVAL for all unsupported flags */
+	err = -EINVAL;
+	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
+				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM))
+		goto bad_unshare_out;
+
 	if ((err = unshare_thread(unshare_flags)))
 		goto bad_unshare_out;
 	if ((err = unshare_fs(unshare_flags, &new_fs)))
diff --git a/kernel/sched.c b/kernel/sched.c
index 4d46e90..6b6e0d7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -707,12 +707,6 @@ static int recalc_task_prio(task_t *p, unsigned long long now)
 				DEF_TIMESLICE);
 		} else {
 			/*
-			 * The lower the sleep avg a task has the more
-			 * rapidly it will rise with sleep time.
-			 */
-			sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
-
-			/*
 			 * Tasks waking from uninterruptible sleep are
 			 * limited in their sleep_avg rise as they
 			 * are likely to be waiting on I/O
diff --git a/kernel/softirq.c b/kernel/softirq.c
index ad3295c..ec8fed4 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -16,6 +16,7 @@
 #include <linux/cpu.h>
 #include <linux/kthread.h>
 #include <linux/rcupdate.h>
+#include <linux/smp.h>
 
 #include <asm/irq.h>
 /*
@@ -495,3 +496,22 @@ __init int spawn_ksoftirqd(void)
 	register_cpu_notifier(&cpu_nfb);
 	return 0;
 }
+
+#ifdef CONFIG_SMP
+/*
+ * Call a function on all processors
+ */
+int on_each_cpu(void (*func) (void *info), void *info, int retry, int wait)
+{
+	int ret = 0;
+
+	preempt_disable();
+	ret = smp_call_function(func, info, retry, wait);
+	local_irq_disable();
+	func(info);
+	local_irq_enable();
+	preempt_enable();
+	return ret;
+}
+EXPORT_SYMBOL(on_each_cpu);
+#endif
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index b052e2c..e9e464a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -27,6 +27,7 @@
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/kthread.h>
+#include <linux/hardirq.h>
 
 /*
  * The per-CPU workqueue (if single thread, we always use the first
@@ -476,6 +477,34 @@ void cancel_rearming_delayed_work(struct work_struct *work)
 }
 EXPORT_SYMBOL(cancel_rearming_delayed_work);
 
+/**
+ * execute_in_process_context - reliably execute the routine with user context
+ * @fn:		the function to execute
+ * @data:	data to pass to the function
+ * @ew:		guaranteed storage for the execute work structure (must
+ *		be available when the work executes)
+ *
+ * Executes the function immediately if process context is available,
+ * otherwise schedules the function for delayed execution.
+ *
+ * Returns:	0 - function was executed
+ *		1 - function was scheduled for execution
+ */
+int execute_in_process_context(void (*fn)(void *data), void *data,
+			       struct execute_work *ew)
+{
+	if (!in_interrupt()) {
+		fn(data);
+		return 0;
+	}
+
+	INIT_WORK(&ew->work, fn, data);
+	schedule_work(&ew->work);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(execute_in_process_context);
+
 int keventd_up(void)
 {
 	return keventd_wq != NULL;
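
For context, the sketch below shows how a module of this era might call the two helpers the diff exports, on_each_cpu() and execute_in_process_context(). It is an illustration only, not part of the patch: the module, function names, and buffer logic are invented, and it assumes the 2.6.16-vintage four-argument on_each_cpu() and three-argument INIT_WORK() signatures visible above (both changed in later kernels).

/*
 * Illustrative only -- not from the patch.  A hypothetical module
 * exercising the helpers added in kernel/softirq.c and
 * kernel/workqueue.c above.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

static unsigned long hits[NR_CPUS];

/* Runs on every CPU; local interrupts are disabled while it runs,
 * per the on_each_cpu() implementation above. */
static void bump_counter(void *info)
{
	unsigned long *counters = info;

	counters[smp_processor_id()]++;
}

/* May sleep: execute_in_process_context() guarantees process context. */
static void free_buffer(void *data)
{
	kfree(data);
}

static struct execute_work free_ew;
static void *buffer;

static int __init example_init(void)
{
	buffer = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	/* retry=0: fail rather than retry if the IPI cannot be sent;
	 * wait=1: block until bump_counter() has run on every CPU.  */
	on_each_cpu(bump_counter, hits, 0, 1);
	return 0;
}

static void __exit example_exit(void)
{
	/*
	 * Safe from any context: frees inline when process context is
	 * available, otherwise defers to keventd using the
	 * caller-supplied free_ew storage.
	 */
	execute_in_process_context(free_buffer, buffer, &free_ew);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The caller-supplied struct execute_work is what makes execute_in_process_context() usable from release callbacks that may fire in interrupt context: the work item lives in the caller's object rather than on the stack, so it remains valid until keventd runs the deferred function.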