author     Todd Poynor <toddpoynor@google.com>  2013-01-15 19:33:11 -0800
committer  Todd Poynor <toddpoynor@google.com>  2013-01-15 19:33:11 -0800
commit     fbad46f30d0ccb24fcadf4fa129d917fd0bfa1fe (patch)
tree       b7ed0216b986c7a0c8f7ab7f2b77191d9b070ead /kernel
parent     2627bab94940583e37f36ffa9f635e35a335c33d (diff)
parent     2a68fec1118107fbaa40c631d45641d40d88ad71 (diff)
Merge commit 'v3.0.58' into android-3.0
Change-Id: I05959ed26f71cf9197df59291e8e13f254b2115c
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c          |  2
-rw-r--r--  kernel/futex.c           | 59
-rw-r--r--  kernel/irq/manage.c      | 23
-rw-r--r--  kernel/resource.c        |  7
-rw-r--r--  kernel/sched_autogroup.c |  4
-rw-r--r--  kernel/sched_autogroup.h |  5
-rw-r--r--  kernel/trace/ftrace.c    |  2
-rw-r--r--  kernel/watchdog.c        |  4
-rw-r--r--  kernel/workqueue.c       |  8
9 files changed, 74 insertions, 40 deletions
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 739553e..3948f0a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2665,9 +2665,7 @@ static int cgroup_create_dir(struct cgroup *cgrp, struct dentry *dentry,
dentry->d_fsdata = cgrp;
inc_nlink(parent->d_inode);
rcu_assign_pointer(cgrp->dentry, dentry);
- dget(dentry);
}
- dput(dentry);
return error;
}
diff --git a/kernel/futex.c b/kernel/futex.c
index e45efe7..61e554e 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb,
struct futex_pi_state **ps,
struct task_struct *task, int set_waiters)
{
- int lock_taken, ret, ownerdied = 0;
+ int lock_taken, ret, force_take = 0;
u32 uval, newval, curval, vpid = task_pid_vnr(task);
retry:
@@ -755,17 +755,15 @@ retry:
newval = curval | FUTEX_WAITERS;
/*
- * There are two cases, where a futex might have no owner (the
- * owner TID is 0): OWNER_DIED. We take over the futex in this
- * case. We also do an unconditional take over, when the owner
- * of the futex died.
- *
- * This is safe as we are protected by the hash bucket lock !
+ * Should we force take the futex? See below.
*/
- if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) {
- /* Keep the OWNER_DIED bit */
+ if (unlikely(force_take)) {
+ /*
+ * Keep the OWNER_DIED and the WAITERS bit and set the
+ * new TID value.
+ */
newval = (curval & ~FUTEX_TID_MASK) | vpid;
- ownerdied = 0;
+ force_take = 0;
lock_taken = 1;
}
@@ -775,7 +773,7 @@ retry:
goto retry;
/*
- * We took the lock due to owner died take over.
+ * We took the lock due to forced take over.
*/
if (unlikely(lock_taken))
return 1;
@@ -790,20 +788,25 @@ retry:
switch (ret) {
case -ESRCH:
/*
- * No owner found for this futex. Check if the
- * OWNER_DIED bit is set to figure out whether
- * this is a robust futex or not.
+ * We failed to find an owner for this
+ * futex. So we have no pi_state to block
+ * on. This can happen in two cases:
+ *
+ * 1) The owner died
+ * 2) A stale FUTEX_WAITERS bit
+ *
+ * Re-read the futex value.
*/
if (get_futex_value_locked(&curval, uaddr))
return -EFAULT;
/*
- * We simply start over in case of a robust
- * futex. The code above will take the futex
- * and return happy.
+ * If the owner died or we have a stale
+ * WAITERS bit the owner TID in the user space
+ * futex is 0.
*/
- if (curval & FUTEX_OWNER_DIED) {
- ownerdied = 1;
+ if (!(curval & FUTEX_TID_MASK)) {
+ force_take = 1;
goto retry;
}
default:
@@ -840,6 +843,9 @@ static void wake_futex(struct futex_q *q)
{
struct task_struct *p = q->task;
+ if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n"))
+ return;
+
/*
* We set q->lock_ptr = NULL _before_ we wake up the task. If
* a non-futex wake up happens on another CPU then the task
@@ -1075,6 +1081,10 @@ retry_private:
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key1)) {
+ if (this->pi_state || this->rt_waiter) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
wake_futex(this);
if (++ret >= nr_wake)
break;
@@ -1087,6 +1097,10 @@ retry_private:
op_ret = 0;
plist_for_each_entry_safe(this, next, head, list) {
if (match_futex (&this->key, &key2)) {
+ if (this->pi_state || this->rt_waiter) {
+ ret = -EINVAL;
+ goto out_unlock;
+ }
wake_futex(this);
if (++op_ret >= nr_wake2)
break;
@@ -1095,6 +1109,7 @@ retry_private:
ret += op_ret;
}
+out_unlock:
double_unlock_hb(hb1, hb2);
out_put_keys:
put_futex_key(&key2);
@@ -1384,9 +1399,13 @@ retry_private:
/*
* FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
* be paired with each other and no other futex ops.
+ *
+ * We should never be requeueing a futex_q with a pi_state,
+ * which is awaiting a futex_unlock_pi().
*/
if ((requeue_pi && !this->rt_waiter) ||
- (!requeue_pi && this->rt_waiter)) {
+ (!requeue_pi && this->rt_waiter) ||
+ this->pi_state) {
ret = -EINVAL;
break;
}
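
The futex hunks above are easier to follow with the PI futex word layout in mind: the low 30 bits of the user-space word hold the owner's TID, and the top two bits flag queued waiters and a dead owner. force_take now covers any word whose TID field is 0, whether the owner died or a stale FUTEX_WAITERS bit was left behind. A minimal userspace sketch that decodes a futex word (bit constants as in linux/futex.h; the helper and the sample values are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define FUTEX_WAITERS    0x80000000u   /* waiters are queued in the kernel */
#define FUTEX_OWNER_DIED 0x40000000u   /* robust-futex owner exited        */
#define FUTEX_TID_MASK   0x3fffffffu   /* TID of the current owner         */

static void decode(uint32_t uval)
{
        printf("owner tid %u%s%s\n",
               uval & FUTEX_TID_MASK,
               (uval & FUTEX_WAITERS)    ? ", waiters queued" : "",
               (uval & FUTEX_OWNER_DIED) ? ", owner died"     : "");
}

int main(void)
{
        decode(1234u | FUTEX_WAITERS);  /* healthy contended lock               */
        decode(FUTEX_OWNER_DIED);       /* owner died, tid 0: take over         */
        decode(FUTEX_WAITERS);          /* stale WAITERS bit, tid 0: force take */
        return 0;
}
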
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index fa4a70e..3e1bdf9 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -698,6 +698,7 @@ static void
irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
{
cpumask_var_t mask;
+ bool valid = true;
if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
return;
@@ -712,10 +713,18 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
}
raw_spin_lock_irq(&desc->lock);
- cpumask_copy(mask, desc->irq_data.affinity);
+ /*
+ * This code is triggered unconditionally. Check the affinity
+ * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
+ */
+ if (desc->irq_data.affinity)
+ cpumask_copy(mask, desc->irq_data.affinity);
+ else
+ valid = false;
raw_spin_unlock_irq(&desc->lock);
- set_cpus_allowed_ptr(current, mask);
+ if (valid)
+ set_cpus_allowed_ptr(current, mask);
free_cpumask_var(mask);
}
#else
@@ -925,6 +934,16 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
*/
get_task_struct(t);
new->thread = t;
+ /*
+ * Tell the thread to set its affinity. This is
+ * important for shared interrupt handlers as we do
+ * not invoke setup_affinity() for the secondary
+ * handlers as everything is already set up. Even for
+ * interrupts marked with IRQF_NO_BALANCE this is
+ * correct as we want the thread to move to the cpu(s)
+ * on which the requesting code placed the interrupt.
+ */
+ set_bit(IRQTF_AFFINITY, &new->thread_flags);
}
if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
diff --git a/kernel/resource.c b/kernel/resource.c
index 3ff4017..b29b83d 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old,
else
tmp.end = root->end;
+ if (tmp.end < tmp.start)
+ goto next;
+
resource_clip(&tmp, constraint->min, constraint->max);
arch_remove_reservations(&tmp);
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old,
return 0;
}
}
- if (!this)
+
+next: if (!this || this->end == root->end)
break;
+
if (this != old)
tmp.start = this->end + 1;
this = this->sibling;
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 429242f..f280df1 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -160,15 +160,11 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
p->signal->autogroup = autogroup_kref_get(ag);
- if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
- goto out;
-
t = p;
do {
sched_move_task(t);
} while_each_thread(p, t);
-out:
unlock_task_sighand(p, &flags);
autogroup_kref_put(prev);
}
diff --git a/kernel/sched_autogroup.h b/kernel/sched_autogroup.h
index 0557705..7b859ff 100644
--- a/kernel/sched_autogroup.h
+++ b/kernel/sched_autogroup.h
@@ -1,11 +1,6 @@
#ifdef CONFIG_SCHED_AUTOGROUP
struct autogroup {
- /*
- * reference doesn't mean how many thread attach to this
- * autogroup now. It just stands for the number of task
- * could use this autogroup.
- */
struct kref kref;
struct task_group *tg;
struct rw_semaphore lock;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 9f8e2e1..f88ea18 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2058,7 +2058,7 @@ static void reset_iter_read(struct ftrace_iterator *iter)
{
iter->pos = 0;
iter->func_pos = 0;
- iter->flags &= ~(FTRACE_ITER_PRINTALL & FTRACE_ITER_HASH);
+ iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
}
static void *t_start(struct seq_file *m, loff_t *pos)
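
The one-character ftrace fix above deserves a second look: FTRACE_ITER_PRINTALL and FTRACE_ITER_HASH are distinct bits, so ANDing them yields 0 and "&= ~0" clears nothing; ORing them first is what actually drops both flags. A standalone sketch with made-up flag values:

#include <stdio.h>

#define FLAG_PRINTALL 0x1   /* illustrative values, not the kernel's */
#define FLAG_HASH     0x2

int main(void)
{
        unsigned int flags = FLAG_PRINTALL | FLAG_HASH;

        /* Buggy: 0x1 & 0x2 == 0, so ~0 masks out nothing - both flags survive. */
        unsigned int buggy = flags & ~(FLAG_PRINTALL & FLAG_HASH);

        /* Fixed: negate the union of the masks, clearing both flags. */
        unsigned int fixed = flags & ~(FLAG_PRINTALL | FLAG_HASH);

        printf("buggy: 0x%x, fixed: 0x%x\n", buggy, fixed);  /* 0x3 vs 0x0 */
        return 0;
}
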
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 3d0c56a..53e4432 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -113,7 +113,7 @@ static unsigned long get_timestamp(int this_cpu)
return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */
}
-static unsigned long get_sample_period(void)
+static u64 get_sample_period(void)
{
/*
* convert watchdog_thresh from seconds to ns
@@ -121,7 +121,7 @@ static unsigned long get_sample_period(void)
* increment before the hardlockup detector generates
* a warning
*/
- return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
+ return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
/* Commands for resetting the watchdog */
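
The watchdog change widens the sample-period arithmetic: on 32-bit builds unsigned long is 32 bits, so for larger watchdog_thresh settings the product thresh * (NSEC_PER_SEC / 5) wraps before it is ever returned, and casting one operand to u64 (and returning u64) keeps the full nanosecond value. A standalone sketch of the wrap, using fixed-width types in place of the kernel's and a threshold chosen for illustration:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
        uint32_t thresh = 40;  /* e.g. watchdog_thresh = 20, doubled for softlockup */

        /* 32-bit math: 40 * 200,000,000 = 8,000,000,000 wraps modulo 2^32 */
        uint32_t wrapped = thresh * (uint32_t)(NSEC_PER_SEC / 5);

        /* Widen one operand first and the full 64-bit product survives */
        uint64_t full = (uint64_t)thresh * (NSEC_PER_SEC / 5);

        printf("wrapped: %u ns\nfull:    %llu ns\n",
               wrapped, (unsigned long long)full);   /* 3705032704 vs 8000000000 */
        return 0;
}
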
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index aef9452..dc8438d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1145,8 +1145,8 @@ int queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) {
unsigned int lcpu;
- BUG_ON(timer_pending(timer));
- BUG_ON(!list_empty(&work->entry));
+ WARN_ON_ONCE(timer_pending(timer));
+ WARN_ON_ONCE(!list_empty(&work->entry));
timer_stats_timer_set_start_info(&dwork->timer);
@@ -2044,8 +2044,10 @@ static int rescuer_thread(void *__wq)
repeat:
set_current_state(TASK_INTERRUPTIBLE);
- if (kthread_should_stop())
+ if (kthread_should_stop()) {
+ __set_current_state(TASK_RUNNING);
return 0;
+ }
/*
* See whether any cpu is asking for help. Unbounded