author     Ingo Molnar <mingo@elte.hu>    2008-05-29 16:05:05 +0200
committer  Ingo Molnar <mingo@elte.hu>    2008-05-29 16:05:05 +0200
commit     6715930654e06c4d2e66e718ea159079f71838f4 (patch)
tree       6a0a19fb62f3e99cb5f6bf6c34ae541f7c30fb42 /kernel
parent     ea3f01f8afd3bc5daff915cc4ea5cc5ea9e7d427 (diff)
parent     e490517a039a99d692cb3a5561941b0a5f576172 (diff)
Merge commit 'linus/master' into sched-fixes-for-linus
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit.c          11
-rw-r--r--  kernel/audit_tree.c      5
-rw-r--r--  kernel/cgroup.c          2
-rw-r--r--  kernel/exit.c            7
-rw-r--r--  kernel/fork.c          130
-rw-r--r--  kernel/module.c         18
-rw-r--r--  kernel/signal.c         51
-rw-r--r--  kernel/stop_machine.c    7
-rw-r--r--  kernel/sys.c             6
-rw-r--r--  kernel/sysctl.c          5
10 files changed, 85 insertions(+), 157 deletions(-)
diff --git a/kernel/audit.c b/kernel/audit.c
index b7d3709..e8692a5 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -572,16 +572,17 @@ void audit_send_reply(int pid, int seq, int type, int done, int multi,
skb = audit_make_reply(pid, seq, type, done, multi, payload, size);
if (!skb)
- return;
+ goto out;
reply->pid = pid;
reply->skb = skb;
tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
- if (IS_ERR(tsk)) {
- kfree(reply);
- kfree_skb(skb);
- }
+ if (!IS_ERR(tsk))
+ return;
+ kfree_skb(skb);
+out:
+ kfree(reply);
}
/*
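
The rework above also closes a leak: when audit_make_reply() failed, the old early return skipped freeing the preallocated reply. Now every failure path funnels through a single exit label, and only a successful kthread_run() hands ownership to the thread. A minimal user-space sketch of that single-exit idiom, with make_reply() and start_sender() as hypothetical stand-ins for audit_make_reply() and kthread_run():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct reply { char *buf; };

static char *make_reply(const char *msg)        /* may fail, like audit_make_reply() */
{
        return msg ? strdup(msg) : NULL;
}

static int start_sender(struct reply *r)        /* may fail, like kthread_run() */
{
        return r->buf ? 0 : -1;
}

static void send_reply(const char *msg)
{
        struct reply *r = malloc(sizeof(*r));
        char *buf;

        if (!r)
                return;

        buf = make_reply(msg);
        if (!buf)
                goto out;               /* free the reply on every early exit */
        r->buf = buf;

        if (start_sender(r) == 0)
                return;                 /* the sender now owns r and buf */

        free(buf);
out:
        free(r);
}

int main(void)
{
        send_reply("hello");
        send_reply(NULL);               /* exercises the goto path */
        return 0;
}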
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 9ef5e0a..f7921a2 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -172,10 +172,9 @@ static void insert_hash(struct audit_chunk *chunk)
struct audit_chunk *audit_tree_lookup(const struct inode *inode)
{
struct list_head *list = chunk_hash(inode);
- struct list_head *pos;
+ struct audit_chunk *p;
- list_for_each_rcu(pos, list) {
- struct audit_chunk *p = container_of(pos, struct audit_chunk, hash);
+ list_for_each_entry_rcu(p, list, hash) {
if (p->watch.inode == inode) {
get_inotify_watch(&p->watch);
return p;
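
list_for_each_entry_rcu() folds the container_of() step into the iterator, which is what lets the hunk drop the separate struct list_head cursor. A small user-space illustration of that step, with a local copy of container_of() rather than the kernel header:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct list_head { struct list_head *next, *prev; };

struct chunk {
        int inode;                      /* stands in for watch.inode */
        struct list_head hash;          /* plays the role of audit_chunk.hash */
};

int main(void)
{
        struct chunk c = { .inode = 42 };
        struct list_head *pos = &c.hash;

        /* The old loop body did this mapping by hand for every element. */
        struct chunk *p = container_of(pos, struct chunk, hash);

        printf("inode = %d\n", p->inode);
        return 0;
}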
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index fbc6fc8..15ac0e1 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2903,7 +2903,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys)
cg = tsk->cgroups;
parent = task_cgroup(tsk, subsys->subsys_id);
- snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "node_%d", tsk->pid);
+ snprintf(nodename, MAX_CGROUP_TYPE_NAMELEN, "%d", tsk->pid);
/* Pin the hierarchy */
atomic_inc(&parent->root->sb->s_active);
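
The hunk only changes the name given to the per-process cgroup directory from node_<pid> to the bare pid. A trivial user-space sketch of the bounded formatting, with NAMELEN standing in for the kernel's MAX_CGROUP_TYPE_NAMELEN (the value used here is assumed, for illustration only):

#include <stdio.h>
#include <unistd.h>

#define NAMELEN 32      /* assumed stand-in for MAX_CGROUP_TYPE_NAMELEN */

int main(void)
{
        char nodename[NAMELEN];

        /* After the patch the directory is named after the bare pid. */
        snprintf(nodename, sizeof(nodename), "%d", (int)getpid());
        printf("cgroup node: %s\n", nodename);
        return 0;
}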
diff --git a/kernel/exit.c b/kernel/exit.c
index 1510f78..8f6185e 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -126,6 +126,12 @@ static void __exit_signal(struct task_struct *tsk)
__unhash_process(tsk);
+ /*
+ * Do this under ->siglock, we can race with another thread
+ * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
+ */
+ flush_sigqueue(&tsk->pending);
+
tsk->signal = NULL;
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
@@ -133,7 +139,6 @@ static void __exit_signal(struct task_struct *tsk)
__cleanup_sighand(sighand);
clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
- flush_sigqueue(&tsk->pending);
if (sig) {
flush_sigqueue(&sig->shared_pending);
taskstats_tgid_free(sig);
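
The point of the move is ordering: the pending queue is now flushed while ->siglock is held, so a concurrent sigqueue_free() that tests q->list under the same lock cannot race with the free. A rough pthreads sketch of that locking discipline (all names here are illustrative, not kernel APIs; build with -pthread):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t siglock = PTHREAD_MUTEX_INITIALIZER;

struct entry { struct entry *next; };
static struct entry *pending;           /* plays the role of tsk->pending */

static void queue_entry(struct entry *e)
{
        pthread_mutex_lock(&siglock);
        e->next = pending;
        pending = e;
        pthread_mutex_unlock(&siglock);
}

/* The "exit" side: drop every queued entry while holding the lock, so a
 * freeing side that inspects the queue under the same lock sees either a
 * fully queued entry or none at all, never a half-freed one. */
static void flush_pending(void)
{
        pthread_mutex_lock(&siglock);
        while (pending) {
                struct entry *e = pending;

                pending = e->next;
                free(e);
        }
        pthread_mutex_unlock(&siglock);
}

int main(void)
{
        struct entry *a = malloc(sizeof(*a));
        struct entry *b = malloc(sizeof(*b));

        if (a)
                queue_entry(a);
        if (b)
                queue_entry(b);

        flush_pending();
        printf("pending flushed under the lock\n");
        return 0;
}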
diff --git a/kernel/fork.c b/kernel/fork.c
index 933e60e..19908b2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -660,136 +660,6 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
return 0;
}
-static int count_open_files(struct fdtable *fdt)
-{
- int size = fdt->max_fds;
- int i;
-
- /* Find the last open fd */
- for (i = size/(8*sizeof(long)); i > 0; ) {
- if (fdt->open_fds->fds_bits[--i])
- break;
- }
- i = (i+1) * 8 * sizeof(long);
- return i;
-}
-
-static struct files_struct *alloc_files(void)
-{
- struct files_struct *newf;
- struct fdtable *fdt;
-
- newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
- if (!newf)
- goto out;
-
- atomic_set(&newf->count, 1);
-
- spin_lock_init(&newf->file_lock);
- newf->next_fd = 0;
- fdt = &newf->fdtab;
- fdt->max_fds = NR_OPEN_DEFAULT;
- fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
- fdt->open_fds = (fd_set *)&newf->open_fds_init;
- fdt->fd = &newf->fd_array[0];
- INIT_RCU_HEAD(&fdt->rcu);
- fdt->next = NULL;
- rcu_assign_pointer(newf->fdt, fdt);
-out:
- return newf;
-}
-
-/*
- * Allocate a new files structure and copy contents from the
- * passed in files structure.
- * errorp will be valid only when the returned files_struct is NULL.
- */
-static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
-{
- struct files_struct *newf;
- struct file **old_fds, **new_fds;
- int open_files, size, i;
- struct fdtable *old_fdt, *new_fdt;
-
- *errorp = -ENOMEM;
- newf = alloc_files();
- if (!newf)
- goto out;
-
- spin_lock(&oldf->file_lock);
- old_fdt = files_fdtable(oldf);
- new_fdt = files_fdtable(newf);
- open_files = count_open_files(old_fdt);
-
- /*
- * Check whether we need to allocate a larger fd array and fd set.
- * Note: we're not a clone task, so the open count won't change.
- */
- if (open_files > new_fdt->max_fds) {
- new_fdt->max_fds = 0;
- spin_unlock(&oldf->file_lock);
- spin_lock(&newf->file_lock);
- *errorp = expand_files(newf, open_files-1);
- spin_unlock(&newf->file_lock);
- if (*errorp < 0)
- goto out_release;
- new_fdt = files_fdtable(newf);
- /*
- * Reacquire the oldf lock and a pointer to its fd table
- * who knows it may have a new bigger fd table. We need
- * the latest pointer.
- */
- spin_lock(&oldf->file_lock);
- old_fdt = files_fdtable(oldf);
- }
-
- old_fds = old_fdt->fd;
- new_fds = new_fdt->fd;
-
- memcpy(new_fdt->open_fds->fds_bits,
- old_fdt->open_fds->fds_bits, open_files/8);
- memcpy(new_fdt->close_on_exec->fds_bits,
- old_fdt->close_on_exec->fds_bits, open_files/8);
-
- for (i = open_files; i != 0; i--) {
- struct file *f = *old_fds++;
- if (f) {
- get_file(f);
- } else {
- /*
- * The fd may be claimed in the fd bitmap but not yet
- * instantiated in the files array if a sibling thread
- * is partway through open(). So make sure that this
- * fd is available to the new process.
- */
- FD_CLR(open_files - i, new_fdt->open_fds);
- }
- rcu_assign_pointer(*new_fds++, f);
- }
- spin_unlock(&oldf->file_lock);
-
- /* compute the remainder to be cleared */
- size = (new_fdt->max_fds - open_files) * sizeof(struct file *);
-
- /* This is long word aligned thus could use a optimized version */
- memset(new_fds, 0, size);
-
- if (new_fdt->max_fds > open_files) {
- int left = (new_fdt->max_fds-open_files)/8;
- int start = open_files / (8 * sizeof(unsigned long));
-
- memset(&new_fdt->open_fds->fds_bits[start], 0, left);
- memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
- }
-
- return newf;
-
-out_release:
- kmem_cache_free(files_cachep, newf);
-out:
- return NULL;
-}
-
static int copy_files(unsigned long clone_flags, struct task_struct * tsk)
{
struct files_struct *oldf, *newf;
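
The block above removes the fd-table duplication helpers from kernel/fork.c (the diff is limited to kernel/, so their destination is not shown here). The core of count_open_files() is a backwards scan of the open-fd bitmap for the last non-zero word, rounded up to a whole word of descriptors; a stand-alone sketch of that computation:

#include <stdio.h>

static int count_open_files(const unsigned long *open_fds, int max_fds)
{
        int words = max_fds / (8 * (int)sizeof(long));
        int i;

        /* Find the last bitmap word with any open-fd bit set. */
        for (i = words; i > 0; ) {
                if (open_fds[--i])
                        break;
        }
        /* Round up so the copy in dup_fd() covers whole bitmap words. */
        return (i + 1) * 8 * (int)sizeof(long);
}

int main(void)
{
        unsigned long open_fds[4] = { 0 };      /* room for 4 words of fds */

        open_fds[0] = 0x7;                      /* fds 0..2 open */
        open_fds[1] = 0x1;                      /* one fd open in the second word */

        printf("copy the first %d fd slots\n",
               count_open_files(open_fds, 4 * 8 * (int)sizeof(long)));
        return 0;
}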
diff --git a/kernel/module.c b/kernel/module.c
index f5e9491..5f80478 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1337,7 +1337,19 @@ out_unreg:
kobject_put(&mod->mkobj.kobj);
return err;
}
-#endif
+
+static void mod_sysfs_fini(struct module *mod)
+{
+ kobject_put(&mod->mkobj.kobj);
+}
+
+#else /* CONFIG_SYSFS */
+
+static void mod_sysfs_fini(struct module *mod)
+{
+}
+
+#endif /* CONFIG_SYSFS */
static void mod_kobject_remove(struct module *mod)
{
@@ -1345,7 +1357,7 @@ static void mod_kobject_remove(struct module *mod)
module_param_sysfs_remove(mod);
kobject_put(mod->mkobj.drivers_dir);
kobject_put(mod->holders_dir);
- kobject_put(&mod->mkobj.kobj);
+ mod_sysfs_fini(mod);
}
/*
@@ -1780,7 +1792,7 @@ static struct module *load_module(void __user *umod,
/* Sanity checks against insmoding binaries or wrong arch,
weird elf version */
- if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
|| hdr->e_type != ET_REL
|| !elf_check_arch(hdr)
|| hdr->e_shentsize != sizeof(*sechdrs)) {
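
Alongside the CONFIG_SYSFS split, the magic check now spells out SELFMAG, the length of ELFMAG, instead of a bare 4. The same check can be written in user space straight from <elf.h>, which defines ELFMAG, SELFMAG and EI_NIDENT:

#include <elf.h>
#include <stdio.h>
#include <string.h>

int main(int argc, char **argv)
{
        unsigned char ident[EI_NIDENT] = { 0 };
        FILE *f;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <file>\n", argv[0]);
                return 1;
        }

        f = fopen(argv[1], "rb");
        if (!f || fread(ident, 1, sizeof(ident), f) != sizeof(ident)) {
                perror(argv[1]);
                return 1;
        }
        fclose(f);

        /* Same comparison as load_module(), using the named magic length. */
        if (memcmp(ident, ELFMAG, SELFMAG) != 0)
                printf("%s: not an ELF file\n", argv[1]);
        else
                printf("%s: ELF, class %d\n", argv[1], ident[EI_CLASS]);
        return 0;
}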
diff --git a/kernel/signal.c b/kernel/signal.c
index 72bb4f5..6c0958e 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -231,6 +231,40 @@ void flush_signals(struct task_struct *t)
spin_unlock_irqrestore(&t->sighand->siglock, flags);
}
+static void __flush_itimer_signals(struct sigpending *pending)
+{
+ sigset_t signal, retain;
+ struct sigqueue *q, *n;
+
+ signal = pending->signal;
+ sigemptyset(&retain);
+
+ list_for_each_entry_safe(q, n, &pending->list, list) {
+ int sig = q->info.si_signo;
+
+ if (likely(q->info.si_code != SI_TIMER)) {
+ sigaddset(&retain, sig);
+ } else {
+ sigdelset(&signal, sig);
+ list_del_init(&q->list);
+ __sigqueue_free(q);
+ }
+ }
+
+ sigorsets(&pending->signal, &signal, &retain);
+}
+
+void flush_itimer_signals(void)
+{
+ struct task_struct *tsk = current;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsk->sighand->siglock, flags);
+ __flush_itimer_signals(&tsk->pending);
+ __flush_itimer_signals(&tsk->signal->shared_pending);
+ spin_unlock_irqrestore(&tsk->sighand->siglock, flags);
+}
+
void ignore_signals(struct task_struct *t)
{
int i;
@@ -1240,17 +1274,22 @@ void sigqueue_free(struct sigqueue *q)
BUG_ON(!(q->flags & SIGQUEUE_PREALLOC));
/*
- * If the signal is still pending remove it from the
- * pending queue. We must hold ->siglock while testing
- * q->list to serialize with collect_signal().
+ * We must hold ->siglock while testing q->list
+ * to serialize with collect_signal() or with
+ * __exit_signal()->flush_sigqueue().
*/
spin_lock_irqsave(lock, flags);
+ q->flags &= ~SIGQUEUE_PREALLOC;
+ /*
+ * If it is queued it will be freed when dequeued,
+ * like the "regular" sigqueue.
+ */
if (!list_empty(&q->list))
- list_del_init(&q->list);
+ q = NULL;
spin_unlock_irqrestore(lock, flags);
- q->flags &= ~SIGQUEUE_PREALLOC;
- __sigqueue_free(q);
+ if (q)
+ __sigqueue_free(q);
}
int send_sigqueue(struct sigqueue *q, struct task_struct *t, int group)
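
__flush_itimer_signals() walks the pending list with a safe iterator so that SI_TIMER entries can be unlinked and freed while everything else is retained (and written back into the pending mask). A user-space sketch of the same filtering pattern over a plain singly linked list, without the kernel's list_head or sigset machinery:

#include <stdio.h>
#include <stdlib.h>

struct sq {
        int si_code;            /* 1 marks a "timer" entry in this sketch */
        struct sq *next;
};

/* Drop the timer entries, keep everything else, like __flush_itimer_signals(). */
static void flush_timer_entries(struct sq **head)
{
        struct sq **link = head;
        struct sq *q;

        while ((q = *link) != NULL) {
                if (q->si_code == 1) {          /* timer entry: unlink and free */
                        *link = q->next;
                        free(q);
                } else {                        /* anything else is retained */
                        link = &q->next;
                }
        }
}

static struct sq *push(struct sq *head, int code)
{
        struct sq *q = malloc(sizeof(*q));

        if (!q)
                return head;
        q->si_code = code;
        q->next = head;
        return q;
}

int main(void)
{
        struct sq *pending = NULL;
        struct sq *q;

        pending = push(pending, 0);
        pending = push(pending, 1);
        pending = push(pending, 0);

        flush_timer_entries(&pending);

        for (q = pending; q; q = q->next)
                printf("retained entry, si_code=%d\n", q->si_code);
        return 0;
}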
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 0101aee..b7350bb 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -62,8 +62,7 @@ static int stopmachine(void *cpu)
* help our sisters onto their CPUs. */
if (!prepared && !irqs_disabled)
yield();
- else
- cpu_relax();
+ cpu_relax();
}
/* Ack: we are exiting. */
@@ -106,8 +105,10 @@ static int stop_machine(void)
}
/* Wait for them all to come to life. */
- while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads)
+ while (atomic_read(&stopmachine_thread_ack) != stopmachine_num_threads) {
yield();
+ cpu_relax();
+ }
/* If some failed, kill them all. */
if (ret < 0) {
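
Both hunks adjust the busy-wait loops: the per-CPU thread now always executes cpu_relax(), and the ack loop pairs yield() with cpu_relax() while waiting for every thread to check in. A user-space approximation of that ack loop, with sched_yield() standing in for yield() (there is no portable user-space cpu_relax(); build with -pthread):

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

#define NTHREADS 4

static atomic_int thread_ack;

static void *worker(void *arg)
{
        (void)arg;
        atomic_fetch_add(&thread_ack, 1);       /* "Ack: we are alive." */
        return NULL;
}

int main(void)
{
        pthread_t tid[NTHREADS];
        int i;

        for (i = 0; i < NTHREADS; i++)
                pthread_create(&tid[i], NULL, worker, NULL);

        /* Wait for them all to come to life. */
        while (atomic_load(&thread_ack) != NTHREADS)
                sched_yield();

        for (i = 0; i < NTHREADS; i++)
                pthread_join(tid[i], NULL);

        printf("all %d workers acked\n", NTHREADS);
        return 0;
}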
diff --git a/kernel/sys.c b/kernel/sys.c
index 895d2d4..14e9728 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1652,7 +1652,7 @@ asmlinkage long sys_umask(int mask)
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
- long uninitialized_var(error);
+ long error = 0;
if (security_task_prctl(option, arg2, arg3, arg4, arg5, &error))
return error;
@@ -1701,9 +1701,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = PR_TIMING_STATISTICAL;
break;
case PR_SET_TIMING:
- if (arg2 == PR_TIMING_STATISTICAL)
- error = 0;
- else
+ if (arg2 != PR_TIMING_STATISTICAL)
error = -EINVAL;
break;
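
After the change, error starts at 0 instead of being marked uninitialized_var(), PR_GET_TIMING still returns PR_TIMING_STATISTICAL, and PR_SET_TIMING accepts only that value. Seen from user space (on Linux/glibc the PR_* constants come in through <sys/prctl.h>):

#include <stdio.h>
#include <sys/prctl.h>

int main(void)
{
        int mode = prctl(PR_GET_TIMING, 0, 0, 0, 0);

        printf("timing mode: %d (PR_TIMING_STATISTICAL is %d)\n",
               mode, PR_TIMING_STATISTICAL);

        /* Anything other than PR_TIMING_STATISTICAL is rejected with EINVAL. */
        if (prctl(PR_SET_TIMING, PR_TIMING_STATISTICAL, 0, 0, 0) != 0)
                perror("PR_SET_TIMING");
        return 0;
}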
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d7ffdc5..2911665 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -81,6 +81,7 @@ extern int compat_log;
extern int maps_protect;
extern int sysctl_stat_interval;
extern int latencytop_enabled;
+extern int sysctl_nr_open_min, sysctl_nr_open_max;
/* Constants used for minimum and maximum */
#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM)
@@ -1190,7 +1191,9 @@ static struct ctl_table fs_table[] = {
.data = &sysctl_nr_open,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = &sysctl_nr_open_min,
+ .extra2 = &sysctl_nr_open_max,
},
{
.ctl_name = FS_DENTRY,
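
Switching the handler to proc_dointvec_minmax means a write to fs.nr_open outside the [sysctl_nr_open_min, sysctl_nr_open_max] window is now rejected with EINVAL; reads behave as before. A small user-space check of the current value (writing needs root, so this sketch only reads /proc/sys/fs/nr_open):

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/proc/sys/fs/nr_open", "r");
        int nr_open;

        if (!f || fscanf(f, "%d", &nr_open) != 1) {
                perror("/proc/sys/fs/nr_open");
                return 1;
        }
        fclose(f);

        printf("fs.nr_open = %d\n", nr_open);
        return 0;
}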