Diffstat (limited to 'kernel')
54 files changed, 237 insertions, 399 deletions
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index 37b2bea..e99dda0 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -607,7 +607,7 @@ void audit_trim_trees(void) spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) { struct audit_chunk *chunk = find_chunk(node); - /* this could be NULL if the watch is dieing else where... */ + /* this could be NULL if the watch is dying else where... */ struct inode *inode = chunk->mark.i.inode; node->index |= 1U<<31; if (iterate_mounts(compare_root, inode, root_mnt)) diff --git a/kernel/auditsc.c b/kernel/auditsc.c index f49a031..b33513a 100644 --- a/kernel/auditsc.c +++ b/kernel/auditsc.c @@ -1011,7 +1011,7 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid, /* * to_send and len_sent accounting are very loose estimates. We aren't * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being - * within about 500 bytes (next page boundry) + * within about 500 bytes (next page boundary) * * why snprintf? an int is up to 12 digits long. if we just assumed when * logging that a[%d]= was going to be 16 characters long we would be wasting diff --git a/kernel/cgroup.c b/kernel/cgroup.c index e31b220..25c7eb5 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -157,7 +157,7 @@ struct css_id { }; /* - * cgroup_event represents events which userspace want to recieve. + * cgroup_event represents events which userspace want to receive. */ struct cgroup_event { /* diff --git a/kernel/cpu.c b/kernel/cpu.c index c95fc4d..12b7458 100644 --- a/kernel/cpu.c +++ b/kernel/cpu.c @@ -126,7 +126,7 @@ static void cpu_hotplug_done(void) #else /* #if CONFIG_HOTPLUG_CPU */ static void cpu_hotplug_begin(void) {} static void cpu_hotplug_done(void) {} -#endif /* #esle #if CONFIG_HOTPLUG_CPU */ +#endif /* #else #if CONFIG_HOTPLUG_CPU */ /* Need to know about CPUs going up/down? */ int __ref register_cpu_notifier(struct notifier_block *nb) diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index cefd4a1..bad6786 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -538,7 +538,7 @@ return_normal: /* * For single stepping, try to only enter on the processor - * that was single stepping. To gaurd against a deadlock, the + * that was single stepping. To guard against a deadlock, the * kernel will only try for the value of sstep_tries before * giving up and continuing on. */ diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 6bc6e3b..be14779 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -441,9 +441,9 @@ static int kdb_check_regs(void) * symbol name, and offset to the caller. * * The argument may consist of a numeric value (decimal or - * hexidecimal), a symbol name, a register name (preceeded by the + * hexidecimal), a symbol name, a register name (preceded by the * percent sign), an environment variable with a numeric value - * (preceeded by a dollar sign) or a simple arithmetic expression + * (preceded by a dollar sign) or a simple arithmetic expression * consisting of a symbol name, +/-, and a numeric constant value * (offset). * Parameters: @@ -1335,7 +1335,7 @@ void kdb_print_state(const char *text, int value) * error The hardware-defined error code * reason2 kdb's current reason code. * Initially error but can change - * acording to kdb state. + * according to kdb state. * db_result Result code from break or debug point. * regs The exception frame at time of fault/breakpoint. * should always be valid. 
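The auditsc.c hunk above keeps the comment explaining why the execve-argument logging uses snprintf rather than assuming a fixed 16-byte "a[%d]=" prefix. A minimal userspace sketch of that length-probing idiom is below; it is illustrative only (the function name is made up, and this is not the kernel's audit_log_execve_info() code), relying on the C99 rule that snprintf() with a NULL buffer and size 0 returns the exact formatted length.

#include <stdio.h>

/* Hypothetical helper: how many bytes does the "a[%d]=" prefix really need? */
static int execve_arg_prefix_len(int i)
{
	/* With a NULL buffer and size 0, snprintf() returns the number of
	 * characters the formatted output would require (C99 semantics). */
	return snprintf(NULL, 0, "a[%d]=", i);
}

int main(void)
{
	/* "a[0]=" needs 5 bytes, "a[1000]=" needs 8 - no fixed 16-byte guess. */
	printf("a[0]= needs %d bytes, a[1000]= needs %d bytes\n",
	       execve_arg_prefix_len(0), execve_arg_prefix_len(1000));
	return 0;
}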
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c index 6b2485d..5532dd3 100644 --- a/kernel/debug/kdb/kdb_support.c +++ b/kernel/debug/kdb/kdb_support.c @@ -545,7 +545,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size) * Mask for process state. * Notes: * The mask folds data from several sources into a single long value, so - * be carefull not to overlap the bits. TASK_* bits are in the LSB, + * be careful not to overlap the bits. TASK_* bits are in the LSB, * special cases like UNRUNNABLE are in the MSB. As of 2.6.10-rc1 there * is no overlap between TASK_* and EXIT_* but that may not always be * true, so EXIT_* bits are shifted left 16 bits before being stored in diff --git a/kernel/exit.c b/kernel/exit.c index 6a488ad..f5d2f63 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -841,7 +841,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead) /* Let father know we died * * Thread signals are configurable, but you aren't going to use - * that to send signals to arbitary processes. + * that to send signals to arbitrary processes. * That stops right now. * * If the parent exec id doesn't match the exec id we saved diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig index 72606ba..c574f9a 100644 --- a/kernel/irq/Kconfig +++ b/kernel/irq/Kconfig @@ -10,13 +10,6 @@ menu "IRQ subsystem" config GENERIC_HARDIRQS def_bool y -# Select this to disable the deprecated stuff -config GENERIC_HARDIRQS_NO_DEPRECATED - bool - -config GENERIC_HARDIRQS_NO_COMPAT - bool - # Options selectable by the architecture code # Make sparse irq Kconfig switch below available diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c index 394784c..342d8f4 100644 --- a/kernel/irq/autoprobe.c +++ b/kernel/irq/autoprobe.c @@ -70,10 +70,8 @@ unsigned long probe_irq_on(void) raw_spin_lock_irq(&desc->lock); if (!desc->action && irq_settings_can_probe(desc)) { desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; - if (irq_startup(desc)) { - irq_compat_set_pending(desc); + if (irq_startup(desc)) desc->istate |= IRQS_PENDING; - } } raw_spin_unlock_irq(&desc->lock); } diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c index 03099d5..4af1e2b 100644 --- a/kernel/irq/chip.c +++ b/kernel/irq/chip.c @@ -34,7 +34,6 @@ int irq_set_chip(unsigned int irq, struct irq_chip *chip) if (!chip) chip = &no_irq_chip; - irq_chip_set_defaults(chip); desc->irq_data.chip = chip; irq_put_desc_unlock(desc, flags); /* @@ -141,25 +140,21 @@ EXPORT_SYMBOL_GPL(irq_get_irq_data); static void irq_state_clr_disabled(struct irq_desc *desc) { irqd_clear(&desc->irq_data, IRQD_IRQ_DISABLED); - irq_compat_clr_disabled(desc); } static void irq_state_set_disabled(struct irq_desc *desc) { irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED); - irq_compat_set_disabled(desc); } static void irq_state_clr_masked(struct irq_desc *desc) { irqd_clear(&desc->irq_data, IRQD_IRQ_MASKED); - irq_compat_clr_masked(desc); } static void irq_state_set_masked(struct irq_desc *desc) { irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); - irq_compat_set_masked(desc); } int irq_startup(struct irq_desc *desc) @@ -209,126 +204,6 @@ void irq_disable(struct irq_desc *desc) } } -#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED -/* Temporary migration helpers */ -static void compat_irq_mask(struct irq_data *data) -{ - data->chip->mask(data->irq); -} - -static void compat_irq_unmask(struct irq_data *data) -{ - data->chip->unmask(data->irq); -} - -static void compat_irq_ack(struct irq_data *data) -{ - data->chip->ack(data->irq); -} - -static void 
compat_irq_mask_ack(struct irq_data *data) -{ - data->chip->mask_ack(data->irq); -} - -static void compat_irq_eoi(struct irq_data *data) -{ - data->chip->eoi(data->irq); -} - -static void compat_irq_enable(struct irq_data *data) -{ - data->chip->enable(data->irq); -} - -static void compat_irq_disable(struct irq_data *data) -{ - data->chip->disable(data->irq); -} - -static void compat_irq_shutdown(struct irq_data *data) -{ - data->chip->shutdown(data->irq); -} - -static unsigned int compat_irq_startup(struct irq_data *data) -{ - return data->chip->startup(data->irq); -} - -static int compat_irq_set_affinity(struct irq_data *data, - const struct cpumask *dest, bool force) -{ - return data->chip->set_affinity(data->irq, dest); -} - -static int compat_irq_set_type(struct irq_data *data, unsigned int type) -{ - return data->chip->set_type(data->irq, type); -} - -static int compat_irq_set_wake(struct irq_data *data, unsigned int on) -{ - return data->chip->set_wake(data->irq, on); -} - -static int compat_irq_retrigger(struct irq_data *data) -{ - return data->chip->retrigger(data->irq); -} - -static void compat_bus_lock(struct irq_data *data) -{ - data->chip->bus_lock(data->irq); -} - -static void compat_bus_sync_unlock(struct irq_data *data) -{ - data->chip->bus_sync_unlock(data->irq); -} -#endif - -/* - * Fixup enable/disable function pointers - */ -void irq_chip_set_defaults(struct irq_chip *chip) -{ -#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED - if (chip->enable) - chip->irq_enable = compat_irq_enable; - if (chip->disable) - chip->irq_disable = compat_irq_disable; - if (chip->shutdown) - chip->irq_shutdown = compat_irq_shutdown; - if (chip->startup) - chip->irq_startup = compat_irq_startup; - if (!chip->end) - chip->end = dummy_irq_chip.end; - if (chip->bus_lock) - chip->irq_bus_lock = compat_bus_lock; - if (chip->bus_sync_unlock) - chip->irq_bus_sync_unlock = compat_bus_sync_unlock; - if (chip->mask) - chip->irq_mask = compat_irq_mask; - if (chip->unmask) - chip->irq_unmask = compat_irq_unmask; - if (chip->ack) - chip->irq_ack = compat_irq_ack; - if (chip->mask_ack) - chip->irq_mask_ack = compat_irq_mask_ack; - if (chip->eoi) - chip->irq_eoi = compat_irq_eoi; - if (chip->set_affinity) - chip->irq_set_affinity = compat_irq_set_affinity; - if (chip->set_type) - chip->irq_set_type = compat_irq_set_type; - if (chip->set_wake) - chip->irq_set_wake = compat_irq_set_wake; - if (chip->retrigger) - chip->irq_retrigger = compat_irq_retrigger; -#endif -} - static inline void mask_ack_irq(struct irq_desc *desc) { if (desc->irq_data.chip->irq_mask_ack) @@ -381,7 +256,6 @@ void handle_nested_irq(unsigned int irq) if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) goto out_unlock; - irq_compat_set_progress(desc); irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); raw_spin_unlock_irq(&desc->lock); @@ -391,7 +265,6 @@ void handle_nested_irq(unsigned int irq) raw_spin_lock_irq(&desc->lock); irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); - irq_compat_clr_progress(desc); out_unlock: raw_spin_unlock_irq(&desc->lock); @@ -514,7 +387,6 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) * then mask it and get out of here: */ if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) { - irq_compat_set_pending(desc); desc->istate |= IRQS_PENDING; mask_irq(desc); goto out; @@ -543,7 +415,7 @@ out: * @desc: the interrupt description structure for this irq * * Interrupt occures on the falling and/or rising edge of a hardware - * signal. 
The occurence is latched into the irq controller hardware + * signal. The occurrence is latched into the irq controller hardware * and must be acked in order to be reenabled. After the ack another * interrupt can happen on the same source even before the first one * is handled by the associated event handler. If this happens it @@ -567,7 +439,6 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc) if (unlikely(irqd_irq_disabled(&desc->irq_data) || irqd_irq_inprogress(&desc->irq_data) || !desc->action)) { if (!irq_check_poll(desc)) { - irq_compat_set_pending(desc); desc->istate |= IRQS_PENDING; mask_ack_irq(desc); goto out_unlock; @@ -643,7 +514,7 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc) } while ((desc->istate & IRQS_PENDING) && !irqd_irq_disabled(&desc->irq_data)); -out_unlock: +out_eoi: chip->irq_eoi(&desc->irq_data); raw_spin_unlock(&desc->lock); } diff --git a/kernel/irq/compat.h b/kernel/irq/compat.h deleted file mode 100644 index 6bbaf66..0000000 --- a/kernel/irq/compat.h +++ /dev/null @@ -1,72 +0,0 @@ -/* - * Compat layer for transition period - */ -#ifndef CONFIG_GENERIC_HARDIRQS_NO_COMPAT -static inline void irq_compat_set_progress(struct irq_desc *desc) -{ - desc->status |= IRQ_INPROGRESS; -} - -static inline void irq_compat_clr_progress(struct irq_desc *desc) -{ - desc->status &= ~IRQ_INPROGRESS; -} -static inline void irq_compat_set_disabled(struct irq_desc *desc) -{ - desc->status |= IRQ_DISABLED; -} -static inline void irq_compat_clr_disabled(struct irq_desc *desc) -{ - desc->status &= ~IRQ_DISABLED; -} -static inline void irq_compat_set_pending(struct irq_desc *desc) -{ - desc->status |= IRQ_PENDING; -} - -static inline void irq_compat_clr_pending(struct irq_desc *desc) -{ - desc->status &= ~IRQ_PENDING; -} -static inline void irq_compat_set_masked(struct irq_desc *desc) -{ - desc->status |= IRQ_MASKED; -} - -static inline void irq_compat_clr_masked(struct irq_desc *desc) -{ - desc->status &= ~IRQ_MASKED; -} -static inline void irq_compat_set_move_pending(struct irq_desc *desc) -{ - desc->status |= IRQ_MOVE_PENDING; -} - -static inline void irq_compat_clr_move_pending(struct irq_desc *desc) -{ - desc->status &= ~IRQ_MOVE_PENDING; -} -static inline void irq_compat_set_affinity(struct irq_desc *desc) -{ - desc->status |= IRQ_AFFINITY_SET; -} - -static inline void irq_compat_clr_affinity(struct irq_desc *desc) -{ - desc->status &= ~IRQ_AFFINITY_SET; -} -#else -static inline void irq_compat_set_progress(struct irq_desc *desc) { } -static inline void irq_compat_clr_progress(struct irq_desc *desc) { } -static inline void irq_compat_set_disabled(struct irq_desc *desc) { } -static inline void irq_compat_clr_disabled(struct irq_desc *desc) { } -static inline void irq_compat_set_pending(struct irq_desc *desc) { } -static inline void irq_compat_clr_pending(struct irq_desc *desc) { } -static inline void irq_compat_set_masked(struct irq_desc *desc) { } -static inline void irq_compat_clr_masked(struct irq_desc *desc) { } -static inline void irq_compat_set_move_pending(struct irq_desc *desc) { } -static inline void irq_compat_clr_move_pending(struct irq_desc *desc) { } -static inline void irq_compat_set_affinity(struct irq_desc *desc) { } -static inline void irq_compat_clr_affinity(struct irq_desc *desc) { } -#endif - diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h index a0bd875..306cba3 100644 --- a/kernel/irq/debug.h +++ b/kernel/irq/debug.h @@ -4,7 +4,7 @@ #include <linux/kallsyms.h> -#define P(f) if (desc->status & f) printk("%14s set\n", #f) 
+#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f) #define PS(f) if (desc->istate & f) printk("%14s set\n", #f) /* FIXME */ #define PD(f) do { } while (0) diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c index 20dc547..b5fcd96 100644 --- a/kernel/irq/dummychip.c +++ b/kernel/irq/dummychip.c @@ -31,13 +31,6 @@ static unsigned int noop_ret(struct irq_data *data) return 0; } -#ifndef CONFIG_GENERIC_HARDIRQS_NO_DEPRECATED -static void compat_noop(unsigned int irq) { } -#define END_INIT .end = compat_noop -#else -#define END_INIT -#endif - /* * Generic no controller implementation */ @@ -48,7 +41,6 @@ struct irq_chip no_irq_chip = { .irq_enable = noop, .irq_disable = noop, .irq_ack = ack_bad, - END_INIT }; /* @@ -64,5 +56,4 @@ struct irq_chip dummy_irq_chip = { .irq_ack = noop, .irq_mask = noop, .irq_unmask = noop, - END_INIT }; diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c index 1a2fb77..90cb55f 100644 --- a/kernel/irq/handle.c +++ b/kernel/irq/handle.c @@ -175,9 +175,7 @@ irqreturn_t handle_irq_event(struct irq_desc *desc) struct irqaction *action = desc->action; irqreturn_t ret; - irq_compat_clr_pending(desc); desc->istate &= ~IRQS_PENDING; - irq_compat_set_progress(desc); irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); raw_spin_unlock(&desc->lock); @@ -185,6 +183,5 @@ irqreturn_t handle_irq_event(struct irq_desc *desc) raw_spin_lock(&desc->lock); irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS); - irq_compat_clr_progress(desc); return ret; } diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h index 6b8b971..6546431 100644 --- a/kernel/irq/internals.h +++ b/kernel/irq/internals.h @@ -15,10 +15,6 @@ #define istate core_internal_state__do_not_mess_with_it -#ifdef CONFIG_GENERIC_HARDIRQS_NO_COMPAT -# define status status_use_accessors -#endif - extern int noirqdebug; /* @@ -61,15 +57,11 @@ enum { IRQS_SUSPENDED = 0x00000800, }; -#include "compat.h" #include "debug.h" #include "settings.h" #define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data) -/* Set default functions for irq_chip structures: */ -extern void irq_chip_set_defaults(struct irq_chip *chip); - extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, unsigned long flags); extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); @@ -156,13 +148,11 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags) static inline void irqd_set_move_pending(struct irq_data *d) { d->state_use_accessors |= IRQD_SETAFFINITY_PENDING; - irq_compat_set_move_pending(irq_data_to_desc(d)); } static inline void irqd_clr_move_pending(struct irq_data *d) { d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING; - irq_compat_clr_move_pending(irq_data_to_desc(d)); } static inline void irqd_clear(struct irq_data *d, unsigned int mask) diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index acf5407..07c1611 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -132,7 +132,7 @@ irq_get_pending(struct cpumask *mask, struct irq_desc *desc) } #else static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; } -static inline bool irq_move_pending(struct irq_desc *data) { return false; } +static inline bool irq_move_pending(struct irq_data *data) { return false; } static inline void irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { } static inline void @@ -166,7 +166,6 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) kref_get(&desc->affinity_notify->kref); 
schedule_work(&desc->affinity_notify->work); } - irq_compat_set_affinity(desc); irqd_set(data, IRQD_AFFINITY_SET); return ret; @@ -297,10 +296,8 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) if (cpumask_intersects(desc->irq_data.affinity, cpu_online_mask)) set = desc->irq_data.affinity; - else { - irq_compat_clr_affinity(desc); + else irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET); - } } cpumask_and(mask, cpu_online_mask, set); @@ -587,8 +584,6 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, irqd_set(&desc->irq_data, IRQD_LEVEL); } - if (chip != desc->irq_data.chip) - irq_chip_set_defaults(desc->irq_data.chip); ret = 0; break; default: @@ -785,7 +780,6 @@ static int irq_thread(void *data) * but AFAICT IRQS_PENDING should be fine as it * retriggers the interrupt itself --- tglx */ - irq_compat_set_pending(desc); desc->istate |= IRQS_PENDING; raw_spin_unlock_irq(&desc->lock); } else { @@ -981,8 +975,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) new->thread_mask = 1 << ffz(thread_mask); if (!shared) { - irq_chip_set_defaults(desc->irq_data.chip); - init_waitqueue_head(&desc->wait_for_threads); /* Setup the type (level, edge polarity) if configured: */ @@ -1059,6 +1051,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) register_irq_proc(irq, desc); new->dir = NULL; register_handler_proc(irq, new); + free_cpumask_var(mask); return 0; diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c index e33d9c8..4742090 100644 --- a/kernel/irq/migration.c +++ b/kernel/irq/migration.c @@ -35,7 +35,7 @@ void irq_move_masked_irq(struct irq_data *idata) * do the disable, re-program, enable sequence. * This is *not* particularly important for level triggered * but in a edge trigger case, we might be setting rte - * when an active trigger is comming in. This could + * when an active trigger is coming in. This could * cause some ioapics to mal-function. * Being paranoid i guess! 
* @@ -53,11 +53,6 @@ void irq_move_masked_irq(struct irq_data *idata) cpumask_clear(desc->pending_mask); } -void move_masked_irq(int irq) -{ - irq_move_masked_irq(irq_get_irq_data(irq)); -} - void irq_move_irq(struct irq_data *idata) { bool masked; @@ -80,8 +75,3 @@ void irq_move_irq(struct irq_data *idata) if (!masked) idata->chip->irq_unmask(idata); } - -void move_native_irq(int irq) -{ - irq_move_irq(irq_get_irq_data(irq)); -} diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 626d092..dd201bd 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c @@ -364,6 +364,10 @@ int __weak arch_show_interrupts(struct seq_file *p, int prec) return 0; } +#ifndef ACTUAL_NR_IRQS +# define ACTUAL_NR_IRQS nr_irqs +#endif + int show_interrupts(struct seq_file *p, void *v) { static int prec; @@ -373,10 +377,10 @@ int show_interrupts(struct seq_file *p, void *v) struct irqaction *action; struct irq_desc *desc; - if (i > nr_irqs) + if (i > ACTUAL_NR_IRQS) return 0; - if (i == nr_irqs) + if (i == ACTUAL_NR_IRQS) return arch_show_interrupts(p, prec); /* print header and calculate the width of the first column */ diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c index ad683a9..14dd576 100644 --- a/kernel/irq/resend.c +++ b/kernel/irq/resend.c @@ -65,7 +65,6 @@ void check_irq_resend(struct irq_desc *desc, unsigned int irq) if (desc->istate & IRQS_REPLAY) return; if (desc->istate & IRQS_PENDING) { - irq_compat_clr_pending(desc); desc->istate &= ~IRQS_PENDING; desc->istate |= IRQS_REPLAY; diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h index 0227ad3..0d91730 100644 --- a/kernel/irq/settings.h +++ b/kernel/irq/settings.h @@ -15,17 +15,8 @@ enum { _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, }; -#define IRQ_INPROGRESS GOT_YOU_MORON -#define IRQ_REPLAY GOT_YOU_MORON -#define IRQ_WAITING GOT_YOU_MORON -#define IRQ_DISABLED GOT_YOU_MORON -#define IRQ_PENDING GOT_YOU_MORON -#define IRQ_MASKED GOT_YOU_MORON -#define IRQ_WAKEUP GOT_YOU_MORON -#define IRQ_MOVE_PENDING GOT_YOU_MORON #define IRQ_PER_CPU GOT_YOU_MORON #define IRQ_NO_BALANCING GOT_YOU_MORON -#define IRQ_AFFINITY_SET GOT_YOU_MORON #define IRQ_LEVEL GOT_YOU_MORON #define IRQ_NOPROBE GOT_YOU_MORON #define IRQ_NOREQUEST GOT_YOU_MORON @@ -37,102 +28,98 @@ enum { static inline void irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) { - desc->status &= ~(clr & _IRQF_MODIFY_MASK); - desc->status |= (set & _IRQF_MODIFY_MASK); + desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK); + desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); } static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) { - return desc->status & _IRQ_PER_CPU; + return desc->status_use_accessors & _IRQ_PER_CPU; } static inline void irq_settings_set_per_cpu(struct irq_desc *desc) { - desc->status |= _IRQ_PER_CPU; + desc->status_use_accessors |= _IRQ_PER_CPU; } static inline void irq_settings_set_no_balancing(struct irq_desc *desc) { - desc->status |= _IRQ_NO_BALANCING; + desc->status_use_accessors |= _IRQ_NO_BALANCING; } static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc) { - return desc->status & _IRQ_NO_BALANCING; + return desc->status_use_accessors & _IRQ_NO_BALANCING; } static inline u32 irq_settings_get_trigger_mask(struct irq_desc *desc) { - return desc->status & IRQ_TYPE_SENSE_MASK; + return desc->status_use_accessors & IRQ_TYPE_SENSE_MASK; } static inline void irq_settings_set_trigger_mask(struct irq_desc *desc, u32 mask) { - desc->status &= ~IRQ_TYPE_SENSE_MASK; - desc->status |= mask & IRQ_TYPE_SENSE_MASK; + 
desc->status_use_accessors &= ~IRQ_TYPE_SENSE_MASK; + desc->status_use_accessors |= mask & IRQ_TYPE_SENSE_MASK; } static inline bool irq_settings_is_level(struct irq_desc *desc) { - return desc->status & _IRQ_LEVEL; + return desc->status_use_accessors & _IRQ_LEVEL; } static inline void irq_settings_clr_level(struct irq_desc *desc) { - desc->status &= ~_IRQ_LEVEL; + desc->status_use_accessors &= ~_IRQ_LEVEL; } static inline void irq_settings_set_level(struct irq_desc *desc) { - desc->status |= _IRQ_LEVEL; + desc->status_use_accessors |= _IRQ_LEVEL; } static inline bool irq_settings_can_request(struct irq_desc *desc) { - return !(desc->status & _IRQ_NOREQUEST); + return !(desc->status_use_accessors & _IRQ_NOREQUEST); } static inline void irq_settings_clr_norequest(struct irq_desc *desc) { - desc->status &= ~_IRQ_NOREQUEST; + desc->status_use_accessors &= ~_IRQ_NOREQUEST; } static inline void irq_settings_set_norequest(struct irq_desc *desc) { - desc->status |= _IRQ_NOREQUEST; + desc->status_use_accessors |= _IRQ_NOREQUEST; } static inline bool irq_settings_can_probe(struct irq_desc *desc) { - return !(desc->status & _IRQ_NOPROBE); + return !(desc->status_use_accessors & _IRQ_NOPROBE); } static inline void irq_settings_clr_noprobe(struct irq_desc *desc) { - desc->status &= ~_IRQ_NOPROBE; + desc->status_use_accessors &= ~_IRQ_NOPROBE; } static inline void irq_settings_set_noprobe(struct irq_desc *desc) { - desc->status |= _IRQ_NOPROBE; + desc->status_use_accessors |= _IRQ_NOPROBE; } static inline bool irq_settings_can_move_pcntxt(struct irq_desc *desc) { - return desc->status & _IRQ_MOVE_PCNTXT; + return desc->status_use_accessors & _IRQ_MOVE_PCNTXT; } static inline bool irq_settings_can_autoenable(struct irq_desc *desc) { - return !(desc->status & _IRQ_NOAUTOEN); + return !(desc->status_use_accessors & _IRQ_NOAUTOEN); } static inline bool irq_settings_is_nested_thread(struct irq_desc *desc) { - return desc->status & _IRQ_NESTED_THREAD; + return desc->status_use_accessors & _IRQ_NESTED_THREAD; } - -/* Nothing should touch desc->status from now on */ -#undef status -#define status USE_THE_PROPER_WRAPPERS_YOU_MORON diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 83f4799..dfbd550 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c @@ -93,7 +93,6 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force) * Already running: If it is shared get the other * CPU to go looking for our mystery interrupt too */ - irq_compat_set_pending(desc); desc->istate |= IRQS_PENDING; goto out; } diff --git a/kernel/kexec.c b/kernel/kexec.c index ec19b92..55936f9 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -144,7 +144,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry, /* Initialize the list of destination pages */ INIT_LIST_HEAD(&image->dest_pages); - /* Initialize the list of unuseable pages */ + /* Initialize the list of unusable pages */ INIT_LIST_HEAD(&image->unuseable_pages); /* Read in the segments */ @@ -454,7 +454,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image, /* Deal with the destination pages I have inadvertently allocated. * * Ideally I would convert multi-page allocations into single - * page allocations, and add everyting to image->dest_pages. + * page allocations, and add everything to image->dest_pages. * * For now it is simpler to just free the pages. 
*/ @@ -602,7 +602,7 @@ static void kimage_free_extra_pages(struct kimage *image) /* Walk through and free any extra destination pages I may have */ kimage_free_page_list(&image->dest_pages); - /* Walk through and free any unuseable pages I have cached */ + /* Walk through and free any unusable pages I have cached */ kimage_free_page_list(&image->unuseable_pages); } @@ -1099,7 +1099,8 @@ size_t crash_get_memory_size(void) return size; } -static void free_reserved_phys_range(unsigned long begin, unsigned long end) +void __weak crash_free_reserved_phys_range(unsigned long begin, + unsigned long end) { unsigned long addr; @@ -1135,7 +1136,7 @@ int crash_shrink_memory(unsigned long new_size) start = roundup(start, PAGE_SIZE); end = roundup(start + new_size, PAGE_SIZE); - free_reserved_phys_range(end, crashk_res.end); + crash_free_reserved_phys_range(end, crashk_res.end); if ((start == end) && (crashk_res.parent != NULL)) release_resource(&crashk_res); diff --git a/kernel/kthread.c b/kernel/kthread.c index 684ab3f..3b34d27 100644 --- a/kernel/kthread.c +++ b/kernel/kthread.c @@ -139,7 +139,7 @@ static void create_kthread(struct kthread_create_info *create) * in @node, to get NUMA affinity for kthread stack, or else give -1. * When woken, the thread will run @threadfn() with @data as its * argument. @threadfn() can either call do_exit() directly if it is a - * standalone thread for which noone will call kthread_stop(), or + * standalone thread for which no one will call kthread_stop(), or * return when 'kthread_should_stop()' is true (which means * kthread_stop() has been called). The return value should be zero * or a negative error number; it will be passed to kthread_stop(). diff --git a/kernel/latencytop.c b/kernel/latencytop.c index ee74b35..376066e 100644 --- a/kernel/latencytop.c +++ b/kernel/latencytop.c @@ -153,7 +153,7 @@ static inline void store_stacktrace(struct task_struct *tsk, } /** - * __account_scheduler_latency - record an occured latency + * __account_scheduler_latency - record an occurred latency * @tsk - the task struct of the task hitting the latency * @usecs - the duration of the latency in microseconds * @inter - 1 if the sleep was interruptible, 0 if uninterruptible diff --git a/kernel/lockdep.c b/kernel/lockdep.c index 0d2058d..53a6895 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c @@ -2309,7 +2309,7 @@ void trace_hardirqs_on_caller(unsigned long ip) if (unlikely(curr->hardirqs_enabled)) { /* * Neither irq nor preemption are disabled here - * so this is racy by nature but loosing one hit + * so this is racy by nature but losing one hit * in a stat is not a big deal. */ __debug_atomic_inc(redundant_hardirqs_on); @@ -2620,7 +2620,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, if (!graph_lock()) return 0; /* - * Make sure we didnt race: + * Make sure we didn't race: */ if (unlikely(hlock_class(this)->usage_mask & new_mask)) { graph_unlock(); diff --git a/kernel/module.c b/kernel/module.c index 1f9f7bc..d5938a5 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -809,7 +809,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user, wait_for_zero_refcount(mod); mutex_unlock(&module_mutex); - /* Final destruction now noone is using it. */ + /* Final destruction now no one is using it. 
*/ if (mod->exit != NULL) mod->exit(); blocking_notifier_call_chain(&module_notify_list, @@ -2777,7 +2777,7 @@ static struct module *load_module(void __user *umod, mod->state = MODULE_STATE_COMING; /* Now sew it into the lists so we can get lockdep and oops - * info during argument parsing. Noone should access us, since + * info during argument parsing. No one should access us, since * strong_try_module_get() will fail. * lockdep/oops can run asynchronous, so use the RCU list insertion * function to insert in a way safe to concurrent readers. @@ -2971,7 +2971,7 @@ static const char *get_ksymbol(struct module *mod, else nextval = (unsigned long)mod->module_core+mod->core_text_size; - /* Scan for closest preceeding symbol, and next symbol. (ELF + /* Scan for closest preceding symbol, and next symbol. (ELF starts real symbols at 1). */ for (i = 1; i < mod->num_symtab; i++) { if (mod->symtab[i].st_shndx == SHN_UNDEF) diff --git a/kernel/mutex.c b/kernel/mutex.c index a5889fb..c4195fa 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -245,7 +245,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass, } __set_task_state(task, state); - /* didnt get the lock, go to sleep: */ + /* didn't get the lock, go to sleep: */ spin_unlock_mutex(&lock->wait_lock, flags); preempt_enable_no_resched(); schedule(); diff --git a/kernel/padata.c b/kernel/padata.c index 7510194..b91941d 100644 --- a/kernel/padata.c +++ b/kernel/padata.c @@ -262,7 +262,7 @@ static void padata_reorder(struct parallel_data *pd) /* * This cpu has to do the parallel processing of the next * object. It's waiting in the cpu's parallelization queue, - * so exit imediately. + * so exit immediately. */ if (PTR_ERR(padata) == -ENODATA) { del_timer(&pd->timer); @@ -284,7 +284,7 @@ static void padata_reorder(struct parallel_data *pd) /* * The next object that needs serialization might have arrived to * the reorder queues in the meantime, we will be called again - * from the timer function if noone else cares for it. + * from the timer function if no one else cares for it. */ if (atomic_read(&pd->reorder_objects) && !(pinst->flags & PADATA_RESET)) @@ -515,7 +515,7 @@ static void __padata_stop(struct padata_instance *pinst) put_online_cpus(); } -/* Replace the internal control stucture with a new one. */ +/* Replace the internal control structure with a new one. */ static void padata_replace(struct padata_instance *pinst, struct parallel_data *pd_new) { @@ -768,7 +768,7 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu) } /** - * padata_remove_cpu - remove a cpu from the one or both(serial and paralell) + * padata_remove_cpu - remove a cpu from the one or both(serial and parallel) * padata cpumasks. * * @pinst: padata instance diff --git a/kernel/params.c b/kernel/params.c index 0da1411..7ab388a 100644 --- a/kernel/params.c +++ b/kernel/params.c @@ -95,7 +95,7 @@ static int parse_one(char *param, /* Find parameter */ for (i = 0; i < num_params; i++) { if (parameq(param, params[i].name)) { - /* Noone handled NULL, so do it here. */ + /* No one handled NULL, so do it here. */ if (!val && params[i].ops->set != param_set_bool) return -EINVAL; DEBUGP("They are equal! 
Calling %p\n", diff --git a/kernel/perf_event.c b/kernel/perf_event.c index c75925c..27960f1 100644 --- a/kernel/perf_event.c +++ b/kernel/perf_event.c @@ -145,8 +145,8 @@ static struct srcu_struct pmus_srcu; */ int sysctl_perf_event_paranoid __read_mostly = 1; -/* Minimum for 128 pages + 1 for the user control page */ -int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */ +/* Minimum for 512 kiB + 1 user control page */ +int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */ /* * max perf event sample rate @@ -6531,6 +6531,11 @@ SYSCALL_DEFINE5(perf_event_open, goto err_alloc; } + if (task) { + put_task_struct(task); + task = NULL; + } + /* * Look up the group leader (we will attach this event to it): */ diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c index 67fea9d..0791b13 100644 --- a/kernel/posix-cpu-timers.c +++ b/kernel/posix-cpu-timers.c @@ -1347,7 +1347,7 @@ void run_posix_cpu_timers(struct task_struct *tsk) /* * Now that all the timers on our list have the firing flag, - * noone will touch their list entries but us. We'll take + * no one will touch their list entries but us. We'll take * each timer's lock before clearing its firing flag, so no * timer call will interfere. */ diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c index 4c01249..e5498d7 100644 --- a/kernel/posix-timers.c +++ b/kernel/posix-timers.c @@ -313,7 +313,7 @@ static void schedule_next_timer(struct k_itimer *timr) * restarted (i.e. we have flagged this in the sys_private entry of the * info block). * - * To protect aginst the timer going away while the interrupt is queued, + * To protect against the timer going away while the interrupt is queued, * we require that the it_requeue_pending flag be set. */ void do_schedule_next_timer(struct siginfo *info) diff --git a/kernel/power/main.c b/kernel/power/main.c index 8eaba5f..de9aef8 100644 --- a/kernel/power/main.c +++ b/kernel/power/main.c @@ -224,7 +224,7 @@ power_attr(state); * writing to 'state'. It first should read from 'wakeup_count' and store * the read value. Then, after carrying out its own preparations for the system * transition to a sleep state, it should write the stored value to - * 'wakeup_count'. If that fails, at least one wakeup event has occured since + * 'wakeup_count'. If that fails, at least one wakeup event has occurred since * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it * is allowed to write to 'state', but the transition will be aborted if there * are any wakeup events detected after 'wakeup_count' was written to. diff --git a/kernel/sched.c b/kernel/sched.c index f592ce6..4801363 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2309,7 +2309,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state) * Cause a process which is running on another CPU to enter * kernel-mode, without any delay. (to get signals handled.) * - * NOTE: this function doesnt have to take the runqueue lock, + * NOTE: this function doesn't have to take the runqueue lock, * because all it wants to ensure is that the remote task enters * the kernel. If the IPI races and the task has been migrated * to another CPU then no harm is done and the purpose has been @@ -4997,7 +4997,7 @@ recheck: */ raw_spin_lock_irqsave(&p->pi_lock, flags); /* - * To be able to change p->policy safely, the apropriate + * To be able to change p->policy safely, the appropriate * runqueue lock must be held. 
*/ rq = __task_rq_lock(p); @@ -5011,6 +5011,17 @@ recheck: return -EINVAL; } + /* + * If not changing anything there's no need to proceed further: + */ + if (unlikely(policy == p->policy && (!rt_policy(policy) || + param->sched_priority == p->rt_priority))) { + + __task_rq_unlock(rq); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + return 0; + } + #ifdef CONFIG_RT_GROUP_SCHED if (user) { /* @@ -5705,7 +5716,7 @@ void show_state_filter(unsigned long state_filter) do_each_thread(g, p) { /* * reset the NMI-timeout, listing all files on a slow - * console might take alot of time: + * console might take a lot of time: */ touch_nmi_watchdog(); if (!state_filter || (p->state & state_filter)) @@ -6320,6 +6331,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) break; #endif } + + update_max_interval(); + return NOTIFY_OK; } diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c index 5946ac5..429242f 100644 --- a/kernel/sched_autogroup.c +++ b/kernel/sched_autogroup.c @@ -179,7 +179,7 @@ void sched_autogroup_create_attach(struct task_struct *p) struct autogroup *ag = autogroup_create(); autogroup_move_group(p, ag); - /* drop extra refrence added by autogroup_create() */ + /* drop extra reference added by autogroup_create() */ autogroup_kref_put(ag); } EXPORT_SYMBOL(sched_autogroup_create_attach); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 3f7ec9e..7f00772 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -22,6 +22,7 @@ #include <linux/latencytop.h> #include <linux/sched.h> +#include <linux/cpumask.h> /* * Targeted preemption latency for CPU-bound tasks: @@ -3061,7 +3062,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu, /* * if *imbalance is less than the average load per runnable task - * there is no gaurantee that any tasks will be moved so we'll have + * there is no guarantee that any tasks will be moved so we'll have * a think about bumping its value to force at least one task to be * moved */ @@ -3819,6 +3820,17 @@ void select_nohz_load_balancer(int stop_tick) static DEFINE_SPINLOCK(balancing); +static unsigned long __read_mostly max_load_balance_interval = HZ/10; + +/* + * Scale the max load_balance interval with the number of CPUs in the system. + * This trades load-balance latency on larger machines for less cross talk. + */ +static void update_max_interval(void) +{ + max_load_balance_interval = HZ*num_online_cpus()/10; +} + /* * It checks each scheduling domain to see if it is due to be balanced, * and initiates a balancing operation if so. @@ -3848,10 +3860,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle) /* scale ms to jiffies */ interval = msecs_to_jiffies(interval); - if (unlikely(!interval)) - interval = 1; - if (interval > HZ*NR_CPUS/10) - interval = HZ*NR_CPUS/10; + interval = clamp(interval, 1UL, max_load_balance_interval); need_serialize = sd->flags & SD_SERIALIZE; diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index db308cb..e7cebdc 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1378,7 +1378,7 @@ retry: task = pick_next_pushable_task(rq); if (task_cpu(next_task) == rq->cpu && task == next_task) { /* - * If we get here, the task hasnt moved at all, but + * If we get here, the task hasn't moved at all, but * it has failed to push. We will not try again, * since the other cpus will pull from us when they * are ready. 
@@ -1488,7 +1488,7 @@ static int pull_rt_task(struct rq *this_rq) /* * We continue with the search, just in * case there's an even higher prio task - * in another runqueue. (low likelyhood + * in another runqueue. (low likelihood * but possible) */ } diff --git a/kernel/signal.c b/kernel/signal.c index 1186cf7..29e233f 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -226,7 +226,7 @@ static inline void print_dropped_signal(int sig) /* * allocate a new signal queue record * - this may be called without locks if and only if t == current, otherwise an - * appopriate lock must be held to stop the target task from exiting + * appropriate lock must be held to stop the target task from exiting */ static struct sigqueue * __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) @@ -375,15 +375,15 @@ int unhandled_signal(struct task_struct *tsk, int sig) return !tracehook_consider_fatal_signal(tsk, sig); } - -/* Notify the system that a driver wants to block all signals for this +/* + * Notify the system that a driver wants to block all signals for this * process, and wants to be notified if any signals at all were to be * sent/acted upon. If the notifier routine returns non-zero, then the * signal will be acted upon after all. If the notifier routine returns 0, * then then signal will be blocked. Only one block per process is * allowed. priv is a pointer to private data that the notifier routine - * can use to determine if the signal should be blocked or not. */ - + * can use to determine if the signal should be blocked or not. + */ void block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask) { @@ -434,9 +434,10 @@ still_pending: copy_siginfo(info, &first->info); __sigqueue_free(first); } else { - /* Ok, it wasn't in the queue. This must be - a fast-pathed signal or we must have been - out of queue space. So zero out the info. + /* + * Ok, it wasn't in the queue. This must be + * a fast-pathed signal or we must have been + * out of queue space. So zero out the info. */ info->si_signo = sig; info->si_errno = 0; @@ -468,7 +469,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, } /* - * Dequeue a signal and return the element to the caller, which is + * Dequeue a signal and return the element to the caller, which is * expected to free it. * * All callers have to hold the siglock. @@ -490,7 +491,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info) * itimers are process shared and we restart periodic * itimers in the signal delivery path to prevent DoS * attacks in the high resolution timer case. This is - * compliant with the old way of self restarting + * compliant with the old way of self-restarting * itimers, as the SIGALRM is a legacy signal and only * queued once. Changing the restart behaviour to * restart the timer in the signal dequeue path is @@ -923,14 +924,15 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t, if (info == SEND_SIG_FORCED) goto out_set; - /* Real-time signals must be queued if sent by sigqueue, or - some other real-time mechanism. It is implementation - defined whether kill() does so. We attempt to do so, on - the principle of least surprise, but since kill is not - allowed to fail with EAGAIN when low on memory we just - make sure at least one signal gets delivered and don't - pass on the info struct. */ - + /* + * Real-time signals must be queued if sent by sigqueue, or + * some other real-time mechanism. 
It is implementation + * defined whether kill() does so. We attempt to do so, on + * the principle of least surprise, but since kill is not + * allowed to fail with EAGAIN when low on memory we just + * make sure at least one signal gets delivered and don't + * pass on the info struct. + */ if (sig < SIGRTMIN) override_rlimit = (is_si_special(info) || info->si_code >= 0); else @@ -1201,8 +1203,7 @@ retry: return error; } -int -kill_proc_info(int sig, struct siginfo *info, pid_t pid) +int kill_proc_info(int sig, struct siginfo *info, pid_t pid) { int error; rcu_read_lock(); @@ -1299,8 +1300,7 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid) * These are for backward compatibility with the rest of the kernel source. */ -int -send_sig_info(int sig, struct siginfo *info, struct task_struct *p) +int send_sig_info(int sig, struct siginfo *info, struct task_struct *p) { /* * Make sure legacy kernel users don't send in bad values @@ -1368,7 +1368,7 @@ EXPORT_SYMBOL(kill_pid); * These functions support sending signals using preallocated sigqueue * structures. This is needed "because realtime applications cannot * afford to lose notifications of asynchronous events, like timer - * expirations or I/O completions". In the case of Posix Timers + * expirations or I/O completions". In the case of POSIX Timers * we allocate the sigqueue structure from the timer_create. If this * allocation fails we are able to report the failure to the application * with an EAGAIN error. @@ -1553,7 +1553,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why) info.si_signo = SIGCHLD; info.si_errno = 0; /* - * see comment in do_notify_parent() abot the following 3 lines + * see comment in do_notify_parent() about the following 4 lines */ rcu_read_lock(); info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns); @@ -1611,7 +1611,7 @@ static inline int may_ptrace_stop(void) } /* - * Return nonzero if there is a SIGKILL that should be waking us up. + * Return non-zero if there is a SIGKILL that should be waking us up. * Called with the siglock held. */ static int sigkill_pending(struct task_struct *tsk) @@ -1735,7 +1735,7 @@ void ptrace_notify(int exit_code) /* * This performs the stopping for SIGSTOP and other stop signals. * We have to stop all threads in the thread group. - * Returns nonzero if we've actually stopped and released the siglock. + * Returns non-zero if we've actually stopped and released the siglock. * Returns zero if we didn't stop and still hold the siglock. */ static int do_signal_stop(int signr) @@ -1823,10 +1823,12 @@ static int ptrace_signal(int signr, siginfo_t *info, current->exit_code = 0; - /* Update the siginfo structure if the signal has - changed. If the debugger wanted something - specific in the siginfo structure then it should - have updated *info via PTRACE_SETSIGINFO. */ + /* + * Update the siginfo structure if the signal has + * changed. If the debugger wanted something + * specific in the siginfo structure then it should + * have updated *info via PTRACE_SETSIGINFO. + */ if (signr != info->si_signo) { info->si_signo = signr; info->si_errno = 0; @@ -1885,7 +1887,7 @@ relock: for (;;) { struct k_sigaction *ka; /* - * Tracing can induce an artifical signal and choose sigaction. + * Tracing can induce an artificial signal and choose sigaction. * The return value in @signr determines the default action, * but @info->si_signo is the signal number we will report. 
*/ @@ -2034,7 +2036,8 @@ void exit_signals(struct task_struct *tsk) if (!signal_pending(tsk)) goto out; - /* It could be that __group_complete_signal() choose us to + /* + * It could be that __group_complete_signal() choose us to * notify about group-wide signal. Another thread should be * woken now to take the signal since we will not. */ @@ -2072,6 +2075,9 @@ EXPORT_SYMBOL(unblock_all_signals); * System call entry points. */ +/** + * sys_restart_syscall - restart a system call + */ SYSCALL_DEFINE0(restart_syscall) { struct restart_block *restart = ¤t_thread_info()->restart_block; @@ -2125,6 +2131,13 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset) return error; } +/** + * sys_rt_sigprocmask - change the list of currently blocked signals + * @how: whether to add, remove, or set signals + * @set: stores pending signals + * @oset: previous value of signal mask if non-null + * @sigsetsize: size of sigset_t type + */ SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set, sigset_t __user *, oset, size_t, sigsetsize) { @@ -2183,8 +2196,14 @@ long do_sigpending(void __user *set, unsigned long sigsetsize) out: return error; -} +} +/** + * sys_rt_sigpending - examine a pending signal that has been raised + * while blocked + * @set: stores pending signals + * @sigsetsize: size of sigset_t type or larger + */ SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize) { return do_sigpending(set, sigsetsize); @@ -2233,9 +2252,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) err |= __put_user(from->si_trapno, &to->si_trapno); #endif #ifdef BUS_MCEERR_AO - /* + /* * Other callers might not initialize the si_lsb field, - * so check explicitely for the right codes here. + * so check explicitly for the right codes here. */ if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); @@ -2264,6 +2283,14 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from) #endif +/** + * sys_rt_sigtimedwait - synchronously wait for queued signals specified + * in @uthese + * @uthese: queued signals to wait for + * @uinfo: if non-null, the signal's siginfo is returned here + * @uts: upper bound on process time suspension + * @sigsetsize: size of sigset_t type + */ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, siginfo_t __user *, uinfo, const struct timespec __user *, uts, size_t, sigsetsize) @@ -2280,7 +2307,7 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, if (copy_from_user(&these, uthese, sizeof(these))) return -EFAULT; - + /* * Invert the set of allowed signals to get those we * want to block. @@ -2305,9 +2332,11 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, + (ts.tv_sec || ts.tv_nsec)); if (timeout) { - /* None ready -- temporarily unblock those we're + /* + * None ready -- temporarily unblock those we're * interested while we are sleeping in so that we'll - * be awakened when they arrive. */ + * be awakened when they arrive. 
+ */ current->real_blocked = current->blocked; sigandsets(¤t->blocked, ¤t->blocked, &these); recalc_sigpending(); @@ -2339,6 +2368,11 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, return ret; } +/** + * sys_kill - send a signal to a process + * @pid: the PID of the process + * @sig: signal to be sent + */ SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) { struct siginfo info; @@ -2414,7 +2448,11 @@ SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig) return do_tkill(tgid, pid, sig); } -/* +/** + * sys_tkill - send signal to one specific task + * @pid: the PID of the task + * @sig: signal to be sent + * * Send a signal to only one task, even if it's a CLONE_THREAD task. */ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) @@ -2426,6 +2464,12 @@ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) return do_tkill(0, pid, sig); } +/** + * sys_rt_sigqueueinfo - send signal information to a signal + * @pid: the PID of the thread + * @sig: signal to be sent + * @uinfo: signal info to be sent + */ SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, siginfo_t __user *, uinfo) { @@ -2553,12 +2597,11 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s error = -EINVAL; /* - * - * Note - this code used to test ss_flags incorrectly + * Note - this code used to test ss_flags incorrectly: * old code may have been written using ss_flags==0 * to mean ss_flags==SS_ONSTACK (as this was the only * way that worked) - this fix preserves that older - * mechanism + * mechanism. */ if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) goto out; @@ -2592,6 +2635,10 @@ out: #ifdef __ARCH_WANT_SYS_SIGPENDING +/** + * sys_sigpending - examine pending signals + * @set: where mask of pending signal is returned + */ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) { return do_sigpending(set, sizeof(*set)); @@ -2600,8 +2647,15 @@ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) #endif #ifdef __ARCH_WANT_SYS_SIGPROCMASK -/* Some platforms have their own version with special arguments others - support only sys_rt_sigprocmask. */ +/** + * sys_sigprocmask - examine and change blocked signals + * @how: whether to add, remove, or set signals + * @set: signals to add or remove (if non-null) + * @oset: previous value of signal mask if non-null + * + * Some platforms have their own version with special arguments; + * others support only sys_rt_sigprocmask. 
+ */ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, old_sigset_t __user *, oset) @@ -2654,6 +2708,13 @@ out: #endif /* __ARCH_WANT_SYS_SIGPROCMASK */ #ifdef __ARCH_WANT_SYS_RT_SIGACTION +/** + * sys_rt_sigaction - alter an action taken by a process + * @sig: signal to be sent + * @act: the thread group ID of the thread + * @oact: the PID of the thread + * @sigsetsize: size of sigset_t type + */ SYSCALL_DEFINE4(rt_sigaction, int, sig, const struct sigaction __user *, act, struct sigaction __user *, oact, @@ -2740,6 +2801,12 @@ SYSCALL_DEFINE0(pause) #endif #ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND +/** + * sys_rt_sigsuspend - replace the signal mask for a value with the + * @unewset value until a signal is received + * @unewset: new signal mask value + * @sigsetsize: size of sigset_t type + */ SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) { sigset_t newset; diff --git a/kernel/softirq.c b/kernel/softirq.c index 735d870..174f976 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -567,7 +567,7 @@ static void __tasklet_hrtimer_trampoline(unsigned long data) /** * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks * @ttimer: tasklet_hrtimer which is initialized - * @function: hrtimer callback funtion which gets called from softirq context + * @function: hrtimer callback function which gets called from softirq context * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME) * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL) */ diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c index b2fa506..a470154 100644 --- a/kernel/time/jiffies.c +++ b/kernel/time/jiffies.c @@ -34,7 +34,7 @@ * inaccuracies caused by missed or lost timer * interrupts and the inability for the timer * interrupt hardware to accuratly tick at the - * requested HZ value. It is also not reccomended + * requested HZ value. It is also not recommended * for "tick-less" systems. */ #define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ)) diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c index 5f1bb8e..f6117a4 100644 --- a/kernel/time/ntp.c +++ b/kernel/time/ntp.c @@ -652,6 +652,8 @@ int do_adjtimex(struct timex *txc) struct timespec delta; delta.tv_sec = txc->time.tv_sec; delta.tv_nsec = txc->time.tv_usec; + if (!capable(CAP_SYS_TIME)) + return -EPERM; if (!(txc->modes & ADJ_NANO)) delta.tv_nsec *= 1000; result = timekeeping_inject_offset(&delta); diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c index 2f3b585..a5d0a3a 100644 --- a/kernel/time/timer_stats.c +++ b/kernel/time/timer_stats.c @@ -236,7 +236,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf, unsigned int timer_flag) { /* - * It doesnt matter which lock we take: + * It doesn't matter which lock we take: */ raw_spinlock_t *lock; struct entry *entry, input; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index c075f4e..ee24fa1 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1268,7 +1268,7 @@ static int ftrace_update_code(struct module *mod) p->flags = 0L; /* - * Do the initial record convertion from mcount jump + * Do the initial record conversion from mcount jump * to the NOP instructions. 
*/ if (!ftrace_code_disable(mod, p)) { @@ -3425,7 +3425,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack) atomic_set(&t->tracing_graph_pause, 0); atomic_set(&t->trace_overrun, 0); t->ftrace_timestamp = 0; - /* make curr_ret_stack visable before we add the ret_stack */ + /* make curr_ret_stack visible before we add the ret_stack */ smp_wmb(); t->ret_stack = ret_stack; } diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index d9c8bca..0ef7b4b 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -1478,7 +1478,7 @@ static inline unsigned long rb_page_entries(struct buffer_page *bpage) return local_read(&bpage->entries) & RB_WRITE_MASK; } -/* Size is determined by what has been commited */ +/* Size is determined by what has been committed */ static inline unsigned rb_page_size(struct buffer_page *bpage) { return rb_page_commit(bpage); @@ -2932,7 +2932,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) /* * cpu_buffer->pages just needs to point to the buffer, it * has no specific buffer page to point to. Lets move it out - * of our way so we don't accidently swap it. + * of our way so we don't accidentally swap it. */ cpu_buffer->pages = reader->list.prev; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 9541c27..d38c16a 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3239,7 +3239,7 @@ waitagain: trace_seq_init(&iter->seq); /* - * If there was nothing to send to user, inspite of consuming trace + * If there was nothing to send to user, in spite of consuming trace * entries, go back to wait for more entries. */ if (sret == -EBUSY) diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c index 685a67d..6302747 100644 --- a/kernel/trace/trace_clock.c +++ b/kernel/trace/trace_clock.c @@ -46,7 +46,7 @@ u64 notrace trace_clock_local(void) } /* - * trace_clock(): 'inbetween' trace clock. Not completely serialized, + * trace_clock(): 'between' trace clock. Not completely serialized, * but not completely incorrect when crossing CPUs either. * * This is based on cpu_clock(), which will allow at most ~1 jiffy of diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h index 1516cb3..e32744c 100644 --- a/kernel/trace/trace_entries.h +++ b/kernel/trace/trace_entries.h @@ -27,7 +27,7 @@ * in the structure. * * * for structures within structures, the format of the internal - * structure is layed out. This allows the internal structure + * structure is laid out. This allows the internal structure * to be deciphered for the format file. Although these macros * may become out of sync with the internal structure, they * will create a compile error if it happens. 
Since the diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c index 76b0598..962cdb2 100644 --- a/kernel/trace/trace_functions_graph.c +++ b/kernel/trace/trace_functions_graph.c @@ -905,7 +905,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s, * * returns 1 if * - we are inside irq code - * - we just extered irq code + * - we just entered irq code * * retunns 0 if * - funcgraph-interrupts option is set diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c index 92b6e1e..a4969b4 100644 --- a/kernel/trace/trace_irqsoff.c +++ b/kernel/trace/trace_irqsoff.c @@ -80,7 +80,7 @@ static struct tracer_flags tracer_flags = { * skip the latency if the sequence has changed - some other section * did a maximum and could disturb our measurement with serial console * printouts, etc. Truly coinciding maximum latencies should be rare - * and what happens together happens separately as well, so this doesnt + * and what happens together happens separately as well, so this doesn't * decrease the validity of the maximum found: */ static __cacheline_aligned_in_smp unsigned long max_sequence; diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c index 8435b43..35d55a3 100644 --- a/kernel/trace/trace_kprobe.c +++ b/kernel/trace/trace_kprobe.c @@ -1839,7 +1839,7 @@ static void unregister_probe_event(struct trace_probe *tp) kfree(tp->call.print_fmt); } -/* Make a debugfs interface for controling probe points */ +/* Make a debugfs interface for controlling probe points */ static __init int init_kprobe_trace(void) { struct dentry *d_tracer; diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c index eb27fd3..92cb706 100644 --- a/kernel/user-return-notifier.c +++ b/kernel/user-return-notifier.c @@ -20,7 +20,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register); /* * Removes a registered user return notifier. Must be called from atomic - * context, and from the same cpu registration occured in. + * context, and from the same cpu registration occurred in. */ void user_return_notifier_unregister(struct user_return_notifier *urn) { diff --git a/kernel/wait.c b/kernel/wait.c index b0310eb..f45ea8d 100644 --- a/kernel/wait.c +++ b/kernel/wait.c @@ -142,7 +142,7 @@ EXPORT_SYMBOL(finish_wait); * woken up through the queue. * * This prevents waiter starvation where an exclusive waiter - * aborts and is woken up concurrently and noone wakes up + * aborts and is woken up concurrently and no one wakes up * the next waiter. */ void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 04ef830..8859a41 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1291,7 +1291,7 @@ __acquires(&gcwq->lock) return true; spin_unlock_irq(&gcwq->lock); - /* CPU has come up inbetween, retry migration */ + /* CPU has come up in between, retry migration */ cpu_relax(); } } |
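Most of the churn in kernel/irq/ above comes from removing the GENERIC_HARDIRQS_NO_DEPRECATED / GENERIC_HARDIRQS_NO_COMPAT transition layer: irq_chip callbacks now take a struct irq_data * instead of a bare irq number, and desc->status is only reachable through the irq_settings_*() accessors (status_use_accessors). The sketch below shows the post-compat callback style for an imaginary interrupt controller; every name here (foo_*, FOO_*_REG, foo_base) is hypothetical and assumes a 1:1 mapping between Linux irq numbers and controller bits, purely to illustrate the new-style hooks.

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/bitops.h>

/* Hypothetical register layout of an imaginary "FOO" interrupt controller. */
#define FOO_MASK_REG	0x00
#define FOO_UNMASK_REG	0x04
#define FOO_ACK_REG	0x08

static void __iomem *foo_base;	/* mapped elsewhere during probe */

static void foo_irq_mask(struct irq_data *d)
{
	/* Callbacks now receive struct irq_data *, not an unsigned int irq. */
	writel(BIT(d->irq), foo_base + FOO_MASK_REG);
}

static void foo_irq_unmask(struct irq_data *d)
{
	writel(BIT(d->irq), foo_base + FOO_UNMASK_REG);
}

static void foo_irq_ack(struct irq_data *d)
{
	writel(BIT(d->irq), foo_base + FOO_ACK_REG);
}

static struct irq_chip foo_irq_chip = {
	.name		= "FOO",
	/* Only the irq_* hooks remain; the legacy .mask/.unmask/.ack/.enable
	 * pointers and irq_chip_set_defaults() are gone with this series. */
	.irq_mask	= foo_irq_mask,
	.irq_unmask	= foo_irq_unmask,
	.irq_ack	= foo_irq_ack,
};

A driver would then wire this up as before, e.g. with irq_set_chip_and_handler(irq, &foo_irq_chip, handle_edge_irq); the flow handlers themselves (handle_edge_irq, handle_fasteoi_irq, ...) are unchanged apart from using the IRQS_*/IRQD_* state bits directly, as the hunks above show.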