author     Pekka Enberg <penberg@kernel.org>	2010-10-24 19:57:05 +0300
committer  Pekka Enberg <penberg@kernel.org>	2010-10-24 19:57:05 +0300
commit     6d4121f6c20a0e86231d52f535f1c82423b3326f (patch)
tree       5c235cac699ca86b504850aa663ddadde0455a61 /lib
parent     92a5bbc11ff2442a54b2f1d313088c245828ef4e (diff)
parent     35da7a307c535f9c2929cae277f3df425c9f9b1e (diff)
download   kernel_samsung_smdk4412-6d4121f6c20a0e86231d52f535f1c82423b3326f.zip
           kernel_samsung_smdk4412-6d4121f6c20a0e86231d52f535f1c82423b3326f.tar.gz
           kernel_samsung_smdk4412-6d4121f6c20a0e86231d52f535f1c82423b3326f.tar.bz2
Merge branch 'master' into for-linus
Conflicts:
include/linux/percpu.h
mm/percpu.c
Diffstat (limited to 'lib')
-rw-r--r--   lib/Kconfig.debug     58
-rw-r--r--   lib/bug.c              6
-rw-r--r--   lib/dma-debug.c        1
-rw-r--r--   lib/dynamic_debug.c  140
-rw-r--r--   lib/kobject.c         39
-rw-r--r--   lib/list_sort.c        2
-rw-r--r--   lib/radix-tree.c       2
-rw-r--r--   lib/swiotlb.c         18
8 files changed, 183 insertions, 83 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index b626365..69a3266 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -461,6 +461,15 @@ config DEBUG_MUTEXES
	 This feature allows mutex semantics violations to be detected and
	 reported.

+config BKL
+	bool "Big Kernel Lock" if (SMP || PREEMPT)
+	default y
+	help
+	  This is the traditional lock that is used in old code instead
+	  of proper locking. All drivers that use the BKL should depend
+	  on this symbol.
+	  Say Y here unless you are working on removing the BKL.
+
 config DEBUG_LOCK_ALLOC
	bool "Lock debugging: detect incorrect freeing of live locks"
	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
@@ -482,6 +491,7 @@ config PROVE_LOCKING
	select DEBUG_SPINLOCK
	select DEBUG_MUTEXES
	select DEBUG_LOCK_ALLOC
+	select TRACE_IRQFLAGS
	default n
	help
	 This feature enables the kernel to prove that all locking
@@ -539,6 +549,23 @@ config PROVE_RCU_REPEATEDLY
	 disabling, allowing multiple RCU-lockdep warnings to be
	 printed on a single reboot.

+	 Say Y to allow multiple RCU-lockdep warnings per boot.
+
+	 Say N if you are unsure.
+
+config SPARSE_RCU_POINTER
+	bool "RCU debugging: sparse-based checks for pointer usage"
+	default n
+	help
+	 This feature enables the __rcu sparse annotation for
+	 RCU-protected pointers.  This annotation will cause sparse
+	 to flag any non-RCU used of annotated pointers.  This can be
+	 helpful when debugging RCU usage.  Please note that this feature
+	 is not intended to enforce code cleanliness; it is instead merely
+	 a debugging aid.
+
+	 Say Y to make sparse flag questionable use of RCU-protected pointers
+
	 Say N if you are unsure.

 config LOCKDEP
@@ -579,11 +606,10 @@ config DEBUG_LOCKDEP
	  of more runtime overhead.

 config TRACE_IRQFLAGS
-	depends on DEBUG_KERNEL
	bool
-	default y
-	depends on TRACE_IRQFLAGS_SUPPORT
-	depends on PROVE_LOCKING
+	help
+	  Enables hooks to interrupt enabling and disabling for
+	  either tracing or lock debugging.

 config DEBUG_SPINLOCK_SLEEP
	bool "Spinlock debugging: sleep-inside-spinlock checking"
@@ -832,6 +858,30 @@ config RCU_CPU_STALL_DETECTOR

	  Say Y if you are unsure.

+config RCU_CPU_STALL_TIMEOUT
+	int "RCU CPU stall timeout in seconds"
+	depends on RCU_CPU_STALL_DETECTOR
+	range 3 300
+	default 60
+	help
+	  If a given RCU grace period extends more than the specified
+	  number of seconds, a CPU stall warning is printed.  If the
+	  RCU grace period persists, additional CPU stall warnings are
+	  printed at more widely spaced intervals.
+
+config RCU_CPU_STALL_DETECTOR_RUNNABLE
+	bool "RCU CPU stall checking starts automatically at boot"
+	depends on RCU_CPU_STALL_DETECTOR
+	default y
+	help
+	  If set, start checking for RCU CPU stalls immediately on
+	  boot.  Otherwise, RCU CPU stall checking must be manually
+	  enabled.
+
+	  Say Y if you are unsure.
+
+	  Say N if you wish to suppress RCU CPU stall checking during boot.
+
 config RCU_CPU_STALL_VERBOSE
	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"
	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU
diff --git a/lib/bug.c b/lib/bug.c
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -72,8 +72,8 @@ static const struct bug_entry *module_find_bug(unsigned long bugaddr)
	return NULL;
 }

-int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
-			struct module *mod)
+void module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+			 struct module *mod)
 {
	char *secstrings;
	unsigned int i;
@@ -97,8 +97,6 @@ int module_bug_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
	 * could potentially lead to deadlock and thus be counter-productive.
	 */
	list_add(&mod->bug_list, &module_bug_list);
-
-	return 0;
 }

 void module_bug_cleanup(struct module *mod)
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 01e6427..4bfb047 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -590,6 +590,7 @@ out_unlock:
 static const struct file_operations filter_fops = {
	.read  = filter_read,
	.write = filter_write,
+	.llseek = default_llseek,
 };

 static int dma_debug_fs_init(void)
diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c
index 02afc25..3094318 100644
--- a/lib/dynamic_debug.c
+++ b/lib/dynamic_debug.c
@@ -26,19 +26,11 @@
 #include <linux/dynamic_debug.h>
 #include <linux/debugfs.h>
 #include <linux/slab.h>
+#include <linux/jump_label.h>

 extern struct _ddebug __start___verbose[];
 extern struct _ddebug __stop___verbose[];

-/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which
- * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They
- * use independent hash functions, to reduce the chance of false positives.
- */
-long long dynamic_debug_enabled;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled);
-long long dynamic_debug_enabled2;
-EXPORT_SYMBOL_GPL(dynamic_debug_enabled2);
-
 struct ddebug_table {
	struct list_head link;
	char *mod_name;
@@ -88,26 +80,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,
 }

 /*
- * must be called with ddebug_lock held
- */
-
-static int disabled_hash(char hash, bool first_table)
-{
-	struct ddebug_table *dt;
-	char table_hash_value;
-
-	list_for_each_entry(dt, &ddebug_tables, link) {
-		if (first_table)
-			table_hash_value = dt->ddebugs->primary_hash;
-		else
-			table_hash_value = dt->ddebugs->secondary_hash;
-		if (dt->num_enabled && (hash == table_hash_value))
-			return 0;
-	}
-	return 1;
-}
-
-/*
  * Search the tables for _ddebug's which match the given
  * `query' and apply the `flags' and `mask' to them.  Tells
  * the user which ddebug's were changed, or whether none
@@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,
			dt->num_enabled++;
			dp->flags = newflags;
			if (newflags) {
-				dynamic_debug_enabled |=
-						(1LL << dp->primary_hash);
-				dynamic_debug_enabled2 |=
-						(1LL << dp->secondary_hash);
+				jump_label_enable(&dp->enabled);
			} else {
-				if (disabled_hash(dp->primary_hash, true))
-					dynamic_debug_enabled &=
-						~(1LL << dp->primary_hash);
-				if (disabled_hash(dp->secondary_hash, false))
-					dynamic_debug_enabled2 &=
-						~(1LL << dp->secondary_hash);
+				jump_label_disable(&dp->enabled);
			}
			if (verbose)
				printk(KERN_INFO
@@ -429,6 +393,40 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
	return 0;
 }

+static int ddebug_exec_query(char *query_string)
+{
+	unsigned int flags = 0, mask = 0;
+	struct ddebug_query query;
+#define MAXWORDS 9
+	int nwords;
+	char *words[MAXWORDS];
+
+	nwords = ddebug_tokenize(query_string, words, MAXWORDS);
+	if (nwords <= 0)
+		return -EINVAL;
+	if (ddebug_parse_query(words, nwords-1, &query))
+		return -EINVAL;
+	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
+		return -EINVAL;
+
+	/* actually go and implement the change */
+	ddebug_change(&query, flags, mask);
+	return 0;
+}
+
+static __initdata char ddebug_setup_string[1024];
+static __init int ddebug_setup_query(char *str)
+{
+	if (strlen(str) >= 1024) {
+		pr_warning("ddebug boot param string too large\n");
+		return 0;
+	}
+	strcpy(ddebug_setup_string, str);
+	return 1;
+}
+
+__setup("ddebug_query=", ddebug_setup_query);
+
 /*
  * File_ops->write method for <debugfs>/dynamic_debug/conrol.  Gathers the
  * command text from userspace, parses and executes it.
@@ -436,12 +434,8 @@ static int ddebug_parse_flags(const char *str, unsigned int *flagsp,
 static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
				  size_t len, loff_t *offp)
 {
-	unsigned int flags = 0, mask = 0;
-	struct ddebug_query query;
-#define MAXWORDS 9
-	int nwords;
-	char *words[MAXWORDS];
	char tmpbuf[256];
+	int ret;

	if (len == 0)
		return 0;
@@ -455,16 +449,9 @@ static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf,
		printk(KERN_INFO "%s: read %d bytes from userspace\n",
			__func__, (int)len);

-	nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS);
-	if (nwords <= 0)
-		return -EINVAL;
-	if (ddebug_parse_query(words, nwords-1, &query))
-		return -EINVAL;
-	if (ddebug_parse_flags(words[nwords-1], &flags, &mask))
-		return -EINVAL;
-
-	/* actually go and implement the change */
-	ddebug_change(&query, flags, mask);
+	ret = ddebug_exec_query(tmpbuf);
+	if (ret)
+		return ret;

	*offp += len;
	return len;
@@ -725,13 +712,14 @@ static void ddebug_remove_all_tables(void)
	mutex_unlock(&ddebug_lock);
 }

-static int __init dynamic_debug_init(void)
+static __initdata int ddebug_init_success;
+
+static int __init dynamic_debug_init_debugfs(void)
 {
	struct dentry *dir, *file;
-	struct _ddebug *iter, *iter_start;
-	const char *modname = NULL;
-	int ret = 0;
-	int n = 0;
+
+	if (!ddebug_init_success)
+		return -ENODEV;

	dir = debugfs_create_dir("dynamic_debug", NULL);
	if (!dir)
@@ -742,6 +730,16 @@ static int __init dynamic_debug_init(void)
		debugfs_remove(dir);
		return -ENOMEM;
	}
+	return 0;
+}
+
+static int __init dynamic_debug_init(void)
+{
+	struct _ddebug *iter, *iter_start;
+	const char *modname = NULL;
+	int ret = 0;
+	int n = 0;
+
	if (__start___verbose != __stop___verbose) {
		iter = __start___verbose;
		modname = iter->modname;
@@ -759,12 +757,26 @@ static int __init dynamic_debug_init(void)
		}
		ret = ddebug_add_module(iter_start, n, modname);
	}
+
+	/* ddebug_query boot param got passed -> set it up */
+	if (ddebug_setup_string[0] != '\0') {
+		ret = ddebug_exec_query(ddebug_setup_string);
+		if (ret)
+			pr_warning("Invalid ddebug boot param %s",
+				   ddebug_setup_string);
+		else
+			pr_info("ddebug initialized with string %s",
+				ddebug_setup_string);
+	}
+
 out_free:
-	if (ret) {
+	if (ret)
		ddebug_remove_all_tables();
-		debugfs_remove(dir);
-		debugfs_remove(file);
-	}
+	else
+		ddebug_init_success = 1;

	return 0;
 }
-module_init(dynamic_debug_init);
+/* Allow early initialization for boot messages via boot param */
+arch_initcall(dynamic_debug_init);
+/* Debugfs setup must be done later */
+module_init(dynamic_debug_init_debugfs);
diff --git a/lib/kobject.c b/lib/kobject.c
index f07c572..82dc34c 100644
--- a/lib/kobject.c
+++ b/lib/kobject.c
@@ -746,17 +746,56 @@ void kset_unregister(struct kset *k)
  */
 struct kobject *kset_find_obj(struct kset *kset, const char *name)
 {
+	return kset_find_obj_hinted(kset, name, NULL);
+}
+
+/**
+ * kset_find_obj_hinted - search for object in kset given a predecessor hint.
+ * @kset: kset we're looking in.
+ * @name: object's name.
+ * @hint: hint to possible object's predecessor.
+ *
+ * Check the hint's next object and if it is a match return it directly,
+ * otherwise, fall back to the behavior of kset_find_obj().  Either way
+ * a reference for the returned object is held and the reference on the
+ * hinted object is released.
+ */
+struct kobject *kset_find_obj_hinted(struct kset *kset, const char *name,
+				     struct kobject *hint)
+{
	struct kobject *k;
	struct kobject *ret = NULL;

	spin_lock(&kset->list_lock);
+
+	if (!hint)
+		goto slow_search;
+
+	/* end of list detection */
+	if (hint->entry.next == kset->list.next)
+		goto slow_search;
+
+	k = container_of(hint->entry.next, struct kobject, entry);
+	if (!kobject_name(k) || strcmp(kobject_name(k), name))
+		goto slow_search;
+
+	ret = kobject_get(k);
+	goto unlock_exit;
+
+slow_search:
	list_for_each_entry(k, &kset->list, entry) {
		if (kobject_name(k) && !strcmp(kobject_name(k), name)) {
			ret = kobject_get(k);
			break;
		}
	}
+
+unlock_exit:
	spin_unlock(&kset->list_lock);
+
+	if (hint)
+		kobject_put(hint);
+
	return ret;
 }
diff --git a/lib/list_sort.c b/lib/list_sort.c
index 4b5cb79..a7616fa 100644
--- a/lib/list_sort.c
+++ b/lib/list_sort.c
@@ -70,7 +70,7 @@ static void merge_and_restore_back_links(void *priv,
		 * element comparison is needed, so the client's cmp()
		 * routine can invoke cond_resched() periodically.
		 */
-		(*cmp)(priv, tail, tail);
+		(*cmp)(priv, tail->next, tail->next);

		tail->next->prev = tail;
		tail = tail->next;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index efd16fa..6f412ab4 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -49,7 +49,7 @@ struct radix_tree_node {
	unsigned int	height;		/* Height from the bottom */
	unsigned int	count;
	struct rcu_head	rcu_head;
-	void		*slots[RADIX_TREE_MAP_SIZE];
+	void __rcu	*slots[RADIX_TREE_MAP_SIZE];
	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];
 };
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 34e3082..7c06ee5 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;
  */
 static unsigned long io_tlb_overflow = 32*1024;

-void *io_tlb_overflow_buffer;
+static void *io_tlb_overflow_buffer;

 /*
  * This is a free list describing the number of free entries available from
@@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
	 * between io_tlb_start and io_tlb_end.
	 */
-	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int));
+	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
	for (i = 0; i < io_tlb_nslabs; i++)
		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
	io_tlb_index = 0;
-	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t));
+	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));

	/*
	 * Get the overflow emergency buffer
	 */
-	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
+	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));
	if (!io_tlb_overflow_buffer)
		panic("Cannot allocate SWIOTLB overflow buffer!\n");
	if (verbose)
@@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
	/*
	 * Get IO TLB memory from the low pages
	 */
-	io_tlb_start = alloc_bootmem_low_pages(bytes);
+	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));
	if (!io_tlb_start)
		panic("Cannot allocate SWIOTLB buffer");

@@ -308,13 +308,13 @@ void __init swiotlb_free(void)
			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
	} else {
		free_bootmem_late(__pa(io_tlb_overflow_buffer),
-				  io_tlb_overflow);
+				  PAGE_ALIGN(io_tlb_overflow));
		free_bootmem_late(__pa(io_tlb_orig_addr),
-				  io_tlb_nslabs * sizeof(phys_addr_t));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
		free_bootmem_late(__pa(io_tlb_list),
-				  io_tlb_nslabs * sizeof(int));
+				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));
		free_bootmem_late(__pa(io_tlb_start),
-				  io_tlb_nslabs << IO_TLB_SHIFT);
+				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
	}
 }
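
The hinted lookup added to lib/kobject.c above is easiest to see in use. The following is an illustrative sketch only, not part of this merge: demo_kset, demo_hinted_walk() and the "link%d" naming scheme are invented for the example. It follows the reference rules stated in the new kerneldoc: kset_find_obj_hinted() always drops the reference on the hint it is given and returns any match with a fresh reference held, so a caller walking objects that were registered in order can hand each result back as the next hint and usually skip the full list scan.

/*
 * Illustrative sketch only: demo_kset, demo_hinted_walk() and the "link%d"
 * names are hypothetical and not part of the patch above.
 */
#include <linux/kernel.h>
#include <linux/kobject.h>

static void demo_hinted_walk(struct kset *demo_kset)
{
	struct kobject *hint = NULL;	/* no predecessor hint for the first lookup */
	struct kobject *kobj;
	char name[16];
	int i;

	for (i = 0; i < 8; i++) {
		snprintf(name, sizeof(name), "link%d", i);

		/*
		 * Always consumes the reference on 'hint' (if any); returns
		 * the match with a reference held, or NULL.  If the objects
		 * were added in order, this takes the hint->entry.next fast
		 * path instead of scanning the whole kset list.
		 */
		kobj = kset_find_obj_hinted(demo_kset, name, hint);
		if (!kobj)
			return;		/* hint reference was already dropped */

		pr_info("found %s\n", kobject_name(kobj));
		hint = kobj;		/* hand this reference on as the next hint */
	}

	kobject_put(hint);		/* drop the reference still held on the last object */
}

When the objects were not registered consecutively, the helper simply falls back to the same linear search as kset_find_obj(), so chaining lookups this way costs little more than calling kset_find_obj() in a loop.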