From 5db53f3e80dee2d9dff5e534f9e9fe1db17c9936 Mon Sep 17 00:00:00 2001 From: Joern Engel Date: Fri, 20 Nov 2009 20:13:39 +0100 Subject: [LogFS] add new flash file system This is a new flash file system. See Documentation/filesystems/logfs.txt Signed-off-by: Joern Engel --- lib/Kconfig | 3 + lib/Makefile | 1 + lib/btree.c | 797 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 801 insertions(+) create mode 100644 lib/btree.c (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index bb1326d..277fbfb 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -156,6 +156,9 @@ config TEXTSEARCH_BM config TEXTSEARCH_FSM tristate +config BTREE + boolean + config HAS_IOMEM boolean depends on !NO_IOMEM diff --git a/lib/Makefile b/lib/Makefile index 2e78277..cff8261 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -41,6 +41,7 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o +obj-$(CONFIG_BTREE) += btree.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o diff --git a/lib/btree.c b/lib/btree.c new file mode 100644 index 0000000..41859a8 --- /dev/null +++ b/lib/btree.c @@ -0,0 +1,797 @@ +/* + * lib/btree.c - Simple In-memory B+Tree + * + * As should be obvious for Linux kernel code, license is GPLv2 + * + * Copyright (c) 2007-2008 Joern Engel + * Bits and pieces stolen from Peter Zijlstra's code, which is + * Copyright 2007, Red Hat Inc. Peter Zijlstra + * GPLv2 + * + * see http://programming.kicks-ass.net/kernel-patches/vma_lookup/btree.patch + * + * A relatively simple B+Tree implementation. I have written it as a learning + * exercise to understand how B+Trees work. Turned out to be useful as well. + * + * B+Trees can be used similarly to Linux radix trees (which don't have anything + * in common with textbook radix trees, beware). Prerequisite for them working + * well is that access to a random tree node is much faster than a large number + * of operations within each node. + * + * Disks have fulfilled the prerequisite for a long time. More recently DRAM + * has gained similar properties, as memory access times, when measured in cpu + * cycles, have increased. Cacheline sizes have increased as well, which also + * helps B+Trees. + * + * Compared to radix trees, B+Trees are more efficient when dealing with a + * sparsely populated address space. Between 25% and 50% of the memory is + * occupied with valid pointers. When densely populated, radix trees contain + * ~98% pointers - hard to beat. Very sparse radix trees contain only ~2% + * pointers. + * + * This particular implementation stores pointers identified by a long value. + * Storing NULL pointers is illegal; lookup will return NULL when no entry + * was found. + * + * A trick was used that is not commonly found in textbooks. The lowest + * values are to the right, not to the left. All used slots within a node + * are on the left, all unused slots contain NUL values. Most operations + * simply loop once over all slots and terminate on the first NUL. + */ + +#include <linux/btree.h> +#include <linux/cache.h> +#include <linux/kernel.h> +#include <linux/slab.h> +#include <linux/module.h> + +#define MAX(a, b) ((a) > (b) ?
(a) : (b)) +#define NODESIZE MAX(L1_CACHE_BYTES, 128) + +struct btree_geo { + int keylen; + int no_pairs; + int no_longs; +}; + +struct btree_geo btree_geo32 = { + .keylen = 1, + .no_pairs = NODESIZE / sizeof(long) / 2, + .no_longs = NODESIZE / sizeof(long) / 2, +}; +EXPORT_SYMBOL_GPL(btree_geo32); + +#define LONG_PER_U64 (64 / BITS_PER_LONG) +struct btree_geo btree_geo64 = { + .keylen = LONG_PER_U64, + .no_pairs = NODESIZE / sizeof(long) / (1 + LONG_PER_U64), + .no_longs = LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + LONG_PER_U64)), +}; +EXPORT_SYMBOL_GPL(btree_geo64); + +struct btree_geo btree_geo128 = { + .keylen = 2 * LONG_PER_U64, + .no_pairs = NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64), + .no_longs = 2 * LONG_PER_U64 * (NODESIZE / sizeof(long) / (1 + 2 * LONG_PER_U64)), +}; +EXPORT_SYMBOL_GPL(btree_geo128); + +static struct kmem_cache *btree_cachep; + +void *btree_alloc(gfp_t gfp_mask, void *pool_data) +{ + return kmem_cache_alloc(btree_cachep, gfp_mask); +} +EXPORT_SYMBOL_GPL(btree_alloc); + +void btree_free(void *element, void *pool_data) +{ + kmem_cache_free(btree_cachep, element); +} +EXPORT_SYMBOL_GPL(btree_free); + +static unsigned long *btree_node_alloc(struct btree_head *head, gfp_t gfp) +{ + unsigned long *node; + + node = mempool_alloc(head->mempool, gfp); + memset(node, 0, NODESIZE); + return node; +} + +static int longcmp(const unsigned long *l1, const unsigned long *l2, size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) { + if (l1[i] < l2[i]) + return -1; + if (l1[i] > l2[i]) + return 1; + } + return 0; +} + +static unsigned long *longcpy(unsigned long *dest, const unsigned long *src, + size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) + dest[i] = src[i]; + return dest; +} + +static unsigned long *longset(unsigned long *s, unsigned long c, size_t n) +{ + size_t i; + + for (i = 0; i < n; i++) + s[i] = c; + return s; +} + +static void dec_key(struct btree_geo *geo, unsigned long *key) +{ + unsigned long val; + int i; + + for (i = geo->keylen - 1; i >= 0; i--) { + val = key[i]; + key[i] = val - 1; + if (val) + break; + } +} + +static unsigned long *bkey(struct btree_geo *geo, unsigned long *node, int n) +{ + return &node[n * geo->keylen]; +} + +static void *bval(struct btree_geo *geo, unsigned long *node, int n) +{ + return (void *)node[geo->no_longs + n]; +} + +static void setkey(struct btree_geo *geo, unsigned long *node, int n, + unsigned long *key) +{ + longcpy(bkey(geo, node, n), key, geo->keylen); +} + +static void setval(struct btree_geo *geo, unsigned long *node, int n, + void *val) +{ + node[geo->no_longs + n] = (unsigned long) val; +} + +static void clearpair(struct btree_geo *geo, unsigned long *node, int n) +{ + longset(bkey(geo, node, n), 0, geo->keylen); + node[geo->no_longs + n] = 0; +} + +static inline void __btree_init(struct btree_head *head) +{ + head->node = NULL; + head->height = 0; +} + +void btree_init_mempool(struct btree_head *head, mempool_t *mempool) +{ + __btree_init(head); + head->mempool = mempool; +} +EXPORT_SYMBOL_GPL(btree_init_mempool); + +int btree_init(struct btree_head *head) +{ + __btree_init(head); + head->mempool = mempool_create(0, btree_alloc, btree_free, NULL); + if (!head->mempool) + return -ENOMEM; + return 0; +} +EXPORT_SYMBOL_GPL(btree_init); + +void btree_destroy(struct btree_head *head) +{ + mempool_destroy(head->mempool); + head->mempool = NULL; +} +EXPORT_SYMBOL_GPL(btree_destroy); + +void *btree_last(struct btree_head *head, struct btree_geo *geo, + unsigned long *key) +{ + int height = head->height; + 
unsigned long *node = head->node; + + if (height == 0) + return NULL; + + for ( ; height > 1; height--) + node = bval(geo, node, 0); + + longcpy(key, bkey(geo, node, 0), geo->keylen); + return bval(geo, node, 0); +} +EXPORT_SYMBOL_GPL(btree_last); + +static int keycmp(struct btree_geo *geo, unsigned long *node, int pos, + unsigned long *key) +{ + return longcmp(bkey(geo, node, pos), key, geo->keylen); +} + +static int keyzero(struct btree_geo *geo, unsigned long *key) +{ + int i; + + for (i = 0; i < geo->keylen; i++) + if (key[i]) + return 0; + + return 1; +} + +void *btree_lookup(struct btree_head *head, struct btree_geo *geo, + unsigned long *key) +{ + int i, height = head->height; + unsigned long *node = head->node; + + if (height == 0) + return NULL; + + for ( ; height > 1; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + if (i == geo->no_pairs) + return NULL; + node = bval(geo, node, i); + if (!node) + return NULL; + } + + if (!node) + return NULL; + + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) == 0) + return bval(geo, node, i); + return NULL; +} +EXPORT_SYMBOL_GPL(btree_lookup); + +int btree_update(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val) +{ + int i, height = head->height; + unsigned long *node = head->node; + + if (height == 0) + return -ENOENT; + + for ( ; height > 1; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + if (i == geo->no_pairs) + return -ENOENT; + node = bval(geo, node, i); + if (!node) + return -ENOENT; + } + + if (!node) + return -ENOENT; + + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) == 0) { + setval(geo, node, i, val); + return 0; + } + return -ENOENT; +} +EXPORT_SYMBOL_GPL(btree_update); + +/* + * Usually this function is quite similar to normal lookup. But the key of + * a parent node may be smaller than the smallest key of all its siblings. + * In such a case we cannot just return NULL, as we have only proven that no + * key smaller than __key, but larger than this parent key exists. + * So we set __key to the parent key and retry. We have to use the smallest + * such parent key, which is the last parent key we encountered. 
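 + * An example may help (illustrative, not part of the original text):
 + * suppose a parent slot still carries key 20 for a child from which the
 + * entry with key 20 has since been removed, leaving keys 30 and 25. A
 + * search for the predecessor of 21 sets key to 20 and descends into that
 + * child, where no key <= 20 remains -- yet smaller entries may still
 + * exist in a sibling subtree. Retrying with __key set to the parent key,
 + * 20, restarts the search as "find the predecessor of 20", which then
 + * succeeds.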
+ */ +void *btree_get_prev(struct btree_head *head, struct btree_geo *geo, + unsigned long *__key) +{ + int i, height; + unsigned long *node, *oldnode; + unsigned long *retry_key = NULL, key[geo->keylen]; + + if (keyzero(geo, __key)) + return NULL; + + if (head->height == 0) + return NULL; +retry: + longcpy(key, __key, geo->keylen); + dec_key(geo, key); + + node = head->node; + for (height = head->height ; height > 1; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + if (i == geo->no_pairs) + goto miss; + oldnode = node; + node = bval(geo, node, i); + if (!node) + goto miss; + retry_key = bkey(geo, oldnode, i); + } + + if (!node) + goto miss; + + for (i = 0; i < geo->no_pairs; i++) { + if (keycmp(geo, node, i, key) <= 0) { + if (bval(geo, node, i)) { + longcpy(__key, bkey(geo, node, i), geo->keylen); + return bval(geo, node, i); + } else + goto miss; + } + } +miss: + if (retry_key) { + __key = retry_key; + retry_key = NULL; + goto retry; + } + return NULL; +} + +static int getpos(struct btree_geo *geo, unsigned long *node, + unsigned long *key) +{ + int i; + + for (i = 0; i < geo->no_pairs; i++) { + if (keycmp(geo, node, i, key) <= 0) + break; + } + return i; +} + +static int getfill(struct btree_geo *geo, unsigned long *node, int start) +{ + int i; + + for (i = start; i < geo->no_pairs; i++) + if (!bval(geo, node, i)) + break; + return i; +} + +/* + * locate the correct leaf node in the btree + */ +static unsigned long *find_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level) +{ + unsigned long *node = head->node; + int i, height; + + for (height = head->height; height > level; height--) { + for (i = 0; i < geo->no_pairs; i++) + if (keycmp(geo, node, i, key) <= 0) + break; + + if ((i == geo->no_pairs) || !bval(geo, node, i)) { + /* right-most key is too large, update it */ + /* FIXME: If the right-most key on higher levels is + * always zero, this wouldn't be necessary. 
*/ + i--; + setkey(geo, node, i, key); + } + BUG_ON(i < 0); + node = bval(geo, node, i); + } + BUG_ON(!node); + return node; +} + +static int btree_grow(struct btree_head *head, struct btree_geo *geo, + gfp_t gfp) +{ + unsigned long *node; + int fill; + + node = btree_node_alloc(head, gfp); + if (!node) + return -ENOMEM; + if (head->node) { + fill = getfill(geo, head->node, 0); + setkey(geo, node, 0, bkey(geo, head->node, fill - 1)); + setval(geo, node, 0, head->node); + } + head->node = node; + head->height++; + return 0; +} + +static void btree_shrink(struct btree_head *head, struct btree_geo *geo) +{ + unsigned long *node; + int fill; + + if (head->height <= 1) + return; + + node = head->node; + fill = getfill(geo, node, 0); + BUG_ON(fill > 1); + head->node = bval(geo, node, 0); + head->height--; + mempool_free(node, head->mempool); +} + +static int btree_insert_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val, int level, + gfp_t gfp) +{ + unsigned long *node; + int i, pos, fill, err; + + BUG_ON(!val); + if (head->height < level) { + err = btree_grow(head, geo, gfp); + if (err) + return err; + } + +retry: + node = find_level(head, geo, key, level); + pos = getpos(geo, node, key); + fill = getfill(geo, node, pos); + /* two identical keys are not allowed */ + BUG_ON(pos < fill && keycmp(geo, node, pos, key) == 0); + + if (fill == geo->no_pairs) { + /* need to split node */ + unsigned long *new; + + new = btree_node_alloc(head, gfp); + if (!new) + return -ENOMEM; + err = btree_insert_level(head, geo, + bkey(geo, node, fill / 2 - 1), + new, level + 1, gfp); + if (err) { + mempool_free(new, head->mempool); + return err; + } + for (i = 0; i < fill / 2; i++) { + setkey(geo, new, i, bkey(geo, node, i)); + setval(geo, new, i, bval(geo, node, i)); + setkey(geo, node, i, bkey(geo, node, i + fill / 2)); + setval(geo, node, i, bval(geo, node, i + fill / 2)); + clearpair(geo, node, i + fill / 2); + } + if (fill & 1) { + setkey(geo, node, i, bkey(geo, node, fill - 1)); + setval(geo, node, i, bval(geo, node, fill - 1)); + clearpair(geo, node, fill - 1); + } + goto retry; + } + BUG_ON(fill >= geo->no_pairs); + + /* shift and insert */ + for (i = fill; i > pos; i--) { + setkey(geo, node, i, bkey(geo, node, i - 1)); + setval(geo, node, i, bval(geo, node, i - 1)); + } + setkey(geo, node, pos, key); + setval(geo, node, pos, val); + + return 0; +} + +int btree_insert(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, void *val, gfp_t gfp) +{ + return btree_insert_level(head, geo, key, val, 1, gfp); +} +EXPORT_SYMBOL_GPL(btree_insert); + +static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level); +static void merge(struct btree_head *head, struct btree_geo *geo, int level, + unsigned long *left, int lfill, + unsigned long *right, int rfill, + unsigned long *parent, int lpos) +{ + int i; + + for (i = 0; i < rfill; i++) { + /* Move all keys to the left */ + setkey(geo, left, lfill + i, bkey(geo, right, i)); + setval(geo, left, lfill + i, bval(geo, right, i)); + } + /* Exchange left and right child in parent */ + setval(geo, parent, lpos, right); + setval(geo, parent, lpos + 1, left); + /* Remove left (formerly right) child from parent */ + btree_remove_level(head, geo, bkey(geo, parent, lpos), level + 1); + mempool_free(right, head->mempool); +} + +static void rebalance(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level, unsigned long *child, int fill) +{ + unsigned long 
*parent, *left = NULL, *right = NULL; + int i, no_left, no_right; + + if (fill == 0) { + /* Because we don't steal entries from a neighbour, this case + * can happen. Parent node contains a single child, this + * node, so merging with a sibling never happens. + */ + btree_remove_level(head, geo, key, level + 1); + mempool_free(child, head->mempool); + return; + } + + parent = find_level(head, geo, key, level + 1); + i = getpos(geo, parent, key); + BUG_ON(bval(geo, parent, i) != child); + + if (i > 0) { + left = bval(geo, parent, i - 1); + no_left = getfill(geo, left, 0); + if (fill + no_left <= geo->no_pairs) { + merge(head, geo, level, + left, no_left, + child, fill, + parent, i - 1); + return; + } + } + if (i + 1 < getfill(geo, parent, i)) { + right = bval(geo, parent, i + 1); + no_right = getfill(geo, right, 0); + if (fill + no_right <= geo->no_pairs) { + merge(head, geo, level, + child, fill, + right, no_right, + parent, i); + return; + } + } + /* + * We could also try to steal one entry from the left or right + * neighbor. By not doing so we changed the invariant from + * "all nodes are at least half full" to "no two neighboring + * nodes can be merged". This means that the average fill of + * all nodes is still half or better. + */ +} + +static void *btree_remove_level(struct btree_head *head, struct btree_geo *geo, + unsigned long *key, int level) +{ + unsigned long *node; + int i, pos, fill; + void *ret; + + if (level > head->height) { + /* we recursed all the way up */ + head->height = 0; + head->node = NULL; + return NULL; + } + + node = find_level(head, geo, key, level); + pos = getpos(geo, node, key); + fill = getfill(geo, node, pos); + if ((level == 1) && (keycmp(geo, node, pos, key) != 0)) + return NULL; + ret = bval(geo, node, pos); + + /* remove and shift */ + for (i = pos; i < fill - 1; i++) { + setkey(geo, node, i, bkey(geo, node, i + 1)); + setval(geo, node, i, bval(geo, node, i + 1)); + } + clearpair(geo, node, fill - 1); + + if (fill - 1 < geo->no_pairs / 2) { + if (level < head->height) + rebalance(head, geo, key, level, node, fill - 1); + else if (fill - 1 == 1) + btree_shrink(head, geo); + } + + return ret; +} + +void *btree_remove(struct btree_head *head, struct btree_geo *geo, + unsigned long *key) +{ + if (head->height == 0) + return NULL; + + return btree_remove_level(head, geo, key, 1); +} +EXPORT_SYMBOL_GPL(btree_remove); + +int btree_merge(struct btree_head *target, struct btree_head *victim, + struct btree_geo *geo, gfp_t gfp) +{ + unsigned long key[geo->keylen]; + unsigned long dup[geo->keylen]; + void *val; + int err; + + BUG_ON(target == victim); + + if (!(target->node)) { + /* target is empty, just copy fields over */ + target->node = victim->node; + target->height = victim->height; + __btree_init(victim); + return 0; + } + + /* TODO: This needs some optimizations. Currently we do three tree + * walks to remove a single object from the victim. + */ + for (;;) { + if (!btree_last(victim, geo, key)) + break; + val = btree_lookup(victim, geo, key); + err = btree_insert(target, geo, key, val, gfp); + if (err) + return err; + /* We must make a copy of the key, as the original will get + * mangled inside btree_remove.
*/ + longcpy(dup, key, geo->keylen); + btree_remove(victim, geo, dup); + } + return 0; +} +EXPORT_SYMBOL_GPL(btree_merge); + +static size_t __btree_for_each(struct btree_head *head, struct btree_geo *geo, + unsigned long *node, unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, size_t index, + void *func2), + void *func2, int reap, int height, size_t count) +{ + int i; + unsigned long *child; + + for (i = 0; i < geo->no_pairs; i++) { + child = bval(geo, node, i); + if (!child) + break; + if (height > 1) + count = __btree_for_each(head, geo, child, opaque, + func, func2, reap, height - 1, count); + else + func(child, opaque, bkey(geo, node, i), count++, + func2); + } + if (reap) + mempool_free(node, head->mempool); + return count; +} + +static void empty(void *elem, unsigned long opaque, unsigned long *key, + size_t index, void *func2) +{ +} + +void visitorl(void *elem, unsigned long opaque, unsigned long *key, + size_t index, void *__func) +{ + visitorl_t func = __func; + + func(elem, opaque, *key, index); +} +EXPORT_SYMBOL_GPL(visitorl); + +void visitor32(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func) +{ + visitor32_t func = __func; + u32 *key = (void *)__key; + + func(elem, opaque, *key, index); +} +EXPORT_SYMBOL_GPL(visitor32); + +void visitor64(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func) +{ + visitor64_t func = __func; + u64 *key = (void *)__key; + + func(elem, opaque, *key, index); +} +EXPORT_SYMBOL_GPL(visitor64); + +void visitor128(void *elem, unsigned long opaque, unsigned long *__key, + size_t index, void *__func) +{ + visitor128_t func = __func; + u64 *key = (void *)__key; + + func(elem, opaque, key[0], key[1], index); +} +EXPORT_SYMBOL_GPL(visitor128); + +size_t btree_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, + size_t index, void *func2), + void *func2) +{ + size_t count = 0; + + if (!func2) + func = empty; + if (head->node) + count = __btree_for_each(head, geo, head->node, opaque, func, + func2, 0, head->height, 0); + return count; +} +EXPORT_SYMBOL_GPL(btree_visitor); + +size_t btree_grim_visitor(struct btree_head *head, struct btree_geo *geo, + unsigned long opaque, + void (*func)(void *elem, unsigned long opaque, + unsigned long *key, + size_t index, void *func2), + void *func2) +{ + size_t count = 0; + + if (!func2) + func = empty; + if (head->node) + count = __btree_for_each(head, geo, head->node, opaque, func, + func2, 1, head->height, 0); + __btree_init(head); + return count; +} +EXPORT_SYMBOL_GPL(btree_grim_visitor); + +static int __init btree_module_init(void) +{ + btree_cachep = kmem_cache_create("btree_node", NODESIZE, 0, + SLAB_HWCACHE_ALIGN, NULL); + return 0; +} + +static void __exit btree_module_exit(void) +{ + kmem_cache_destroy(btree_cachep); +} + +/* If core code starts using btree, initialization should happen even earlier */ +module_init(btree_module_init); +module_exit(btree_module_exit); + +MODULE_AUTHOR("Joern Engel "); +MODULE_AUTHOR("Johannes Berg "); +MODULE_LICENSE("GPL"); -- cgit v1.1 From 39d997b514e12d5aff0dca206eb8996b3957927e Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Mon, 21 Dec 2009 16:20:16 -0800 Subject: x86, core: Optimize hweight32() Optimize hweight32 by using the same technique in hweight64. 
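As a sketch of the arithmetic (illustrative, not from the patch): after the three mask-and-add steps each byte of w holds the population count of its own eight bits, and the multiply by 0x01010101 sums all four bytes into the top byte, replacing the shift-and-add reduction used on machines without a fast multiplier:

	unsigned int w = 0xf0f00f0f;	/* four set bits per byte, 16 total */
	w -= (w >> 1) & 0x55555555;	/* 2-bit fields hold counts 0..2 */
	w = (w & 0x33333333) + ((w >> 2) & 0x33333333);	/* 4-bit fields, 0..4 */
	w = (w + (w >> 4)) & 0x0f0f0f0f;	/* each byte is now 0x04 */
	/* (w * 0x01010101) >> 24 == 4 + 4 + 4 + 4 == 16 */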
The proof of this technique can be found in the commit log for f9b4192923fa6e38331e88214b1fe5fc21583fcc ("bitops: hweight() speedup"). The userspace benchmark on x86_32 showed 20% speedup with bitmap_weight() which uses hweight32 to count bits for each unsigned long on 32bit architectures. int main(void) { #define SZ (1024 * 1024 * 512) static DECLARE_BITMAP(bitmap, SZ) = { [0 ... 100] = 1, }; return bitmap_weight(bitmap, SZ); } Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Cc: Linus Torvalds LKML-Reference: <1258603932-4590-1-git-send-email-akinobu.mita@gmail.com> [ only x86 sets ARCH_HAS_FAST_MULTIPLIER so we do this via the x86 tree] Signed-off-by: Ingo Molnar --- lib/hweight.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'lib') diff --git a/lib/hweight.c b/lib/hweight.c index 389424e..63ee4eb 100644 --- a/lib/hweight.c +++ b/lib/hweight.c @@ -11,11 +11,18 @@ unsigned int hweight32(unsigned int w) { +#ifdef ARCH_HAS_FAST_MULTIPLIER + w -= (w >> 1) & 0x55555555; + w = (w & 0x33333333) + ((w >> 2) & 0x33333333); + w = (w + (w >> 4)) & 0x0f0f0f0f; + return (w * 0x01010101) >> 24; +#else unsigned int res = w - ((w >> 1) & 0x55555555); res = (res & 0x33333333) + ((res >> 2) & 0x33333333); res = (res + (res >> 4)) & 0x0F0F0F0F; res = res + (res >> 8); return (res + (res >> 16)) & 0x000000FF; +#endif } EXPORT_SYMBOL(hweight32); -- cgit v1.1 From bc7259a2ce764ea16200eb9e53f6e136e918d065 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Thu, 7 Jan 2010 11:43:50 +0000 Subject: lib/vsprintf.c: Add %pMF to format FDDI bit reversed MAC addresses On Mon, 2010-01-04 at 23:43 +0000, Maciej W. Rozycki wrote: > The example below shows an address, and the sequence of bits or symbols > that would be transmitted when the address is used in the Source Address > or Destination Address fields on the MAC header. The transmission line > shows the address bits in the order transmitted, from left to right. For > IEEE 802 LANs these correspond to actual bits on the medium. The FDDI > symbols line shows how the FDDI PHY sends the address bits as encoded > symbols. > > MSB: 35:7B:12:00:00:01 > Canonical: AC-DE-48-00-00-80 > Transmission: 00110101 01111011 00010010 00000000 00000000 00000001 > FDDI Symbols: 35 7B 12 00 00 01" > > Please note that this address has its group bit clear. > > This notation is also defined in the "FDDI MEDIA ACCESS CONTROL-2 > (MAC-2)" (X3T9/92-120) document although that book does not have a need > to use the MSB form and it's skipped. Adds 6 bytes to object size for x86 New: $ size lib/vsprintf.o text data bss dec hex filename 8664 0 2 8666 21da lib/vsprintf.o $ size lib/vsprintf.o text data bss dec hex filename 8658 0 2 8660 21d4 lib/vsprintf.o Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- lib/vsprintf.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index d4996cf..dc48d2b 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include /* for PAGE_SIZE */ @@ -681,11 +682,21 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; char *p = mac_addr; int i; + bool bitrev; + char separator; + + if (fmt[1] == 'F') { /* FDDI canonical format */ + bitrev = true; + separator = '-'; + } else { + bitrev = false; + separator = ':'; + } for (i = 0; i < 6; i++) { - p = pack_hex_byte(p, addr[i]); + p = pack_hex_byte(p, bitrev ? 
bitrev8(addr[i]) : addr[i]); if (fmt[0] == 'M' && i != 5) - *p++ = ':'; + *p++ = separator; } *p = '\0'; @@ -896,6 +907,10 @@ static char *uuid_string(char *buf, char *end, const u8 *addr, * - 'M' For a 6-byte MAC address, it prints the address in the * usual colon-separated hex notation * - 'm' For a 6-byte MAC address, it prints the hex address without colons + * - 'MF' For a 6-byte MAC FDDI address, it prints the address + * with a dash-separated hex notation with bit reversed bytes + * - 'mF' For a 6-byte MAC FDDI address, it prints the address + * in hex notation without separators with bit reversed bytes * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) * IPv6 uses colon separated network-order 16 bit hex with leading 0's @@ -939,6 +954,7 @@ static char *pointer(const char *fmt, char *buf, char *end, void *ptr, return resource_string(buf, end, ptr, spec, fmt); case 'M': /* Colon separated: 00:01:02:03:04:05 */ case 'm': /* Contiguous: 000102030405 */ + /* [mM]F (FDDI, bit reversed) */ return mac_address_string(buf, end, ptr, spec, fmt); case 'I': /* Formatted IP supported * 4: 1.2.3.4 -- cgit v1.1 From c8e000604bce02a87742240a9b716a0f1b680c0b Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Mon, 11 Jan 2010 00:44:14 -0800 Subject: lib: Kill bit-reversed FDDI MAC output case, it's bogus. Signed-off-by: Joe Perches Signed-off-by: David S. Miller --- lib/vsprintf.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index dc48d2b..e83e3e7 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -25,7 +25,6 @@ #include #include #include -#include #include #include /* for PAGE_SIZE */ @@ -682,19 +681,16 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, char mac_addr[sizeof("xx:xx:xx:xx:xx:xx")]; char *p = mac_addr; int i; - bool bitrev; char separator; if (fmt[1] == 'F') { /* FDDI canonical format */ - bitrev = true; separator = '-'; } else { - bitrev = false; separator = ':'; } for (i = 0; i < 6; i++) { - p = pack_hex_byte(p, bitrev ? bitrev8(addr[i]) : addr[i]); + p = pack_hex_byte(p, addr[i]); if (fmt[0] == 'M' && i != 5) *p++ = separator; } @@ -908,9 +904,7 @@ static char *uuid_string(char *buf, char *end, const u8 *addr, * usual colon-separated hex notation * - 'm' For a 6-byte MAC address, it prints the hex address without colons * - 'MF' For a 6-byte MAC FDDI address, it prints the address - * with a dash-separated hex notation with bit reversed bytes - * - 'mF' For a 6-byte MAC FDDI address, it prints the address - * in hex notation without separators with bit reversed bytes + * with a dash-separated hex notation * - 'I' [46] for IPv4/IPv6 addresses printed in the usual way * IPv4 uses dot-separated decimal without leading 0's (1.2.3.4) * IPv6 uses colon separated network-order 16 bit hex with leading 0's -- cgit v1.1 From 0159f24ee764927bf44c1a25473bd4517febd21c Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Wed, 13 Jan 2010 20:23:30 -0800 Subject: lib/vsprintf.c: Add IPV4 options %pI4[hnbl] for host, network, big and little endian This should allow the removal of the #defines and uses of NIPQUAD and NIPQUAD_FMT Signed-off-by: Joe Perches Signed-off-by: David S. 
Miller --- lib/vsprintf.c | 36 +++++++++++++++++++++++++++++++----- 1 file changed, 31 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index e83e3e7..add0446 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -699,13 +699,37 @@ static char *mac_address_string(char *buf, char *end, u8 *addr, return string(buf, end, mac_addr, spec); } -static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) +static char *ip4_string(char *p, const u8 *addr, const char *fmt) { int i; - + bool leading_zeros = (fmt[0] == 'i'); + int index; + int step; + + switch (fmt[2]) { + case 'h': +#ifdef __BIG_ENDIAN + index = 0; + step = 1; +#else + index = 3; + step = -1; +#endif + break; + case 'l': + index = 3; + step = -1; + break; + case 'n': + case 'b': + default: + index = 0; + step = 1; + break; + } for (i = 0; i < 4; i++) { char temp[3]; /* hold each IP quad in reverse order */ - int digits = put_dec_trunc(temp, addr[i]) - temp; + int digits = put_dec_trunc(temp, addr[index]) - temp; if (leading_zeros) { if (digits < 3) *p++ = '0'; @@ -717,6 +741,7 @@ static char *ip4_string(char *p, const u8 *addr, bool leading_zeros) *p++ = temp[digits]; if (i < 3) *p++ = '.'; + index += step; } *p = '\0'; @@ -796,7 +821,7 @@ static char *ip6_compressed_string(char *p, const char *addr) if (useIPv4) { if (needcolon) *p++ = ':'; - p = ip4_string(p, &in6.s6_addr[12], false); + p = ip4_string(p, &in6.s6_addr[12], "I4"); } *p = '\0'; @@ -836,7 +861,7 @@ static char *ip4_addr_string(char *buf, char *end, const u8 *addr, { char ip4_addr[sizeof("255.255.255.255")]; - ip4_string(ip4_addr, addr, fmt[0] == 'i'); + ip4_string(ip4_addr, addr, fmt); return string(buf, end, ip4_addr, spec); } @@ -911,6 +936,7 @@ static char *uuid_string(char *buf, char *end, const u8 *addr, * - 'i' [46] for 'raw' IPv4/IPv6 addresses * IPv6 omits the colons (01020304...0f) * IPv4 uses dot-separated decimal with leading 0's (010.123.045.006) + * - '[Ii]4[hnbl]' IPv4 addresses in host, network, big or little endian order * - 'I6c' for IPv6 addresses printed as specified by * http://www.ietf.org/id/draft-kawamura-ipv6-text-representation-03.txt * - 'U' For a 16 byte UUID/GUID, it prints the UUID/GUID in the form -- cgit v1.1 From 4c54005ca438a8b46dd542b497d4f0dc2ca375e8 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Thu, 14 Jan 2010 16:10:57 -0800 Subject: rcu: 1Q2010 update for RCU documentation Add expedited functions. Review documentation and update obsolete verbiage. Also fix the advice for the RCU CPU-stall kernel configuration parameter, and document RCU CPU-stall warnings. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <12635142581866-git-send-email-> Signed-off-by: Ingo Molnar --- lib/Kconfig.debug | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 25c3ed5..6bf97d1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -765,9 +765,9 @@ config RCU_CPU_STALL_DETECTOR CPUs are delaying the current grace period, but only when the grace period extends for excessive time periods. - Say Y if you want RCU to perform such checks. + Say N if you want to disable such checks. - Say N if you are unsure. + Say Y if you are unsure. 
config KPROBES_SANITY_TEST bool "Kprobes sanity tests" -- cgit v1.1 From 660e2acad81c19b404f7d7d06e57a6d5e6ce7426 Mon Sep 17 00:00:00 2001 From: Chris Smith Date: Wed, 27 Jan 2010 22:03:11 +0900 Subject: sh: kmemleak support. Enables support for kmemleak on sh. Signed-off-by: Chris Smith Signed-off-by: Paul Mundt --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 25c3ed5..d62e3cd 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -355,7 +355,7 @@ config SLUB_STATS config DEBUG_KMEMLEAK bool "Kernel memory leak detector" depends on DEBUG_KERNEL && EXPERIMENTAL && !MEMORY_HOTPLUG && \ - (X86 || ARM || PPC || S390) + (X86 || ARM || PPC || S390 || SUPERH) select DEBUG_FS if SYSFS select STACKTRACE if STACKTRACE_SUPPORT -- cgit v1.1 From 24551f64d47af9539a7f324343bffeea09d9dcfa Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 12 Jan 2010 21:25:24 +0000 Subject: lmb: Add lmb_free() We can free memory allocated with lmb_alloc() by removing it from the list of reserved LMBs. Rework lmb_remove() to allow that possibility and add lmb_free() which exploits it. BenH: Removed some useless parenthesis Signed-off-by: Michael Ellerman Signed-off-by: Benjamin Herrenschmidt --- lib/lmb.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/lmb.c b/lib/lmb.c index 9cee171..b1fc526 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -205,9 +205,8 @@ long lmb_add(u64 base, u64 size) } -long lmb_remove(u64 base, u64 size) +static long __lmb_remove(struct lmb_region *rgn, u64 base, u64 size) { - struct lmb_region *rgn = &(lmb.memory); u64 rgnbegin, rgnend; u64 end = base + size; int i; @@ -254,6 +253,16 @@ long lmb_remove(u64 base, u64 size) return lmb_add_region(rgn, end, rgnend - end); } +long lmb_remove(u64 base, u64 size) +{ + return __lmb_remove(&lmb.memory, base, size); +} + +long __init lmb_free(u64 base, u64 size) +{ + return __lmb_remove(&lmb.reserved, base, size); +} + long __init lmb_reserve(u64 base, u64 size) { struct lmb_region *_rgn = &lmb.reserved; -- cgit v1.1 From 632ee200130899252508c478ad0e808222573fbc Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 22 Feb 2010 17:04:45 -0800 Subject: rcu: Introduce lockdep-based checking to RCU read-side primitives Inspection is proving insufficient to catch all RCU misuses, which is understandable given that rcu_dereference() might be protected by any of four different flavors of RCU (RCU, RCU-bh, RCU-sched, and SRCU), and might also/instead be protected by any of a number of locking primitives. It is therefore time to enlist the aid of lockdep. This set of patches is inspired by earlier work by Peter Zijlstra and Thomas Gleixner, and takes the following approach: o Set up separate lockdep classes for RCU, RCU-bh, and RCU-sched. o Set up separate lockdep classes for each instance of SRCU. o Create primitives that check for being in an RCU read-side critical section. These return exact answers if lockdep is fully enabled, but if unsure, report being in an RCU read-side critical section. (We want to avoid false positives!) The primitives are: For RCU: rcu_read_lock_held(void) For RCU-bh: rcu_read_lock_bh_held(void) For RCU-sched: rcu_read_lock_sched_held(void) For SRCU: srcu_read_lock_held(struct srcu_struct *sp) o Add rcu_dereference_check(), which takes a second argument in which one places a boolean expression based on the above primitives and/or lockdep_is_held(). 
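	For illustration, a pointer fetch protected either by rcu_read_lock()
	or by a lock could then be written as follows (a usage sketch; "gp"
	and "gp_lock" are illustrative names, not from this patch set):

		p = rcu_dereference_check(gp,
					  rcu_read_lock_held() ||
					  lockdep_is_held(&gp_lock));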
o A new kernel configuration parameter, CONFIG_PROVE_RCU, enables rcu_dereference_check(). This depends on CONFIG_PROVE_LOCKING, and should be quite helpful during the transition period while CONFIG_PROVE_RCU-unaware patches are in flight. The existing rcu_dereference() primitive does no checking, but upcoming patches will change that. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <1266887105-1528-1-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- lib/Kconfig.debug | 12 ++++++++++++ lib/debug_locks.c | 1 + 2 files changed, 13 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 6bf97d1..6af20a8 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -499,6 +499,18 @@ config PROVE_LOCKING For more details, see Documentation/lockdep-design.txt. +config PROVE_RCU + bool "RCU debugging: prove RCU correctness" + depends on PROVE_LOCKING + default n + help + This feature enables lockdep extensions that check for correct + use of RCU APIs. This is currently under development. Say Y + if you want to debug RCU usage or help work on the PROVE_RCU + feature. + + Say N if you are unsure. + config LOCKDEP bool depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT diff --git a/lib/debug_locks.c b/lib/debug_locks.c index bc3b117..5bf0020 100644 --- a/lib/debug_locks.c +++ b/lib/debug_locks.c @@ -23,6 +23,7 @@ * shut up after that. */ int debug_locks = 1; +EXPORT_SYMBOL_GPL(debug_locks); /* * The locking-testsuite uses to get a -- cgit v1.1 From 2676a58c980b7ef076cc9bbff3fd8c9d2d5417ea Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 22 Feb 2010 17:04:54 -0800 Subject: radix-tree: Disable RCU lockdep checking in radix tree Because the radix tree is used with many different locking designs, we cannot do any effective checking without changing the radix-tree APIs. It might make sense to do this later, but only if the RCU lockdep checking proves itself sufficiently valuable. Signed-off-by: Paul E. 
McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <1266887105-1528-10-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- lib/radix-tree.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) (limited to 'lib') diff --git a/lib/radix-tree.c b/lib/radix-tree.c index 92cdd99..6b9670d 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -364,7 +364,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, unsigned int height, shift; struct radix_tree_node *node, **slot; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (node == NULL) return NULL; @@ -384,7 +384,7 @@ static void *radix_tree_lookup_element(struct radix_tree_root *root, do { slot = (struct radix_tree_node **) (node->slots + ((index>>shift) & RADIX_TREE_MAP_MASK)); - node = rcu_dereference(*slot); + node = rcu_dereference_raw(*slot); if (node == NULL) return NULL; @@ -568,7 +568,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (node == NULL) return 0; @@ -602,7 +602,7 @@ int radix_tree_tag_get(struct radix_tree_root *root, BUG_ON(ret && saw_unset_tag); return !!ret; } - node = rcu_dereference(node->slots[offset]); + node = rcu_dereference_raw(node->slots[offset]); shift -= RADIX_TREE_MAP_SHIFT; height--; } @@ -711,7 +711,7 @@ __lookup(struct radix_tree_node *slot, void ***results, unsigned long index, } shift -= RADIX_TREE_MAP_SHIFT; - slot = rcu_dereference(slot->slots[i]); + slot = rcu_dereference_raw(slot->slots[i]); if (slot == NULL) goto out; } @@ -758,7 +758,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, unsigned long cur_index = first_index; unsigned int ret; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; @@ -787,7 +787,7 @@ radix_tree_gang_lookup(struct radix_tree_root *root, void **results, slot = *(((void ***)results)[ret + i]); if (!slot) continue; - results[ret + nr_found] = rcu_dereference(slot); + results[ret + nr_found] = rcu_dereference_raw(slot); nr_found++; } ret += nr_found; @@ -826,7 +826,7 @@ radix_tree_gang_lookup_slot(struct radix_tree_root *root, void ***results, unsigned long cur_index = first_index; unsigned int ret; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; @@ -915,7 +915,7 @@ __lookup_tag(struct radix_tree_node *slot, void ***results, unsigned long index, } } shift -= RADIX_TREE_MAP_SHIFT; - slot = rcu_dereference(slot->slots[i]); + slot = rcu_dereference_raw(slot->slots[i]); if (slot == NULL) break; } @@ -951,7 +951,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; @@ -980,7 +980,7 @@ radix_tree_gang_lookup_tag(struct radix_tree_root *root, void **results, slot = *(((void ***)results)[ret + i]); if (!slot) continue; - results[ret + nr_found] = rcu_dereference(slot); + results[ret + nr_found] = rcu_dereference_raw(slot); nr_found++; } ret += nr_found; @@ -1020,7 +1020,7 @@ radix_tree_gang_lookup_tag_slot(struct radix_tree_root *root, void 
***results, if (!root_tag_get(root, tag)) return 0; - node = rcu_dereference(root->rnode); + node = rcu_dereference_raw(root->rnode); if (!node) return 0; -- cgit v1.1 From 96be753af91fc9d582450a84722f6a6721d218ad Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 22 Feb 2010 17:04:55 -0800 Subject: idr: Apply lockdep-based diagnostics to rcu_dereference() uses Because idr can be used with any of a number of locks or with any flavor of RCU, just disable the lockdep-based diagnostics. If idr needs diagnostics, the check expression will need to be passed into the relevant idr primitives as an additional argument. Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <1266887105-1528-11-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- lib/idr.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/idr.c b/lib/idr.c index 0dc7822..2eb1dca 100644 --- a/lib/idr.c +++ b/lib/idr.c @@ -504,7 +504,7 @@ void *idr_find(struct idr *idp, int id) int n; struct idr_layer *p; - p = rcu_dereference(idp->top); + p = rcu_dereference_raw(idp->top); if (!p) return NULL; n = (p->layer+1) * IDR_BITS; @@ -519,7 +519,7 @@ void *idr_find(struct idr *idp, int id) while (n > 0 && p) { n -= IDR_BITS; BUG_ON(n != p->layer*IDR_BITS); - p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); } return((void *)p); } @@ -552,7 +552,7 @@ int idr_for_each(struct idr *idp, struct idr_layer **paa = &pa[0]; n = idp->layers * IDR_BITS; - p = rcu_dereference(idp->top); + p = rcu_dereference_raw(idp->top); max = 1 << n; id = 0; @@ -560,7 +560,7 @@ int idr_for_each(struct idr *idp, while (n > 0 && p) { n -= IDR_BITS; *paa++ = p; - p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]); + p = rcu_dereference_raw(p->ary[(id >> n) & IDR_MASK]); } if (p) { -- cgit v1.1 From 1ed509a225008c9e8c0644fbd22168e09a7383a0 Mon Sep 17 00:00:00 2001 From: "Paul E. McKenney" Date: Mon, 22 Feb 2010 17:05:05 -0800 Subject: rcu: Add RCU_CPU_STALL_VERBOSE to dump detailed per-task information When RCU detects a grace-period stall, it currently just prints out the PID of any tasks doing the stalling. This patch adds RCU_CPU_STALL_VERBOSE, which enables the more-verbose reporting from sched_show_task(). Suggested-by: Thomas Gleixner Signed-off-by: Paul E. McKenney Cc: laijs@cn.fujitsu.com Cc: dipankar@in.ibm.com Cc: mathieu.desnoyers@polymtl.ca Cc: josh@joshtriplett.org Cc: dvhltc@us.ibm.com Cc: niv@us.ibm.com Cc: peterz@infradead.org Cc: rostedt@goodmis.org Cc: Valdis.Kletnieks@vt.edu Cc: dhowells@redhat.com LKML-Reference: <1266887105-1528-21-git-send-email-paulmck@linux.vnet.ibm.com> Signed-off-by: Ingo Molnar --- lib/Kconfig.debug | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 6af20a8..4cdab45 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -781,6 +781,18 @@ config RCU_CPU_STALL_DETECTOR Say Y if you are unsure. 
+config RCU_CPU_STALL_VERBOSE + bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR" + depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU + default n + help + This option causes RCU to printk detailed per-task information + for any tasks that are stalling the current RCU grace period. + + Say N if you are unsure. + + Say Y if you want to enable such checks. + config KPROBES_SANITY_TEST bool "Kprobes sanity tests" depends on DEBUG_KERNEL -- cgit v1.1 From 84c6f88fc8265d7a712d7d6ed8fc1a878dfc84d1 Mon Sep 17 00:00:00 2001 From: Hitoshi Mitake Date: Thu, 4 Feb 2010 16:08:15 +0900 Subject: perf lock: Fix and add misc documentally things I've forgot to add 'perf lock' line to command-list.txt, so users of perf could not find perf lock when they type 'perf'. Fixing command-list.txt requires document (tools/perf/Documentation/perf-lock.txt). But perf lock is too much "under construction" to write a stable document, so this is something like pseudo document for now. And I wrote description of perf lock at help section of CONFIG_LOCK_STAT, this will navigate users of lock trace events. Signed-off-by: Hitoshi Mitake Cc: Peter Zijlstra Cc: Paul Mackerras Cc: Arnaldo Carvalho de Melo LKML-Reference: <1265267295-8388-1-git-send-email-mitake@dcl.info.waseda.ac.jp> Signed-off-by: Frederic Weisbecker --- lib/Kconfig.debug | 6 ++++++ 1 file changed, 6 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 25c3ed5..65f964e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -520,6 +520,12 @@ config LOCK_STAT For more details, see Documentation/lockstat.txt + You can analyze lock events with "perf lock", subcommand of perf. + If you want to use "perf lock", you need to turn on CONFIG_EVENT_TRACING. + + CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. + (CONFIG_LOCKDEP defines "acquire" and "release" events.) + config DEBUG_LOCKDEP bool "Lock dependency engine debugging" depends on DEBUG_KERNEL && LOCKDEP -- cgit v1.1 From dd8b1cf681eab40bc5afb67bdd06b2ca341f5669 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sat, 27 Feb 2010 17:10:39 +0100 Subject: perf: Remove pointless breakpoint union Remove pointless union in the breakpoint field of hw_perf_event. Signed-off-by: Frederic Weisbecker Cc: Peter Zijlstra Cc: Paul Mackerras --- lib/Kconfig.debug | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 65f964e..4dc24cc 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -520,11 +520,13 @@ config LOCK_STAT For more details, see Documentation/lockstat.txt - You can analyze lock events with "perf lock", subcommand of perf. - If you want to use "perf lock", you need to turn on CONFIG_EVENT_TRACING. + This also enables lock events required by "perf lock", + subcommand of perf. + If you want to use "perf lock", you also need to turn on + CONFIG_EVENT_TRACING. CONFIG_LOCK_STAT defines "contended" and "acquired" lock events. - (CONFIG_LOCKDEP defines "acquire" and "release" events.) + (CONFIG_LOCKDEP defines "acquire" and "release" events.) config DEBUG_LOCKDEP bool "Lock dependency engine debugging" -- cgit v1.1 From f047f4f3792344901e1ea18a180515d7d5349e02 Mon Sep 17 00:00:00 2001 From: Amerigo Wang Date: Fri, 5 Mar 2010 13:42:24 -0800 Subject: mm: use the same log level for show_mem() Use the same log level for printk's in show_mem(), so that those messages can be shown completely when using log level 6. 
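As background on the mechanics (a sketch, not text from the commit): a printk() without an explicit KERN_<level> prefix is logged at the default message loglevel, normally 4 (KERN_WARNING), while KERN_INFO marks level 6; a console loglevel of 6 only passes messages below level 6, so a report that mixed the two styles was printed only in part:

	printk(KERN_INFO "Mem-Info:\n");	/* level 6: suppressed at console loglevel 6 */
	printk("%lu pages RAM\n", total);	/* default level: shown */

Using one style for every line keeps the report intact whichever loglevel is chosen.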
Signed-off-by: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/show_mem.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/show_mem.c b/lib/show_mem.c index 238e72a..fdc77c8 100644 --- a/lib/show_mem.c +++ b/lib/show_mem.c @@ -15,7 +15,7 @@ void show_mem(void) unsigned long total = 0, reserved = 0, shared = 0, nonshared = 0, highmem = 0; - printk(KERN_INFO "Mem-Info:\n"); + printk("Mem-Info:\n"); show_free_areas(); for_each_online_pgdat(pgdat) { @@ -49,15 +49,15 @@ void show_mem(void) pgdat_resize_unlock(pgdat, &flags); } - printk(KERN_INFO "%lu pages RAM\n", total); + printk("%lu pages RAM\n", total); #ifdef CONFIG_HIGHMEM - printk(KERN_INFO "%lu pages HighMem\n", highmem); + printk("%lu pages HighMem\n", highmem); #endif - printk(KERN_INFO "%lu pages reserved\n", reserved); + printk("%lu pages reserved\n", reserved); - printk(KERN_INFO "%lu pages shared\n", shared); + printk("%lu pages shared\n", shared); - printk(KERN_INFO "%lu pages non-shared\n", nonshared); + printk("%lu pages non-shared\n", nonshared); #ifdef CONFIG_QUICKLIST - printk(KERN_INFO "%lu pages in pagetable cache\n", + printk("%lu pages in pagetable cache\n", quicklist_total_size()); #endif } -- cgit v1.1 From 0347af4ee3922220f6bfe74b87b526aa709a0365 Mon Sep 17 00:00:00 2001 From: Simon Kagstrom Date: Fri, 5 Mar 2010 13:42:49 -0800 Subject: lkdtm: add debugfs access and loosen KPROBE ties This adds a debugfs interface and additional failure modes to LKDTM to provide similar functionality to the provoke-crash driver submitted here: http://lwn.net/Articles/371208/ Crashes can now be induced either through module parameters (as before) or through the debugfs interface as in provoke-crash. The patch also provides a new "direct" interface, where KPROBES are not used, i.e., the crash is invoked directly upon write to the debugfs file. When built without KPROBES configured, only this mode is available. Signed-off-by: Simon Kagstrom Cc: M. Mohan Kumar Cc: Americo Wang Cc: David Woodhouse Cc: Ingo Molnar Cc: "Eric W. Biederman" Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig.debug | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5e3407d..b520ec1 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -864,8 +864,7 @@ config DEBUG_FORCE_WEAK_PER_CPU config LKDTM tristate "Linux Kernel Dump Test Tool Module" - depends on DEBUG_KERNEL - depends on KPROBES + depends on DEBUG_FS depends on BLOCK default n help @@ -876,7 +875,7 @@ config LKDTM called lkdtm.
Documentation on how to use the module can be found in - drivers/misc/lkdtm.c + Documentation/fault-injection/provoke-crashes.txt config FAULT_INJECTION bool "Fault-injection framework" -- cgit v1.1 From a11d2b64e1f2556953120d516241243ea365f0ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Goddard=20Rosa?= Date: Fri, 5 Mar 2010 13:43:11 -0800 Subject: lib/string.c: simplify stricmp() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Removes 32 bytes on core2 with gcc 4.4.1: text data bss dec hex filename 3196 0 0 3196 c7c lib/string-BEFORE.o 3164 0 0 3164 c5c lib/string-AFTER.o Signed-off-by: André Goddard Rosa Cc: Joe Perches Cc: Frederic Weisbecker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/string.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) (limited to 'lib') diff --git a/lib/string.c b/lib/string.c index a1cdcfc..0f86245 100644 --- a/lib/string.c +++ b/lib/string.c @@ -36,25 +36,21 @@ int strnicmp(const char *s1, const char *s2, size_t len) /* Yes, Virginia, it had better be unsigned */ unsigned char c1, c2; - c1 = c2 = 0; - if (len) { - do { - c1 = *s1; - c2 = *s2; - s1++; - s2++; - if (!c1) - break; - if (!c2) - break; - if (c1 == c2) - continue; - c1 = tolower(c1); - c2 = tolower(c2); - if (c1 != c2) - break; - } while (--len); - } + if (!len) + return 0; + + do { + c1 = *s1++; + c2 = *s2++; + if (!c1 || !c2) + break; + if (c1 == c2) + continue; + c1 = tolower(c1); + c2 = tolower(c2); + if (c1 != c2) + break; + } while (--len); return (int)c1 - (int)c2; } EXPORT_SYMBOL(strnicmp); -- cgit v1.1 From d6a2eedfddcded92c8f9b0ac022a99c4134696b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andr=C3=A9=20Goddard=20Rosa?= Date: Fri, 5 Mar 2010 13:43:12 -0800 Subject: lib/string.c: simplify strnstr() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: André Goddard Rosa Cc: Li Zefan Cc: Joe Perches Cc: Frederic Weisbecker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/string.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/string.c b/lib/string.c index 0f86245..f71bead 100644 --- a/lib/string.c +++ b/lib/string.c @@ -689,13 +689,13 @@ EXPORT_SYMBOL(strstr); */ char *strnstr(const char *s1, const char *s2, size_t len) { - size_t l1 = len, l2; + size_t l2; l2 = strlen(s2); if (!l2) return (char *)s1; - while (l1 >= l2) { - l1--; + while (len >= l2) { + len--; if (!memcmp(s1, s2, l2)) return (char *)s1; s1++; -- cgit v1.1 From 835cc0c8477fdbc59e0217891d6f11061b1ac4e2 Mon Sep 17 00:00:00 2001 From: Don Mullis Date: Fri, 5 Mar 2010 13:43:15 -0800 Subject: lib: more scalable list_sort() XFS and UBIFS can pass long lists to list_sort(); this alternative implementation scales better, reaching ~3x performance gain when list length exceeds the L2 cache size. Stand-alone program timings were run on a Core 2 duo L1=32KB L2=4MB, gcc-4.4, with flags extracted from an Ubuntu kernel build. Object size is 581 bytes compared to 455 for Mark J. Roberts' code. Worst case for either implementation is a list length just over a power of two, and to roughly the same degree, so here are timing results for a range of 2^N+1 lengths. List elements were 16 bytes each including malloc overhead; initial order was random. 
                            time (msec)
 loop_count    length   Tatham-Roberts   generic-Mullis-v2    ratio
    4000000         2              206                 294    1.427
    2000000         3              176                 227    1.289
    1000000         5              199                 172    0.864
     500000         9              235                 178    0.757
     250000        17              243                 182    0.748
     125000        33              261                 196    0.750
      62500        65              277                 209    0.754
      31250       129              292                 219    0.75
      15625       257              317                 235    0.741
       7812       513              340                 252    0.741
       3906      1025              362                 267    0.737
       1953      2049              388                 283    0.729   ~ L1 size
        976      4097              556                 323    0.580
        488      8193              678                 361    0.532
        244     16385              773                 395    0.510
        122     32769              844                 418    0.495
         61     65537              917                 454    0.495
         30    131073             1128                 543    0.481
         15    262145             2355                 869    0.369   ~ L2 size
          7    524289             5597                1714    0.306
          3   1048577             6218                2022    0.325
Mark's code does not actually implement the usual or generic mergesort, but rather a variant from Simon Tatham described here: http://www.chiark.greenend.org.uk/~sgtatham/algorithms/listsort.html Simon's algorithm performs O(log N) passes over the entire input list, doing merges of sublists that double in size on each pass. The generic algorithm instead merges pairs of equal length lists as early as possible, in recursive order. For either algorithm, the elements that extend the list beyond power-of-two length are a special case, handled as nearly as possible as a "rounding-up" to a full POT. Some intuition for the locality of reference implications of merge order may be gotten by watching this animation: http://www.sorting-algorithms.com/merge-sort Simon's algorithm requires only O(1) extra space rather than the generic algorithm's O(log N), but in my non-recursive implementation the actual O(log N) data is merely a vector of ~20 pointers, which I've put on the stack. Long-running list_sort() calls: If the list passed in may be long, or the client's cmp() callback function is slow, the client's cmp() may periodically invoke cond_resched() to voluntarily yield the CPU. All inner loops of list_sort() call back to cmp(). Stability of the sort: distinct elements that compare equal emerge from the sort in the same order as with Mark's code, for simple test cases. A boot-time test is provided to verify this and other correctness requirements. A kernel that uses drm.ko appears to run normally with this change; I have no suitable hardware to similarly test the use by UBIFS. [akpm@linux-foundation.org: style tweaks, fix comment, make list_sort_test __init] Signed-off-by: Don Mullis Cc: Dave Airlie Cc: Andi Kleen Cc: Dave Chinner Cc: Artem Bityutskiy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/list_sort.c | 252 ++++++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 183 insertions(+), 69 deletions(-) (limited to 'lib') diff --git a/lib/list_sort.c b/lib/list_sort.c index 19d11e0..362c10f 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -4,99 +4,213 @@ #include #include +#define MAX_LIST_LENGTH_BITS 20 + +/* + * Returns a list organized in an intermediate format suited + * to chaining of merge() calls: null-terminated, no reserved or + * sentinel head node, "prev" links not maintained. + */ +static struct list_head *merge(void *priv, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b), + struct list_head *a, struct list_head *b) +{ + struct list_head head, *tail = &head; + + while (a && b) { + /* if equal, take 'a' -- important for sort stability */ + if ((*cmp)(priv, a, b) <= 0) { + tail->next = a; + a = a->next; + } else { + tail->next = b; + b = b->next; + } + tail = tail->next; + } + tail->next = a?:b; + return head.next; +} + +/* + * Combine final list merge with restoration of standard doubly-linked + * list structure.
This approach duplicates code from merge(), but + * runs faster than the tidier alternatives of either a separate final + * prev-link restoration pass, or maintaining the prev links + * throughout. + */ +static void merge_and_restore_back_links(void *priv, + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b), + struct list_head *head, + struct list_head *a, struct list_head *b) +{ + struct list_head *tail = head; + + while (a && b) { + /* if equal, take 'a' -- important for sort stability */ + if ((*cmp)(priv, a, b) <= 0) { + tail->next = a; + a->prev = tail; + a = a->next; + } else { + tail->next = b; + b->prev = tail; + b = b->next; + } + tail = tail->next; + } + tail->next = a ? : b; + + do { + /* + * In worst cases this loop may run many iterations. + * Continue callbacks to the client even though no + * element comparison is needed, so the client's cmp() + * routine can invoke cond_resched() periodically. + */ + (*cmp)(priv, tail, tail); + + tail->next->prev = tail; + tail = tail->next; + } while (tail->next); + + tail->next = head; + head->prev = tail; +} + /** * list_sort - sort a list. * @priv: private data, passed to @cmp * @head: the list to sort * @cmp: the elements comparison function * - * This function has been implemented by Mark J Roberts . It - * implements "merge sort" which has O(nlog(n)) complexity. The list is sorted - * in ascending order. + * This function implements "merge sort" which has O(nlog(n)) complexity. + * The list is sorted in ascending order. * * The comparison function @cmp is supposed to return a negative value if @a is * less than @b, and a positive value if @a is greater than @b. If @a and @b * are equivalent, then it does not matter what this function returns. */ void list_sort(void *priv, struct list_head *head, - int (*cmp)(void *priv, struct list_head *a, - struct list_head *b)) + int (*cmp)(void *priv, struct list_head *a, + struct list_head *b)) { - struct list_head *p, *q, *e, *list, *tail, *oldhead; - int insize, nmerges, psize, qsize, i; + struct list_head *part[MAX_LIST_LENGTH_BITS+1]; /* sorted partial lists + -- last slot is a sentinel */ + int lev; /* index into part[] */ + int max_lev = 0; + struct list_head *list; if (list_empty(head)) return; + memset(part, 0, sizeof(part)); + + head->prev->next = NULL; list = head->next; - list_del(head); - insize = 1; - for (;;) { - p = oldhead = list; - list = tail = NULL; - nmerges = 0; - - while (p) { - nmerges++; - q = p; - psize = 0; - for (i = 0; i < insize; i++) { - psize++; - q = q->next == oldhead ? 
NULL : q->next; - if (!q) - break; - } - qsize = insize; - while (psize > 0 || (qsize > 0 && q)) { - if (!psize) { - e = q; - q = q->next; - qsize--; - if (q == oldhead) - q = NULL; - } else if (!qsize || !q) { - e = p; - p = p->next; - psize--; - if (p == oldhead) - p = NULL; - } else if (cmp(priv, p, q) <= 0) { - e = p; - p = p->next; - psize--; - if (p == oldhead) - p = NULL; - } else { - e = q; - q = q->next; - qsize--; - if (q == oldhead) - q = NULL; - } - if (tail) - tail->next = e; - else - list = e; - e->prev = tail; - tail = e; + while (list) { + struct list_head *cur = list; + list = list->next; + cur->next = NULL; + + for (lev = 0; part[lev]; lev++) { + cur = merge(priv, cmp, part[lev], cur); + part[lev] = NULL; + } + if (lev > max_lev) { + if (unlikely(lev >= ARRAY_SIZE(part)-1)) { + printk_once(KERN_DEBUG "list passed to" + " list_sort() too long for" + " efficiency\n"); + lev--; } - p = q; + max_lev = lev; } + part[lev] = cur; + } - tail->next = list; - list->prev = tail; + for (lev = 0; lev < max_lev; lev++) + if (part[lev]) + list = merge(priv, cmp, part[lev], list); - if (nmerges <= 1) - break; + merge_and_restore_back_links(priv, cmp, head, part[max_lev], list); +} +EXPORT_SYMBOL(list_sort); - insize *= 2; - } +#ifdef DEBUG_LIST_SORT +struct debug_el { + struct list_head l_h; + int value; + unsigned serial; +}; - head->next = list; - head->prev = list->prev; - list->prev->next = head; - list->prev = head; +static int cmp(void *priv, struct list_head *a, struct list_head *b) +{ + return container_of(a, struct debug_el, l_h)->value + - container_of(b, struct debug_el, l_h)->value; } -EXPORT_SYMBOL(list_sort); +/* + * The pattern of set bits in the list length determines which cases + * are hit in list_sort(). + */ +#define LIST_SORT_TEST_LENGTH (512+128+2) /* not including head */ + +static int __init list_sort_test(void) +{ + int i, r = 1, count; + struct list_head *head = kmalloc(sizeof(*head), GFP_KERNEL); + struct list_head *cur; + + printk(KERN_WARNING "testing list_sort()\n"); + + cur = head; + for (i = 0; i < LIST_SORT_TEST_LENGTH; i++) { + struct debug_el *el = kmalloc(sizeof(*el), GFP_KERNEL); + BUG_ON(!el); + /* force some equivalencies */ + el->value = (r = (r * 725861) % 6599) % (LIST_SORT_TEST_LENGTH/3); + el->serial = i; + + el->l_h.prev = cur; + cur->next = &el->l_h; + cur = cur->next; + } + head->prev = cur; + + list_sort(NULL, head, cmp); + + count = 1; + for (cur = head->next; cur->next != head; cur = cur->next) { + struct debug_el *el = container_of(cur, struct debug_el, l_h); + int cmp_result = cmp(NULL, cur, cur->next); + if (cur->next->prev != cur) { + printk(KERN_EMERG "list_sort() returned " + "a corrupted list!\n"); + return 1; + } else if (cmp_result > 0) { + printk(KERN_EMERG "list_sort() failed to sort!\n"); + return 1; + } else if (cmp_result == 0 && + el->serial >= container_of(cur->next, + struct debug_el, l_h)->serial) { + printk(KERN_EMERG "list_sort() failed to preserve order" + " of equivalent elements!\n"); + return 1; + } + kfree(cur->prev); + count++; + } + kfree(cur); + if (count != LIST_SORT_TEST_LENGTH) { + printk(KERN_EMERG "list_sort() returned list of" + "different length!\n"); + return 1; + } + return 0; +} +module_init(list_sort_test); +#endif -- cgit v1.1 From 02b12b7a28faa2e9ed5a361cd08ea576ab1f1509 Mon Sep 17 00:00:00 2001 From: Don Mullis Date: Fri, 5 Mar 2010 13:43:15 -0800 Subject: lib: revise list_sort() header comment Clarify and correct header comment of list_sort(). 
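[Editor's note: stepping back to the algorithm introduced by the previous patch, the following is a minimal stand-alone userspace sketch of the same bottom-up strategy, not the kernel code itself: slot i of a pending array holds either NULL or a sorted sublist of 2^i elements, so merges always pair equal-length lists, like carries in binary addition. All names (struct node, msort) are invented for the demo; the real list_sort() additionally restores prev links in a final combined pass and clamps lev for overlong lists.]

#include <stdio.h>
#include <stdlib.h>

struct node {                       /* hypothetical singly-linked node */
	int val;
	struct node *next;
};

/* Merge two NULL-terminated sorted lists; take from 'a' on ties,
 * which is what preserves sort stability. */
static struct node *merge(struct node *a, struct node *b)
{
	struct node head, *tail = &head;

	while (a && b) {
		if (a->val <= b->val) {
			tail->next = a;
			a = a->next;
		} else {
			tail->next = b;
			b = b->next;
		}
		tail = tail->next;
	}
	tail->next = a ? a : b;
	return head.next;
}

#define MAX_BITS 20                 /* mirrors MAX_LIST_LENGTH_BITS */

static struct node *msort(struct node *list)
{
	struct node *part[MAX_BITS + 1] = { 0 }; /* part[i]: 2^i elems or NULL */
	int lev, max_lev = 0;

	while (list) {
		struct node *cur = list;

		list = list->next;
		cur->next = NULL;
		/* carry upward: merge equal-sized lists while cache-warm
		 * (the kernel version also guards lev against overflow) */
		for (lev = 0; part[lev]; lev++) {
			cur = merge(part[lev], cur);
			part[lev] = NULL;
		}
		part[lev] = cur;
		if (lev > max_lev)
			max_lev = lev;
	}
	/* final fold of all remaining partial lists, smallest first */
	for (lev = 0, list = NULL; lev <= max_lev; lev++)
		if (part[lev])
			list = merge(part[lev], list);
	return list;
}

int main(void)
{
	int vals[] = { 5, 1, 4, 2, 3 };
	struct node n[5], *l = NULL, *p;

	for (int i = 4; i >= 0; i--) {
		n[i].val = vals[i];
		n[i].next = l;
		l = &n[i];
	}
	for (p = msort(l); p; p = p->next)
		printf("%d ", p->val);
	printf("\n");                   /* expected: 1 2 3 4 5 */
	return 0;
}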
Signed-off-by: Don Mullis Cc: Dave Airlie Cc: Andi Kleen Cc: Dave Chinner Cc: Artem Bityutskiy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/list_sort.c | 15 ++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/list_sort.c b/lib/list_sort.c index 362c10f..4b5cb79 100644 --- a/lib/list_sort.c +++ b/lib/list_sort.c @@ -81,17 +81,18 @@ static void merge_and_restore_back_links(void *priv, } /** - * list_sort - sort a list. - * @priv: private data, passed to @cmp + * list_sort - sort a list + * @priv: private data, opaque to list_sort(), passed to @cmp * @head: the list to sort * @cmp: the elements comparison function * - * This function implements "merge sort" which has O(nlog(n)) complexity. - * The list is sorted in ascending order. + * This function implements "merge sort", which has O(nlog(n)) + * complexity. * - * The comparison function @cmp is supposed to return a negative value if @a is - * less than @b, and a positive value if @a is greater than @b. If @a and @b - * are equivalent, then it does not matter what this function returns. + * The comparison function @cmp must return a negative value if @a + * should sort before @b, and a positive value if @a should sort after + * @b. If @a and @b are equivalent, and their original relative + * ordering is to be preserved, @cmp must return 0. */ void list_sort(void *priv, struct list_head *head, int (*cmp)(void *priv, struct list_head *a, -- cgit v1.1 From a069c266ae5fdfbf5b4aecf2c672413aa33b2504 Mon Sep 17 00:00:00 2001 From: Don Mullis Date: Fri, 5 Mar 2010 13:43:16 -0800 Subject: lib: build list_sort() only if needed Build list_sort() only for configs that need it -- those that don't save ~581 bytes (i386). Signed-off-by: Don Mullis Cc: Dave Airlie Cc: Andi Kleen Cc: Dave Chinner Cc: Artem Bityutskiy Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/Kconfig | 3 +++ lib/Makefile | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index 97b136f..8034c46 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -160,6 +160,9 @@ config TEXTSEARCH_BM config TEXTSEARCH_FSM tristate +config LIST_SORT + boolean + config HAS_IOMEM boolean depends on !NO_IOMEM diff --git a/lib/Makefile b/lib/Makefile index 3b0b4a6..e39c361 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o gcd.o list_sort.o + string_helpers.o gcd.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG @@ -40,6 +40,7 @@ lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o +obj-$(CONFIG_LIST_SORT) += list_sort.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o -- cgit v1.1 From 9a86e2bad0b9fbf3290ae496da6dab9536dd6bf7 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Fri, 5 Mar 2010 13:43:17 -0800 Subject: lib: fix first line of kernel-doc for a few functions The function name must be followed by a space, hyphen, space, and a short description. 
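[Editor's note: for reference, the first line of a kernel-doc comment in the format this patch enforces looks like the following; the function shown is an invented example, not one of the functions touched by the patch.]

/**
 * bitmap_weight_example - count the number of set bits in a region
 * @src: pointer to the bitmap
 * @nbits: number of valid bit positions in @src
 *
 * Returns the number of bits set in the first @nbits positions of @src.
 */
static int bitmap_weight_example(const unsigned long *src, int nbits);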
Signed-off-by: Ben Hutchings Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/bitmap.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/bitmap.c b/lib/bitmap.c index 11bf497..61998c5 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -487,7 +487,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen, EXPORT_SYMBOL(__bitmap_parse); /** - * bitmap_parse_user() + * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap * * @ubuf: pointer to user buffer containing string. * @ulen: buffer size in bytes. If string is smaller than this @@ -619,7 +619,7 @@ int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits) EXPORT_SYMBOL(bitmap_parselist); /** - * bitmap_pos_to_ord(buf, pos, bits) + * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap * @buf: pointer to a bitmap * @pos: a bit position in @buf (0 <= @pos < @bits) * @bits: number of valid bit positions in @buf @@ -655,7 +655,7 @@ static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits) } /** - * bitmap_ord_to_pos(buf, ord, bits) + * bitmap_ord_to_pos - find position of n-th set bit in bitmap * @buf: pointer to bitmap * @ord: ordinal bit position (n-th set bit, n >= 0) * @bits: number of valid bit positions in @buf -- cgit v1.1 From 08564fb7ab9ead9226b6154439c3fecd17972eb0 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Fri, 5 Mar 2010 13:43:18 -0800 Subject: bitmap: use for_each_set_bit() Replace open-coded loop with for_each_set_bit(). Signed-off-by: Akinobu Mita Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/bitmap.c | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/lib/bitmap.c b/lib/bitmap.c index 61998c5..ffb78c9 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -733,10 +733,9 @@ void bitmap_remap(unsigned long *dst, const unsigned long *src, bitmap_zero(dst, bits); w = bitmap_weight(new, bits); - for (oldbit = find_first_bit(src, bits); - oldbit < bits; - oldbit = find_next_bit(src, bits, oldbit + 1)) { + for_each_set_bit(oldbit, src, bits) { int n = bitmap_pos_to_ord(old, oldbit, bits); + if (n < 0 || w == 0) set_bit(oldbit, dst); /* identity map */ else @@ -903,9 +902,7 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig, */ m = 0; - for (n = find_first_bit(relmap, bits); - n < bits; - n = find_next_bit(relmap, bits, n + 1)) { + for_each_set_bit(n, relmap, bits) { /* m == bitmap_pos_to_ord(relmap, n, bits) */ if (test_bit(m, orig)) set_bit(n, dst); @@ -934,9 +931,7 @@ void bitmap_fold(unsigned long *dst, const unsigned long *orig, return; bitmap_zero(dst, bits); - for (oldbit = find_first_bit(orig, bits); - oldbit < bits; - oldbit = find_next_bit(orig, bits, oldbit + 1)) + for_each_set_bit(oldbit, orig, bits) set_bit(oldbit % sz, dst); } EXPORT_SYMBOL(bitmap_fold); -- cgit v1.1 From 4f2a9463d18517a9839401c3de6419ee1435875b Mon Sep 17 00:00:00 2001 From: Joakim Tjernlund Date: Fri, 5 Mar 2010 13:43:55 -0800 Subject: crc32: some minor cleanups Signed-off-by: Joakim Tjernlund Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/crc32.c | 30 ++++++++++++++---------------- 1 file changed, 14 insertions(+), 16 deletions(-) (limited to 'lib') diff --git a/lib/crc32.c b/lib/crc32.c index 02e3b31..0f45fbf 100644 --- a/lib/crc32.c +++ b/lib/crc32.c @@ -30,11 +30,15 @@ #include #include "crc32defs.h" #if CRC_LE_BITS == 8 -#define tole(x) __constant_cpu_to_le32(x) -#define tobe(x) __constant_cpu_to_be32(x) +# 
define tole(x) __constant_cpu_to_le32(x) #else -#define tole(x) (x) -#define tobe(x) (x) +# define tole(x) (x) +#endif + +#if CRC_BE_BITS == 8 +# define tobe(x) __constant_cpu_to_be32(x) +#else +# define tobe(x) (x) #endif #include "crc32table.h" @@ -52,20 +56,19 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) # else # define DO_CRC(x) crc = tab[((crc >> 24) ^ (x)) & 255] ^ (crc << 8) # endif - const u32 *b = (const u32 *)buf; + const u32 *b; size_t rem_len; /* Align it */ - if (unlikely((long)b & 3 && len)) { - u8 *p = (u8 *)b; + if (unlikely((long)buf & 3 && len)) { do { - DO_CRC(*p++); - } while ((--len) && ((long)p)&3); - b = (u32 *)p; + DO_CRC(*buf++); + } while ((--len) && ((long)buf)&3); } rem_len = len & 3; /* load data 32 bits wide, xor data 32 bits wide. */ len = len >> 2; + b = (const u32 *)buf; for (--b; len; --len) { crc ^= *++b; /* use pre increment for speed */ DO_CRC(0); @@ -82,6 +85,7 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 *tab) } while (--len); } return crc; +#undef DO_CRC } #endif /** @@ -119,9 +123,6 @@ u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len) crc = __cpu_to_le32(crc); crc = crc32_body(crc, p, len, tab); return __le32_to_cpu(crc); -#undef ENDIAN_SHIFT -#undef DO_CRC - # elif CRC_LE_BITS == 4 while (len--) { crc ^= *p++; @@ -179,9 +180,6 @@ u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len) crc = __cpu_to_be32(crc); crc = crc32_body(crc, p, len, tab); return __be32_to_cpu(crc); -#undef ENDIAN_SHIFT -#undef DO_CRC - # elif CRC_BE_BITS == 4 while (len--) { crc ^= *p++ << 24; -- cgit v1.1 From ef0658f3de484bf9b173639cd47544584e01efa5 Mon Sep 17 00:00:00 2001 From: Joe Perches Date: Sat, 6 Mar 2010 17:10:14 -0800 Subject: vsprintf.c: Reduce sizeof struct printf_spec from 24 to 8 bytes Reducing the size of struct printf_spec is a good thing because multiple instances are commonly passed on stack. It's possible for type to be u8 and field_width to be s8, but this is likely small enough for now. 
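[Editor's note: the saving comes purely from narrowing the fields so they pack without padding. A minimal stand-alone demonstration, assuming a typical ABI where int is 4 bytes; the struct names are invented for the demo and the expected output on common targets is 24 vs 8.]

#include <stdio.h>
#include <stdint.h>

/* original layout: six native ints */
struct spec_before {
	int type;
	int flags;
	int field_width;
	int base;
	int precision;
	int qualifier;
};

/* packed layout mirroring the patch: one u16, one s16, four 8-bit fields */
struct spec_after {
	uint16_t type;
	int16_t  field_width;
	uint8_t  flags;
	uint8_t  base;
	int8_t   precision;
	uint8_t  qualifier;
};

int main(void)
{
	printf("before: %zu bytes\n", sizeof(struct spec_before));
	printf("after:  %zu bytes\n", sizeof(struct spec_after));
	return 0;
}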
Signed-off-by: Joe Perches Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index af4aaa6..e994cea 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -408,12 +408,12 @@ enum format_type { }; struct printf_spec { - enum format_type type; - int flags; /* flags to number() */ - int field_width; /* width of output field */ - int base; - int precision; /* # of digits/chars */ - int qualifier; + u16 type; + s16 field_width; /* width of output field */ + u8 flags; /* flags to number() */ + u8 base; + s8 precision; /* # of digits/chars */ + u8 qualifier; }; static char *number(char *buf, char *end, unsigned long long num, @@ -1333,7 +1333,7 @@ int vsnprintf(char *buf, size_t size, const char *fmt, va_list args) break; case FORMAT_TYPE_NRCHARS: { - int qualifier = spec.qualifier; + u8 qualifier = spec.qualifier; if (qualifier == 'l') { long *ip = va_arg(args, long *); @@ -1619,7 +1619,7 @@ do { \ case FORMAT_TYPE_NRCHARS: { /* skip %n 's argument */ - int qualifier = spec.qualifier; + u8 qualifier = spec.qualifier; void *skip_arg; if (qualifier == 'l') skip_arg = va_arg(args, long *); @@ -1885,7 +1885,9 @@ int vsscanf(const char *buf, const char *fmt, va_list args) char *next; char digit; int num = 0; - int qualifier, base, field_width; + u8 qualifier; + u8 base; + s16 field_width; bool is_sign; while (*fmt && *str) { @@ -1963,7 +1965,7 @@ int vsscanf(const char *buf, const char *fmt, va_list args) { char *s = (char *)va_arg(args, char *); if (field_width == -1) - field_width = INT_MAX; + field_width = SHORT_MAX; /* first, skip leading white space in buffer */ str = skip_spaces(str); -- cgit v1.1 From b89dc5d6b0981c1096ccffbf8f4413c7bb1bcc0a Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 5 Mar 2010 10:47:31 -0700 Subject: vsprintf: clarify comments for printf_spec flags Add clues about what the SMALL and SPECIAL flags do. Signed-off-by: Bjorn Helgaas Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index e994cea..a900d13 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -381,8 +381,8 @@ static noinline char *put_dec(char *buf, unsigned long long num) #define PLUS 4 /* show plus */ #define SPACE 8 /* space if plus */ #define LEFT 16 /* left justified */ -#define SMALL 32 /* Must be 32 == 0x20 */ -#define SPECIAL 64 /* 0x */ +#define SMALL 32 /* use lowercase in hex (must be 32 == 0x20) */ +#define SPECIAL 64 /* prefix hex with "0x", octal with "0" */ enum format_type { FORMAT_TYPE_NONE, /* Just a string part */ -- cgit v1.1 From 4da0b66c6e9ea7ba78a19f9f186779826d89f8b0 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 5 Mar 2010 10:47:37 -0700 Subject: vsprintf: move %pR resource printf_specs off the stack This adds separate I/O and memory specs, so we don't have to change the field width in a shared spec, which then lets us make all the specs const and static, since they never change. 
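[Editor's note: the design choice here is to stop initializing printf_spec values on the stack at each call and instead select among immutable, statically allocated specs through a pointer. A hedged stand-alone sketch of the pattern; all names are invented, and the real code is the resource_string() diff that follows.]

#include <stdio.h>

struct spec {
	int base;
	int field_width;
};

/* one immutable spec per resource class; lives in .rodata, never copied */
static const struct spec io_spec  = { 16, 4 };
static const struct spec mem_spec = { 16, 10 };
static const struct spec dec_spec = { 10, -1 };

enum res_kind { RES_IO, RES_MEM, RES_DMA };

static const struct spec *pick_spec(enum res_kind kind)
{
	switch (kind) {
	case RES_IO:
		return &io_spec;
	case RES_MEM:
		return &mem_spec;
	default:
		return &dec_spec;
	}
}

int main(void)
{
	const struct spec *s = pick_spec(RES_MEM);

	printf("base=%d width=%d\n", s->base, s->field_width);
	return 0;
}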
Signed-off-by: Bjorn Helgaas Signed-off-by: Linus Torvalds --- lib/vsprintf.c | 45 ++++++++++++++++++++++++--------------------- 1 file changed, 24 insertions(+), 21 deletions(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index a900d13..0d461c7 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -597,22 +597,29 @@ static char *resource_string(char *buf, char *end, struct resource *res, #ifndef MEM_RSRC_PRINTK_SIZE #define MEM_RSRC_PRINTK_SIZE 10 #endif - struct printf_spec hex_spec = { + static const struct printf_spec io_spec = { .base = 16, + .field_width = IO_RSRC_PRINTK_SIZE, .precision = -1, .flags = SPECIAL | SMALL | ZEROPAD, }; - struct printf_spec dec_spec = { + static const struct printf_spec mem_spec = { + .base = 16, + .field_width = MEM_RSRC_PRINTK_SIZE, + .precision = -1, + .flags = SPECIAL | SMALL | ZEROPAD, + }; + static const struct printf_spec dec_spec = { .base = 10, .precision = -1, .flags = 0, }; - struct printf_spec str_spec = { + static const struct printf_spec str_spec = { .field_width = -1, .precision = 10, .flags = LEFT, }; - struct printf_spec flag_spec = { + static const struct printf_spec flag_spec = { .base = 16, .precision = -1, .flags = SPECIAL | SMALL, @@ -628,35 +635,31 @@ static char *resource_string(char *buf, char *end, struct resource *res, 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; char *p = sym, *pend = sym + sizeof(sym); - int size = -1, addr = 0; int decode = (fmt[0] == 'R') ? 1 : 0; - - if (res->flags & IORESOURCE_IO) { - size = IO_RSRC_PRINTK_SIZE; - addr = 1; - } else if (res->flags & IORESOURCE_MEM) { - size = MEM_RSRC_PRINTK_SIZE; - addr = 1; - } + const struct printf_spec *specp; *p++ = '['; - if (res->flags & IORESOURCE_IO) + if (res->flags & IORESOURCE_IO) { p = string(p, pend, "io ", str_spec); - else if (res->flags & IORESOURCE_MEM) + specp = &io_spec; + } else if (res->flags & IORESOURCE_MEM) { p = string(p, pend, "mem ", str_spec); - else if (res->flags & IORESOURCE_IRQ) + specp = &mem_spec; + } else if (res->flags & IORESOURCE_IRQ) { p = string(p, pend, "irq ", str_spec); - else if (res->flags & IORESOURCE_DMA) + specp = &dec_spec; + } else if (res->flags & IORESOURCE_DMA) { p = string(p, pend, "dma ", str_spec); - else { + specp = &dec_spec; + } else { p = string(p, pend, "??? ", str_spec); + specp = &mem_spec; decode = 0; } - hex_spec.field_width = size; - p = number(p, pend, res->start, addr ? hex_spec : dec_spec); + p = number(p, pend, res->start, *specp); if (res->start != res->end) { *p++ = '-'; - p = number(p, pend, res->end, addr ? hex_spec : dec_spec); + p = number(p, pend, res->end, *specp); } if (decode) { if (res->flags & IORESOURCE_MEM_64) -- cgit v1.1 From b8fa05719ba4349be80ce929237249b57886a203 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 7 Mar 2010 09:54:44 -0800 Subject: Revert "lib: build list_sort() only if needed" This reverts commit a069c266ae5fdfbf5b4aecf2c672413aa33b2504. It turns out that not only was it missing a case (XFS) that needed it, but perhaps more importantly, people sometimes want to enable new modules that they hadn't had enabled before, and if such a module uses list_sort(), it can't easily be inserted any more. So rather than add a "select LIST_SORT" to the XFS case, just leave it compiled in. It's not all _that_ big, after all, and the inconvenience isn't worth it. 
Requested-by: Alexey Dobriyan Cc: Christoph Hellwig Cc: Don Mullis Cc: Andrew Morton Cc: Dave Chinner Signed-off-by: Linus Torvalds --- lib/Kconfig | 3 --- lib/Makefile | 3 +-- 2 files changed, 1 insertion(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index 496d16e..170d8ca 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -160,9 +160,6 @@ config TEXTSEARCH_BM config TEXTSEARCH_FSM tristate -config LIST_SORT - boolean - config BTREE boolean diff --git a/lib/Makefile b/lib/Makefile index 59e46a0..2e152ae 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -21,7 +21,7 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ - string_helpers.o gcd.o + string_helpers.o gcd.o list_sort.o ifeq ($(CONFIG_DEBUG_KOBJECT),y) CFLAGS_kobject.o += -DDEBUG @@ -40,7 +40,6 @@ lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o -obj-$(CONFIG_LIST_SORT) += list_sort.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o obj-$(CONFIG_BTREE) += btree.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o -- cgit v1.1 From 9cd43611ccfb46632bfa7d19f688924ea93f1613 Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Thu, 31 Dec 2009 14:52:51 +0100 Subject: kobject: Constify struct kset_uevent_ops Constify struct kset_uevent_ops. This is part of the ops structure constification effort started by Arjan van de Ven et al. Benefits of this constification: * prevents modification of data that is shared (referenced) by many other structure instances at runtime * detects/prevents accidental (but not intentional) modification attempts on archs that enforce read-only kernel data at runtime * potentially better optimized code as the compiler can assume that the const data cannot be changed * the compiler/linker move const data into .rodata and therefore exclude them from false sharing Signed-off-by: Emese Revfy Signed-off-by: Greg Kroah-Hartman --- lib/kobject.c | 4 ++-- lib/kobject_uevent.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) (limited to 'lib') diff --git a/lib/kobject.c b/lib/kobject.c index b512b74..cecf5a0 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = { * If the kset was not able to be created, NULL will be returned. */ static struct kset *kset_create(const char *name, - struct kset_uevent_ops *uevent_ops, + const struct kset_uevent_ops *uevent_ops, struct kobject *parent_kobj) { struct kset *kset; @@ -832,7 +832,7 @@ static struct kset *kset_create(const char *name, * If the kset was not able to be created, NULL will be returned. 
*/ struct kset *kset_create_and_add(const char *name, - struct kset_uevent_ops *uevent_ops, + const struct kset_uevent_ops *uevent_ops, struct kobject *parent_kobj) { struct kset *kset; diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 920a3ca..c9d3a3e 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, const char *subsystem; struct kobject *top_kobj; struct kset *kset; - struct kset_uevent_ops *uevent_ops; + const struct kset_uevent_ops *uevent_ops; u64 seq; int i = 0; int retval = 0; -- cgit v1.1 From 52cf25d0ab7f78eeecc59ac652ed5090f69b619e Mon Sep 17 00:00:00 2001 From: Emese Revfy Date: Tue, 19 Jan 2010 02:58:23 +0100 Subject: Driver core: Constify struct sysfs_ops in struct kobj_type Constify struct sysfs_ops. This is part of the ops structure constification effort started by Arjan van de Ven et al. Benefits of this constification: * prevents modification of data that is shared (referenced) by many other structure instances at runtime * detects/prevents accidental (but not intentional) modification attempts on archs that enforce read-only kernel data at runtime * potentially better optimized code as the compiler can assume that the const data cannot be changed * the compiler/linker move const data into .rodata and therefore exclude them from false sharing Signed-off-by: Emese Revfy Acked-by: David Teigland Acked-by: Matt Domsch Acked-by: Maciej Sosnowski Acked-by: Hans J. Koch Acked-by: Pekka Enberg Acked-by: Jens Axboe Acked-by: Stephen Hemminger Signed-off-by: Greg Kroah-Hartman --- lib/kobject.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/kobject.c b/lib/kobject.c index cecf5a0..8115eb1 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct kobject *kobj, struct attribute *attr, return ret; } -struct sysfs_ops kobj_sysfs_ops = { +const struct sysfs_ops kobj_sysfs_ops = { .show = kobj_attr_show, .store = kobj_attr_store, }; -- cgit v1.1 From e69eae65523b457a3ac4262a66cfff57f2c924a9 Mon Sep 17 00:00:00 2001 From: Joakim Tjernlund Date: Wed, 10 Mar 2010 15:23:55 -0800 Subject: zlib: make new optimized inflate endian independent Commit 6846ee5ca68d81e6baccf0d56221d7a00c1be18b ("zlib: Fix build of powerpc boot wrapper") made the new optimized inflate only available on arch's that define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS. This patch will again enable the optimization for all arch's by defining our own endian independent version of unaligned access. As an added bonus, arch's that define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS do a plain load instead. 
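[Editor's note: the union-based helper added by this patch can be exercised in plain userspace. This sketch assumes nothing beyond standard C and mirrors the get_unaligned16() logic: the two bytes are copied individually, so the result matches a native aligned load on big- and little-endian CPUs alike, with no risk of an alignment trap.]

#include <stdio.h>
#include <string.h>

union uu {
	unsigned short us;
	unsigned char  b[2];
};

/* read a 16-bit value from a possibly misaligned address, byte by byte */
static unsigned short get_unaligned16(const unsigned short *p)
{
	union uu mm;
	const unsigned char *b = (const unsigned char *)p;

	mm.b[0] = b[0];
	mm.b[1] = b[1];
	return mm.us;
}

int main(void)
{
	unsigned char buf[3] = { 0x00, 0x34, 0x12 };
	/* &buf[1] is odd, so a direct short load could fault on some CPUs */
	unsigned short v = get_unaligned16((const unsigned short *)(buf + 1));
	unsigned short ref;

	memcpy(&ref, buf + 1, 2);   /* memcpy as the portable reference */
	printf("v=0x%04x ref=0x%04x\n", v, ref);
	return 0;
}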
Signed-off-by: Joakim Tjernlund Cc: Anton Blanchard Cc: Benjamin Herrenschmidt Cc: David Woodhouse Cc: Kumar Gala Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/zlib_inflate/inffast.c | 70 ++++++++++++++++++++-------------------------- 1 file changed, 30 insertions(+), 40 deletions(-) (limited to 'lib') diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index 215447c..fa62fc7 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c @@ -8,21 +8,6 @@ #include "inflate.h" #include "inffast.h" -/* Only do the unaligned "Faster" variant when - * CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS is set - * - * On powerpc, it won't be as we don't include autoconf.h - * automatically for the boot wrapper, which is intended as - * we run in an environment where we may not be able to deal - * with (even rare) alignment faults. In addition, we do not - * define __KERNEL__ for arch/powerpc/boot unlike x86 - */ - -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS -#include -#include -#endif - #ifndef ASMINF /* Allow machine dependent optimization for post-increment or pre-increment. @@ -36,14 +21,31 @@ - Pentium III (Anderson) - M68060 (Nikl) */ +union uu { + unsigned short us; + unsigned char b[2]; +}; + +/* Endian independed version */ +static inline unsigned short +get_unaligned16(const unsigned short *p) +{ + union uu mm; + unsigned char *b = (unsigned char *)p; + + mm.b[0] = b[0]; + mm.b[1] = b[1]; + return mm.us; +} + #ifdef POSTINC # define OFF 0 # define PUP(a) *(a)++ -# define UP_UNALIGNED(a) get_unaligned((a)++) +# define UP_UNALIGNED(a) get_unaligned16((a)++) #else # define OFF 1 # define PUP(a) *++(a) -# define UP_UNALIGNED(a) get_unaligned(++(a)) +# define UP_UNALIGNED(a) get_unaligned16(++(a)) #endif /* @@ -256,7 +258,6 @@ void inflate_fast(z_streamp strm, unsigned start) } } else { -#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS unsigned short *sout; unsigned long loops; @@ -274,7 +275,11 @@ void inflate_fast(z_streamp strm, unsigned start) sfrom = (unsigned short *)(from - OFF); loops = len >> 1; do +#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS + PUP(sout) = PUP(sfrom); +#else PUP(sout) = UP_UNALIGNED(sfrom); +#endif while (--loops); out = (unsigned char *)sout + OFF; from = (unsigned char *)sfrom + OFF; @@ -282,14 +287,13 @@ void inflate_fast(z_streamp strm, unsigned start) unsigned short pat16; pat16 = *(sout-2+2*OFF); - if (dist == 1) -#if defined(__BIG_ENDIAN) - pat16 = (pat16 & 0xff) | ((pat16 & 0xff) << 8); -#elif defined(__LITTLE_ENDIAN) - pat16 = (pat16 & 0xff00) | ((pat16 & 0xff00) >> 8); -#else -#error __BIG_ENDIAN nor __LITTLE_ENDIAN is defined -#endif + if (dist == 1) { + union uu mm; + /* copy one char pattern to both bytes */ + mm.us = pat16; + mm.b[0] = mm.b[1]; + pat16 = mm.us; + } loops = len >> 1; do PUP(sout) = pat16; @@ -298,20 +302,6 @@ void inflate_fast(z_streamp strm, unsigned start) } if (len & 1) PUP(out) = PUP(from); -#else /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ - from = out - dist; /* copy direct from output */ - do { /* minimum length is three */ - PUP(out) = PUP(from); - PUP(out) = PUP(from); - PUP(out) = PUP(from); - len -= 3; - } while (len > 2); - if (len) { - PUP(out) = PUP(from); - if (len > 1) - PUP(out) = PUP(from); - } -#endif /* !CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ } } else if ((op & 64) == 0) { /* 2nd level distance code */ -- cgit v1.1 From 51ea3f6a4571e9283e2ff79b74bcedfc2986dbe2 Mon Sep 17 00:00:00 2001 From: Joakim Tjernlund Date: Wed, 10 Mar 2010 15:23:57 -0800 Subject: inflate_fast: sout is 
already a short so ptr arith was off by one. inflate_fast() can do either POST INC or PRE INC on its pointers walking the memory to decompress. Default is PRE INC. The sout pointer offset was miscalculated in one case as the calculation assumed sout was a char * This breaks inflate_fast() iff configured to do POST INC. Signed-off-by: Joakim Tjernlund Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- lib/zlib_inflate/inffast.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/zlib_inflate/inffast.c b/lib/zlib_inflate/inffast.c index fa62fc7..2c13ecc 100644 --- a/lib/zlib_inflate/inffast.c +++ b/lib/zlib_inflate/inffast.c @@ -286,7 +286,7 @@ void inflate_fast(z_streamp strm, unsigned start) } else { /* dist == 1 or dist == 2 */ unsigned short pat16; - pat16 = *(sout-2+2*OFF); + pat16 = *(sout-1+OFF); if (dist == 1) { union uu mm; /* copy one char pattern to both bytes */ -- cgit v1.1 From 0f4050c7d3ba0275e5f39513c0670a717d43048c Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 5 Mar 2010 10:47:42 -0700 Subject: resource: add bus number support Add support for bus number resources. This is for bridges with a range of bus numbers behind them. Signed-off-by: Bjorn Helgaas Signed-off-by: Len Brown --- lib/vsprintf.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index 0d461c7..ebbecf9 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -609,6 +609,12 @@ static char *resource_string(char *buf, char *end, struct resource *res, .precision = -1, .flags = SPECIAL | SMALL | ZEROPAD, }; + static const struct printf_spec bus_spec = { + .base = 16, + .field_width = 2, + .precision = -1, + .flags = SMALL | ZEROPAD, + }; static const struct printf_spec dec_spec = { .base = 10, .precision = -1, @@ -651,6 +657,9 @@ static char *resource_string(char *buf, char *end, struct resource *res, } else if (res->flags & IORESOURCE_DMA) { p = string(p, pend, "dma ", str_spec); specp = &dec_spec; + } else if (res->flags & IORESOURCE_BUS) { + p = string(p, pend, "bus ", str_spec); + specp = &bus_spec; } else { p = string(p, pend, "??? ", str_spec); specp = &mem_spec; -- cgit v1.1 From 9d7cca04211d4eb104eaaa424b98f650bc29c730 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Fri, 5 Mar 2010 10:47:47 -0700 Subject: resource: add window support Add support for resource windows. This is for bridge resources, i.e., regions where a bridge forwards transactions from the primary to the secondary side. 
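[Editor's note: these last two resource patches extend %pR decoding with new classes. To make the effect concrete, here is a hedged userspace sketch of the decode-and-print step: a label and a zero-padded hex width are chosen per resource class, as resource_string() does with its per-class specs. The flag values and names below are invented for illustration; the real IORESOURCE_* bits live in include/linux/ioport.h.]

#include <stdio.h>

#define R_IO  0x1   /* hypothetical flag bits for this sketch only */
#define R_MEM 0x2
#define R_BUS 0x4

struct res { unsigned long start, end, flags; };

/* print "[mem 0x00fe000000-0x00fe0fffff]" style ranges, %pR style */
static void print_res(const struct res *r)
{
	const char *kind = "??? ";
	int width = 10;             /* mem default, as MEM_RSRC_PRINTK_SIZE */

	if (r->flags & R_IO) {
		kind = "io ";
		width = 4;
	} else if (r->flags & R_MEM) {
		kind = "mem ";
	} else if (r->flags & R_BUS) {
		kind = "bus ";
		width = 2;          /* bus numbers are two hex digits */
	}
	printf("[%s0x%0*lx-0x%0*lx]\n", kind, width, r->start, width, r->end);
}

int main(void)
{
	struct res mem = { 0xfe000000, 0xfe0fffff, R_MEM };
	struct res bus = { 0, 0xff, R_BUS };

	print_res(&mem);            /* [mem 0x00fe000000-0x00fe0fffff] */
	print_res(&bus);            /* [bus 0x00-0xff] */
	return 0;
}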
Signed-off-by: Bjorn Helgaas Signed-off-by: Len Brown --- lib/vsprintf.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/vsprintf.c b/lib/vsprintf.c index ebbecf9..24112e5 100644 --- a/lib/vsprintf.c +++ b/lib/vsprintf.c @@ -635,7 +635,7 @@ static char *resource_string(char *buf, char *end, struct resource *res, * 64-bit res (sizeof==8): 20 chars in dec, 18 in hex ("0x" + 16) */ #define RSRC_BUF_SIZE ((2 * sizeof(resource_size_t)) + 4) #define FLAG_BUF_SIZE (2 * sizeof(res->flags)) -#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref disabled]") +#define DECODED_BUF_SIZE sizeof("[mem - 64bit pref window disabled]") #define RAW_BUF_SIZE sizeof("[mem - flags 0x]") char sym[max(2*RSRC_BUF_SIZE + DECODED_BUF_SIZE, 2*RSRC_BUF_SIZE + FLAG_BUF_SIZE + RAW_BUF_SIZE)]; @@ -675,6 +675,8 @@ static char *resource_string(char *buf, char *end, struct resource *res, p = string(p, pend, " 64bit", str_spec); if (res->flags & IORESOURCE_PREFETCH) p = string(p, pend, " pref", str_spec); + if (res->flags & IORESOURCE_WINDOW) + p = string(p, pend, " window", str_spec); if (res->flags & IORESOURCE_DISABLED) p = string(p, pend, " disabled", str_spec); } else { -- cgit v1.1
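[Editor's note: as a closing illustration of the decode path these resource patches extend, a hedged sketch of the suffix-building step. Each decoded attribute appends one token, and the buffer is sized from the worst-case string with the same sizeof() trick as DECODED_BUF_SIZE above. The flag names are invented stand-ins for IORESOURCE_MEM_64, IORESOURCE_PREFETCH, IORESOURCE_WINDOW and IORESOURCE_DISABLED.]

#include <stdio.h>
#include <string.h>

#define F_64BIT    0x1   /* hypothetical flag bits for this sketch only */
#define F_PREF     0x2
#define F_WINDOW   0x4
#define F_DISABLED 0x8

/* size the buffer from the longest possible decoded form */
#define SUFFIX_BUF_SIZE sizeof(" 64bit pref window disabled")

static void decode_suffix(unsigned long flags, char *buf, size_t len)
{
	buf[0] = '\0';
	if (flags & F_64BIT)
		strncat(buf, " 64bit", len - strlen(buf) - 1);
	if (flags & F_PREF)
		strncat(buf, " pref", len - strlen(buf) - 1);
	if (flags & F_WINDOW)
		strncat(buf, " window", len - strlen(buf) - 1);
	if (flags & F_DISABLED)
		strncat(buf, " disabled", len - strlen(buf) - 1);
}

int main(void)
{
	char suffix[SUFFIX_BUF_SIZE];

	decode_suffix(F_64BIT | F_PREF | F_WINDOW, suffix, sizeof(suffix));
	printf("[mem 0xfe000000-0xfe0fffff%s]\n", suffix);
	/* prints: [mem 0xfe000000-0xfe0fffff 64bit pref window] */
	return 0;
}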