author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-15 10:40:41 -0700
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-15 10:40:41 -0700
commit		419217cb1d0266f62cbea6cdc6b1d1324350bc34 (patch)
tree		01f80c026cc94dfc13b3a16d2fce3ba41c69d54f /kernel
parent		4937ce87959629d31e9b09cf5bdf1e12a305c805 (diff)
parent		14358e6ddaed27499d7d366b3e65c3e46b39e1c4 (diff)
Merge branch 'v2.6.24-lockdep' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep
* 'v2.6.24-lockdep' of git://git.kernel.org/pub/scm/linux/kernel/git/peterz/linux-2.6-lockdep:
lockdep: annotate dir vs file i_mutex
lockdep: per filesystem inode lock class
lockdep: annotate kprobes irq fiddling
lockdep: annotate rcu_read_{,un}lock{,_bh}
lockdep: annotate journal_start()
lockdep: s390: connect the sysexit hook
lockdep: x86_64: connect the sysexit hook
lockdep: i386: connect the sysexit hook
lockdep: syscall exit check
lockdep: fixup mutex annotations
lockdep: fix mismatched lockdep_depth/curr_chain_hash
lockdep: Avoid /proc/lockdep & lock_stat infinite output
lockdep: maintainers
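Note that several of the listed patches touch fs/, arch/ and include/ and therefore do not appear in the kernel/-limited diffstat and diff below. The i_mutex annotations, for example, change no locking at all; they only reassign lockdep class keys so that legitimate nesting (a directory's i_mutex over a file's) stops looking like an inversion. A minimal sketch of that technique — the key and function names here are illustrative assumptions, not the patch's own:

#include <linux/fs.h>
#include <linux/lockdep.h>

/* Illustrative only: give directory inodes' i_mutex a lock class of
 * its own, so lockdep can tell dir -> file nesting from a genuine
 * deadlock cycle. */
static struct lock_class_key demo_dir_i_mutex_key;

static void demo_annotate_inode(struct inode *inode)
{
	if (S_ISDIR(inode->i_mode))
		lockdep_set_class(&inode->i_mutex, &demo_dir_i_mutex_key);
}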
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/lockdep.c	26
-rw-r--r--	kernel/lockdep_proc.c	61
-rw-r--r--	kernel/mutex.c	35
-rw-r--r--	kernel/rcupdate.c	8
4 files changed, 91 insertions, 39 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 734da57..a6f1ee9 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1521,7 +1521,7 @@ cache_hit:
 }
 
 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
-		struct held_lock *hlock, int chain_head)
+		struct held_lock *hlock, int chain_head, u64 chain_key)
 {
 	/*
 	 * Trylock needs to maintain the stack of held locks, but it
@@ -1534,7 +1534,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 	 * graph_lock for us)
 	 */
 	if (!hlock->trylock && (hlock->check == 2) &&
-			lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
+			lookup_chain_cache(chain_key, hlock->class)) {
 		/*
 		 * Check whether last held lock:
 		 *
@@ -1576,7 +1576,7 @@ static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
 #else
 static inline int validate_chain(struct task_struct *curr,
 		struct lockdep_map *lock, struct held_lock *hlock,
-		int chain_head)
+		int chain_head, u64 chain_key)
 {
 	return 1;
 }
@@ -2450,11 +2450,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		chain_head = 1;
 	}
 	chain_key = iterate_chain_key(chain_key, id);
-	curr->curr_chain_key = chain_key;
 
-	if (!validate_chain(curr, lock, hlock, chain_head))
+	if (!validate_chain(curr, lock, hlock, chain_head, chain_key))
 		return 0;
 
+	curr->curr_chain_key = chain_key;
 	curr->lockdep_depth++;
 	check_chain_key(curr);
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -3199,3 +3199,19 @@ void debug_show_held_locks(struct task_struct *task)
 }
 
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
+
+void lockdep_sys_exit(void)
+{
+	struct task_struct *curr = current;
+
+	if (unlikely(curr->lockdep_depth)) {
+		if (!debug_locks_off())
+			return;
+		printk("\n================================================\n");
+		printk(  "[ BUG: lock held when returning to user space! ]\n");
+		printk(  "------------------------------------------------\n");
+		printk("%s/%d is leaving the kernel with locks still held!\n",
+				curr->comm, curr->pid);
+		lockdep_print_held_locks(curr);
+	}
+}
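The lockdep.c half of the merge does two things. First, validate_chain() now receives the candidate chain key as an argument, and curr->curr_chain_key is committed only after validation succeeds — the "fix mismatched lockdep_depth/curr_chain_hash" change, since a failed validation previously left the task with a chain hash that no longer matched its lock depth. Second, the new lockdep_sys_exit() check is armed by the three "connect the sysexit hook" arch patches, which live outside this kernel/-only diff; conceptually they make the return-to-userspace path do something like the following (purely illustrative C — the real hooks are small LOCKDEP_SYS_EXIT stubs in the assembly entry paths):

/* Hypothetical sketch, not from the patch: call the new check just
 * before dropping back to user mode. */
static void demo_syscall_exit_work(void)
{
	lockdep_sys_exit();	/* complains if any lock is still held */
	/* ... signal delivery, rescheduling, return to user space ... */
}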
]\n"); + printk( "------------------------------------------------\n"); + printk("%s/%d is leaving the kernel with locks still held!\n", + curr->comm, curr->pid); + lockdep_print_held_locks(curr); + } +} diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c index c851b2d..8a135bd 100644 --- a/kernel/lockdep_proc.c +++ b/kernel/lockdep_proc.c @@ -25,28 +25,38 @@ static void *l_next(struct seq_file *m, void *v, loff_t *pos) { - struct lock_class *class = v; + struct lock_class *class; (*pos)++; - if (class->lock_entry.next != &all_lock_classes) - class = list_entry(class->lock_entry.next, struct lock_class, - lock_entry); - else - class = NULL; - m->private = class; + if (v == SEQ_START_TOKEN) + class = m->private; + else { + class = v; + + if (class->lock_entry.next != &all_lock_classes) + class = list_entry(class->lock_entry.next, + struct lock_class, lock_entry); + else + class = NULL; + } return class; } static void *l_start(struct seq_file *m, loff_t *pos) { - struct lock_class *class = m->private; + struct lock_class *class; + loff_t i = 0; - if (&class->lock_entry == all_lock_classes.next) - seq_printf(m, "all lock classes:\n"); + if (*pos == 0) + return SEQ_START_TOKEN; - return class; + list_for_each_entry(class, &all_lock_classes, lock_entry) { + if (++i == *pos) + return class; + } + return NULL; } static void l_stop(struct seq_file *m, void *v) @@ -101,10 +111,15 @@ static void print_name(struct seq_file *m, struct lock_class *class) static int l_show(struct seq_file *m, void *v) { unsigned long nr_forward_deps, nr_backward_deps; - struct lock_class *class = m->private; + struct lock_class *class = v; struct lock_list *entry; char c1, c2, c3, c4; + if (v == SEQ_START_TOKEN) { + seq_printf(m, "all lock classes:\n"); + return 0; + } + seq_printf(m, "%p", class->key); #ifdef CONFIG_DEBUG_LOCKDEP seq_printf(m, " OPS:%8ld", class->ops); @@ -523,10 +538,11 @@ static void *ls_start(struct seq_file *m, loff_t *pos) { struct lock_stat_seq *data = m->private; - if (data->iter == data->stats) - seq_header(m); + if (*pos == 0) + return SEQ_START_TOKEN; - if (data->iter == data->iter_end) + data->iter = data->stats + *pos; + if (data->iter >= data->iter_end) data->iter = NULL; return data->iter; @@ -538,8 +554,13 @@ static void *ls_next(struct seq_file *m, void *v, loff_t *pos) (*pos)++; - data->iter = v; - data->iter++; + if (v == SEQ_START_TOKEN) + data->iter = data->stats; + else { + data->iter = v; + data->iter++; + } + if (data->iter == data->iter_end) data->iter = NULL; @@ -552,9 +573,11 @@ static void ls_stop(struct seq_file *m, void *v) static int ls_show(struct seq_file *m, void *v) { - struct lock_stat_seq *data = m->private; + if (v == SEQ_START_TOKEN) + seq_header(m); + else + seq_stats(m, v); - seq_stats(m, data->iter); return 0; } diff --git a/kernel/mutex.c b/kernel/mutex.c index 691b865..d7fe50c 100644 --- a/kernel/mutex.c +++ b/kernel/mutex.c @@ -51,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key) EXPORT_SYMBOL(__mutex_init); +#ifndef CONFIG_DEBUG_LOCK_ALLOC /* * We split the mutex lock/unlock logic into separate fastpath and * slowpath functions, to reduce the register pressure on the fastpath. 
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 691b865..d7fe50c 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -51,6 +51,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 
 EXPORT_SYMBOL(__mutex_init);
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * We split the mutex lock/unlock logic into separate fastpath and
  * slowpath functions, to reduce the register pressure on the fastpath.
@@ -92,6 +93,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
 }
 
 EXPORT_SYMBOL(mutex_lock);
+#endif
 
 static void fastcall noinline __sched
 __mutex_unlock_slowpath(atomic_t *lock_count);
@@ -122,7 +124,8 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
+		unsigned long ip)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
@@ -132,7 +135,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	spin_lock_mutex(&lock->wait_lock, flags);
 
 	debug_mutex_lock_common(lock, &waiter);
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	mutex_acquire(&lock->dep_map, subclass, 0, ip);
 	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -143,7 +146,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 	if (old_val == 1)
 		goto done;
 
-	lock_contended(&lock->dep_map, _RET_IP_);
+	lock_contended(&lock->dep_map, ip);
 
 	for (;;) {
 		/*
@@ -166,7 +169,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 		if (unlikely(state == TASK_INTERRUPTIBLE &&
 						signal_pending(task))) {
 			mutex_remove_waiter(lock, &waiter, task_thread_info(task));
-			mutex_release(&lock->dep_map, 1, _RET_IP_);
+			mutex_release(&lock->dep_map, 1, ip);
 			spin_unlock_mutex(&lock->wait_lock, flags);
 
 			debug_mutex_free_waiter(&waiter);
@@ -197,20 +200,12 @@ done:
 	return 0;
 }
 
-static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count)
-{
-	struct mutex *lock = container_of(lock_count, struct mutex, count);
-
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
-}
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __sched
 mutex_lock_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_nested);
@@ -219,7 +214,7 @@ int __sched
 mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
 {
 	might_sleep();
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, subclass, _RET_IP_);
 }
 
 EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
@@ -271,6 +266,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
 	__mutex_unlock_common_slowpath(lock_count, 1);
 }
 
+#ifndef CONFIG_DEBUG_LOCK_ALLOC
 /*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
@@ -298,13 +294,22 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
+static void fastcall noinline __sched
+__mutex_lock_slowpath(atomic_t *lock_count)
+{
+	struct mutex *lock = container_of(lock_count, struct mutex, count);
+
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
+}
+
 static int fastcall noinline __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, _RET_IP_);
 }
+#endif
 
 /*
  * Spinlock based trylock, we take the spinlock and check whether we
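Two coordinated changes in mutex.c: the fastpath entry points (mutex_lock(), mutex_lock_interruptible() and their slowpaths) are compiled out under CONFIG_DEBUG_LOCK_ALLOC, and __mutex_lock_common() now takes the acquisition ip as an explicit parameter instead of sampling _RET_IP_ itself, so lockdep and lock_stat attribute the lock to the real external call site rather than to shared internal code. The matching header change lies outside this kernel/-limited view; presumably it routes the plain API to the _nested variants when lock debugging is on, along these lines (an assumption, not part of this diff):

/* Assumed include/linux/mutex.h counterpart -- not shown in this diff. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
#define mutex_lock(lock)			mutex_lock_nested(lock, 0)
#define mutex_lock_interruptible(lock)		mutex_lock_interruptible_nested(lock, 0)
#endif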
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index 2c2dd84..130214f 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -49,6 +49,14 @@
 #include <linux/cpu.h>
 #include <linux/mutex.h>
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key rcu_lock_key;
+struct lockdep_map rcu_lock_map =
+	STATIC_LOCKDEP_MAP_INIT("rcu_read_lock", &rcu_lock_key);
+
+EXPORT_SYMBOL_GPL(rcu_lock_map);
+#endif
+
 /* Definition for rcupdate control block. */
 static struct rcu_ctrlblk rcu_ctrlblk = {
 	.cur = -300,
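The new rcu_lock_map gives lockdep a pseudo-lock standing in for the RCU read-side critical section; this is the kernel/ half of "annotate rcu_read_{,un}lock{,_bh}". The header half (not visible in this kernel/-limited diff) presumably wraps every rcu_read_lock()/rcu_read_unlock() pair in recursive-read acquire/release events on this map, roughly:

/* Assumed include/linux/rcupdate.h counterpart -- not shown in this
 * diff.  read == 2 marks a recursive read acquisition, so nested
 * rcu_read_lock() sections do not look like self-deadlock to lockdep. */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
extern struct lockdep_map rcu_lock_map;
# define rcu_read_acquire()	lock_acquire(&rcu_lock_map, 0, 0, 2, 1, _THIS_IP_)
# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
#else
# define rcu_read_acquire()	do { } while (0)
# define rcu_read_release()	do { } while (0)
#endif

With that in place, lockdep can flag, for example, synchronize_rcu() called while inside an RCU read-side section.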