-rw-r--r--	kernel/lockdep.c		 7
-rw-r--r--	kernel/lockdep_internals.h	10
2 files changed, 8 insertions, 9 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 78325f8..1b58a1b 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2298,7 +2298,12 @@ void trace_hardirqs_on_caller(unsigned long ip)
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
-		debug_atomic_inc(redundant_hardirqs_on);
+		/*
+		 * Neither irq nor preemption are disabled here
+		 * so this is racy by nature but losing one hit
+		 * in a stat is not a big deal.
+		 */
+		this_cpu_inc(lockdep_stats.redundant_hardirqs_on);
 		return;
 	}
 	/* we'll do an OFF -> ON transition: */
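The hunk above bumps the stat with this_cpu_inc() even though neither interrupts nor preemption are disabled at the call site. A minimal standalone sketch of that pattern (demo_stats and demo_count_event are hypothetical names, not part of the patch):

/*
 * Sketch only: a percpu stat bumped the same way the patch bumps
 * lockdep_stats.redundant_hardirqs_on.
 */
#include <linux/percpu.h>

struct demo_stats {
	unsigned long	redundant_events;
};

static DEFINE_PER_CPU(struct demo_stats, demo_stats);

static void demo_count_event(void)
{
	/*
	 * this_cpu_inc() is preemption-safe: on x86 it is a single
	 * %gs-relative increment, and the generic fallback disables
	 * preemption around the read-modify-write. An interrupt on
	 * the same CPU can still interleave with a non-atomic
	 * fallback and drop one hit, which is the race the patch's
	 * comment declares acceptable for a statistic.
	 */
	this_cpu_inc(demo_stats.redundant_events);
}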
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 8d7d4b6..2b17476 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -140,19 +140,13 @@ struct lockdep_stats {
 DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
 
 #define debug_atomic_inc(ptr)			{		\
-	struct lockdep_stats *__cpu_lockdep_stats;		\
-								\
 	WARN_ON_ONCE(!irqs_disabled());				\
-	__cpu_lockdep_stats = &__get_cpu_var(lockdep_stats);	\
-	__cpu_lockdep_stats->ptr++;				\
+	this_cpu_inc(lockdep_stats.ptr);			\
 }
 
 #define debug_atomic_dec(ptr)			{		\
-	struct lockdep_stats *__cpu_lockdep_stats;		\
-								\
 	WARN_ON_ONCE(!irqs_disabled());				\
-	__cpu_lockdep_stats = &__get_cpu_var(lockdep_stats);	\
-	__cpu_lockdep_stats->ptr--;				\
+	this_cpu_dec(lockdep_stats.ptr);			\
 }
 
 #define debug_atomic_read(ptr)		({				\
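debug_atomic_read() is truncated above; whichever way the file spells it, reading one of these stats means summing every CPU's slot. A hedged sketch of that read side, reusing the hypothetical demo_stats from the previous example rather than the file's exact code:

/*
 * Sketch: summing the hypothetical demo_stats counter across CPUs
 * with the standard for_each_possible_cpu()/per_cpu() helpers.
 */
static unsigned long demo_stats_total(void)
{
	unsigned long total = 0;
	int cpu;

	/*
	 * per_cpu() reads another CPU's slot without synchronization,
	 * so the total is only approximate while counters are being
	 * bumped concurrently with this_cpu_inc(); that is fine for
	 * debugging statistics.
	 */
	for_each_possible_cpu(cpu)
		total += per_cpu(demo_stats, cpu).redundant_events;

	return total;
}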