Skip to content

Commit baffd72

Browse files
Peter Zijlstra authored and ingomolnar committed
lockdep: Revert "lockdep: Use raw_cpu_*() for per-cpu variables"
The thinking in commit: fddf905 ("lockdep: Use raw_cpu_*() for per-cpu variables") is flawed. While it is true that when we're migratable both CPUs will have a 0 value, it doesn't hold that when we do get migrated in the middle of a raw_cpu_op(), the old CPU will still have 0 by the time we get around to reading it on the new CPU. Luckily, the reason for that commit (s390 using preempt_disable() instead of preempt_disable_notrace() in their percpu code), has since been fixed by commit: 1196f12 ("s390: don't trace preemption in percpu macros") An audit of arch/*/include/asm/percpu*.h shows there are no other architectures affected by this particular issue. Fixes: fddf905 ("lockdep: Use raw_cpu_*() for per-cpu variables") Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Signed-off-by: Ingo Molnar <mingo@kernel.org> Link: https://lkml.kernel.org/r/20201005095958.GJ2651@hirez.programming.kicks-ass.net
1 parent 4d00409 commit baffd72

1 file changed

Lines changed: 9 additions & 17 deletions

File tree

include/linux/lockdep.h

Lines changed: 9 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -512,19 +512,19 @@ static inline void print_irqtrace_events(struct task_struct *curr)
512512
#define lock_map_release(l) lock_release(l, _THIS_IP_)
513513

514514
#ifdef CONFIG_PROVE_LOCKING
515-
# define might_lock(lock) \
515+
# define might_lock(lock) \
516516
do { \
517517
typecheck(struct lockdep_map *, &(lock)->dep_map); \
518518
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
519519
lock_release(&(lock)->dep_map, _THIS_IP_); \
520520
} while (0)
521-
# define might_lock_read(lock) \
521+
# define might_lock_read(lock) \
522522
do { \
523523
typecheck(struct lockdep_map *, &(lock)->dep_map); \
524524
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
525525
lock_release(&(lock)->dep_map, _THIS_IP_); \
526526
} while (0)
527-
# define might_lock_nested(lock, subclass) \
527+
# define might_lock_nested(lock, subclass) \
528528
do { \
529529
typecheck(struct lockdep_map *, &(lock)->dep_map); \
530530
lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
@@ -536,45 +536,37 @@ DECLARE_PER_CPU(int, hardirqs_enabled);
536536
DECLARE_PER_CPU(int, hardirq_context);
537537
DECLARE_PER_CPU(unsigned int, lockdep_recursion);
538538

539-
/*
540-
* The below lockdep_assert_*() macros use raw_cpu_read() to access the above
541-
* per-cpu variables. This is required because this_cpu_read() will potentially
542-
* call into preempt/irq-disable and that obviously isn't right. This is also
543-
* correct because when IRQs are enabled, it doesn't matter if we accidentally
544-
* read the value from our previous CPU.
545-
*/
546-
547-
#define __lockdep_enabled (debug_locks && !raw_cpu_read(lockdep_recursion))
539+
#define __lockdep_enabled (debug_locks && !this_cpu_read(lockdep_recursion))
548540

549541
#define lockdep_assert_irqs_enabled() \
550542
do { \
551-
WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirqs_enabled)); \
543+
WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
552544
} while (0)
553545

554546
#define lockdep_assert_irqs_disabled() \
555547
do { \
556-
WARN_ON_ONCE(__lockdep_enabled && raw_cpu_read(hardirqs_enabled)); \
548+
WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
557549
} while (0)
558550

559551
#define lockdep_assert_in_irq() \
560552
do { \
561-
WARN_ON_ONCE(__lockdep_enabled && !raw_cpu_read(hardirq_context)); \
553+
WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirq_context)); \
562554
} while (0)
563555

564556
#define lockdep_assert_preemption_enabled() \
565557
do { \
566558
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
567559
__lockdep_enabled && \
568560
(preempt_count() != 0 || \
569-
!raw_cpu_read(hardirqs_enabled))); \
561+
!this_cpu_read(hardirqs_enabled))); \
570562
} while (0)
571563

572564
#define lockdep_assert_preemption_disabled() \
573565
do { \
574566
WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT) && \
575567
__lockdep_enabled && \
576568
(preempt_count() == 0 && \
577-
raw_cpu_read(hardirqs_enabled))); \
569+
this_cpu_read(hardirqs_enabled))); \
578570
} while (0)
579571

580572
#else

0 commit comments

Comments (0)