
Commit e6b1a44

Hou Tao authored and Peter Zijlstra committed
locking/percpu-rwsem: Use this_cpu_{inc,dec}() for read_count
The __this_cpu*() accessors are (in general) IRQ-unsafe which, given that percpu-rwsem is a blocking primitive, should be just fine.

However, file_end_write() is used from IRQ context and will cause load-store issues on architectures where the per-cpu accessors are not natively irq-safe.

Fix it by using the IRQ-safe this_cpu_*() for operations on read_count. This will generate more expensive code on a number of platforms, which might cause a performance regression for some of the other percpu-rwsem users. If any such is reported, we can consider alternative solutions.

Fixes: 70fe2f4 ("aio: fix freeze protection of aio writes")
Signed-off-by: Hou Tao <houtao1@huawei.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Oleg Nesterov <oleg@redhat.com>
Link: https://lkml.kernel.org/r/20200915140750.137881-1-houtao1@huawei.com
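To make the race concrete, here is a minimal userspace sketch (an illustration only, not kernel code, and not part of the commit): a SIGALRM handler stands in for the IRQ running file_end_write() -> percpu_up_read(), and an explicit load/add/store sequence stands in for a __this_cpu_inc() on an architecture without a natively IRQ-safe per-cpu accessor. The interleaving loses the handler's decrement:

#include <signal.h>
#include <stdio.h>

static volatile unsigned int read_count;

/* Plays the IRQ: file_end_write() -> percpu_up_read() on this CPU. */
static void irq_handler(int sig)
{
	(void)sig;
	read_count = read_count - 1;	/* the non-atomic "__this_cpu_dec()" */
}

int main(void)
{
	signal(SIGALRM, irq_handler);

	/* "__this_cpu_inc()" as a plain load/add/store sequence: */
	unsigned int tmp = read_count;	/* load (read_count == 0)           */
	raise(SIGALRM);			/* the "IRQ" fires between load/store */
	read_count = tmp + 1;		/* store: the handler's dec is lost  */

	/* One inc and one dec should leave 0; this prints 1 instead. */
	printf("read_count = %u\n", read_count);
	return 0;
}

With the IRQ-safe this_cpu_inc()/this_cpu_dec() pair the whole read-modify-write is interrupt-atomic, so no such window exists.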
1 parent 23870f1 · commit e6b1a44

2 files changed: 6 additions & 6 deletions


include/linux/percpu-rwsem.h

Lines changed: 4 additions & 4 deletions
@@ -60,7 +60,7 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 	 * anything we did within this RCU-sched read-size critical section.
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss)))
-		__this_cpu_inc(*sem->read_count);
+		this_cpu_inc(*sem->read_count);
 	else
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
 	/*
@@ -79,7 +79,7 @@ static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	 * Same as in percpu_down_read().
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss)))
-		__this_cpu_inc(*sem->read_count);
+		this_cpu_inc(*sem->read_count);
 	else
 		ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
 	preempt_enable();
@@ -103,7 +103,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 	 * Same as in percpu_down_read().
 	 */
 	if (likely(rcu_sync_is_idle(&sem->rss))) {
-		__this_cpu_dec(*sem->read_count);
+		this_cpu_dec(*sem->read_count);
 	} else {
 		/*
 		 * slowpath; reader will only ever wake a single blocked
@@ -115,7 +115,7 @@ static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 		 * aggregate zero, as that is the only time it matters) they
 		 * will also see our critical section.
 		 */
-		__this_cpu_dec(*sem->read_count);
+		this_cpu_dec(*sem->read_count);
		rcuwait_wake_up(&sem->writer);
 	}
 	preempt_enable();

kernel/locking/percpu-rwsem.c

Lines changed: 2 additions & 2 deletions
@@ -45,7 +45,7 @@ EXPORT_SYMBOL_GPL(percpu_free_rwsem);
 
 static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 {
-	__this_cpu_inc(*sem->read_count);
+	this_cpu_inc(*sem->read_count);
 
 	/*
 	 * Due to having preemption disabled the decrement happens on
@@ -71,7 +71,7 @@ static bool __percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	if (likely(!atomic_read_acquire(&sem->block)))
 		return true;
 
-	__this_cpu_dec(*sem->read_count);
+	this_cpu_dec(*sem->read_count);
 
 	/* Prod writer to re-evaluate readers_active_check() */
 	rcuwait_wake_up(&sem->writer);
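On the cost the commit message anticipates: architectures with a natively IRQ-atomic per-cpu instruction pay nothing extra (on x86, this_cpu_inc() compiles to a single incl %gs:... instruction), but the generic fallback (cf. this_cpu_generic_to_op() in include/asm-generic/percpu.h) brackets the read-modify-write with an interrupt disable/enable pair. A rough userspace analogue of that fallback, with a signal mask standing in for raw_local_irq_save()/raw_local_irq_restore() (the sketch itself is illustrative only):

#include <signal.h>

static volatile unsigned int read_count;

/*
 * Userspace stand-in for the generic this_cpu_inc() fallback:
 * block "interrupts" around the plain RMW to make it IRQ-safe.
 * The masking is the extra cost the commit message refers to.
 */
static void this_cpu_inc_sketch(volatile unsigned int *pcp)
{
	sigset_t all, old;

	sigfillset(&all);
	sigprocmask(SIG_BLOCK, &all, &old);	/* raw_local_irq_save()    */
	*pcp += 1;				/* the plain load/add/store */
	sigprocmask(SIG_SETMASK, &old, NULL);	/* raw_local_irq_restore() */
}

int main(void)
{
	this_cpu_inc_sketch(&read_count);
	return 0;
}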
