Skip to content

Commit aa40c13

Browse files
committed
rcu: Report QS for outermost PREEMPT=n rcu_read_unlock() for strict GPs
The CONFIG_PREEMPT=n instance of rcu_read_unlock() is even more aggressive than the CONFIG_PREEMPT=y instance in deferring the reporting of quiescent states to the RCU core. This is just what is wanted in normal use because it reduces overhead, but the resulting delay is not what is wanted for kernels built with CONFIG_RCU_STRICT_GRACE_PERIOD=y. This commit therefore adds an rcu_read_unlock_strict() function that checks for exceptional conditions, and reports the newly started quiescent state if it is safe to do so, also doing a spin-delay if requested via rcutree.rcu_unlock_delay. This commit also adds a call to rcu_read_unlock_strict() from the CONFIG_PREEMPT=n instance of __rcu_read_unlock(). [ paulmck: Fixed bug located by kernel test robot <lkp@intel.com> ] Reported-by: Jann Horn <jannh@google.com> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent a657f26 commit aa40c13

3 files changed

Lines changed: 31 additions & 6 deletions

File tree

include/linux/rcupdate.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -55,6 +55,12 @@ void __rcu_read_unlock(void);
5555

5656
#else /* #ifdef CONFIG_PREEMPT_RCU */
5757

58+
/*
 * Hook called from the CONFIG_PREEMPT=n __rcu_read_unlock() so that, with
 * CONFIG_RCU_STRICT_GRACE_PERIOD=y, the outermost rcu_read_unlock() can
 * promptly report the newly started quiescent state.  Tiny RCU provides
 * only a no-op stub; Tree RCU supplies the real implementation.
 */
#ifdef CONFIG_TINY_RCU
#define rcu_read_unlock_strict() do { } while (0)
#else
void rcu_read_unlock_strict(void);
#endif
63+
5864
static inline void __rcu_read_lock(void)
5965
{
6066
preempt_disable();
@@ -63,6 +69,7 @@ static inline void __rcu_read_lock(void)
6369
/*
 * CONFIG_PREEMPT=n flavor of __rcu_read_unlock(): end the reader section
 * by re-enabling preemption, then give strict-grace-period kernels a
 * chance to immediately report the resulting quiescent state.
 */
static inline void __rcu_read_unlock(void)
{
	preempt_enable();
	rcu_read_unlock_strict();	/* No-op unless CONFIG_RCU_STRICT_GRACE_PERIOD=y. */
}
6774

6875
static inline int rcu_preempt_depth(void)

kernel/rcu/tree.c

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -178,6 +178,12 @@ module_param(gp_init_delay, int, 0444);
178178
static int gp_cleanup_delay;
179179
module_param(gp_cleanup_delay, int, 0444);
180180

181+
// Add delay to rcu_read_unlock() for strict grace periods.
// Exposed as the read-only (0444) module parameter rcutree.rcu_unlock_delay,
// but only on CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels; otherwise it stays 0,
// making the udelay() in rcu_read_unlock_strict() a no-op.
static int rcu_unlock_delay;
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
module_param(rcu_unlock_delay, int, 0444);
#endif
186+
181187
/*
182188
* This rcu parameter is runtime-read-only. It reflects
183189
* a minimum allowed number of objects which can be cached

kernel/rcu/tree_plugin.h

Lines changed: 18 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -430,12 +430,6 @@ static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
430430
return !list_empty(&rnp->blkd_tasks);
431431
}
432432

433-
// Add delay to rcu_read_unlock() for strict grace periods.
434-
static int rcu_unlock_delay;
435-
#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
436-
module_param(rcu_unlock_delay, int, 0444);
437-
#endif
438-
439433
/*
440434
* Report deferred quiescent states. The deferral time can
441435
* be quite short, for example, in the case of the call from
@@ -784,6 +778,24 @@ dump_blkd_tasks(struct rcu_node *rnp, int ncheck)
784778

785779
#else /* #ifdef CONFIG_PREEMPT_RCU */
786780

781+
/*
 * If strict grace periods are enabled, and if the calling
 * __rcu_read_unlock() marks the beginning of a quiescent state, immediately
 * report that quiescent state and, if requested, spin for a bit.
 *
 * Called from the CONFIG_PREEMPT=n __rcu_read_unlock() after it has
 * re-enabled preemption.  Compiles away to nothing unless
 * CONFIG_RCU_STRICT_GRACE_PERIOD=y, thanks to the IS_ENABLED() check.
 */
void rcu_read_unlock_strict(void)
{
	struct rcu_data *rdp;

	/*
	 * Bail if strict grace periods are not configured, or if it is not
	 * safe to report a quiescent state right now: interrupts disabled,
	 * still inside a preempt-disabled region (so this is not the
	 * outermost unlock), or the grace-period kthread is not yet up
	 * (presumably too early in boot -- NOTE(review): confirm).
	 */
	if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
	    irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
		return;
	rdp = this_cpu_ptr(&rcu_data);
	/* Report this CPU's quiescent state to the RCU core. */
	rcu_report_qs_rdp(rdp->cpu, rdp);
	/* Optional spin-delay, set via rcutree.rcu_unlock_delay (default 0). */
	udelay(rcu_unlock_delay);
}
EXPORT_SYMBOL_GPL(rcu_read_unlock_strict);
798+
787799
/*
788800
* Tell them what RCU they are running.
789801
*/

0 commit comments

Comments
 (0)