@@ -165,6 +165,12 @@ module_param(gp_init_delay, int, 0444);
 static int gp_cleanup_delay;
 module_param(gp_cleanup_delay, int, 0444);
 
+// Add delay to rcu_read_unlock() for strict grace periods.
+static int rcu_unlock_delay;
+#ifdef CONFIG_RCU_STRICT_GRACE_PERIOD
+module_param(rcu_unlock_delay, int, 0444);
+#endif
+
 /*
  * This rcu parameter is runtime-read-only. It reflects
  * a minimum allowed number of objects which can be cached
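
This hunk only declares rcu_unlock_delay; nothing in this diff reads it. The
parameter's mode is 0444, so it can be set at boot but is read-only afterward.
Its eventual consumer is the strict-GP rcu_read_unlock() slow path, which in
later mainline (rcu_read_unlock_strict() in tree_plugin.h) spins for
rcu_unlock_delay microseconds after reporting a quiescent state. A rough
sketch of that consumer, not part of this patch, with details approximate:

	// Sketch: the strict unlock path reports a quiescent state for
	// this CPU, then optionally delays to widen the race window that
	// strict-GP kernels are trying to expose.
	void rcu_read_unlock_strict(void)
	{
		struct rcu_data *rdp;

		if (irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
			return;
		rdp = this_cpu_ptr(&rcu_data);
		rcu_report_qs_rdp(rdp);
		udelay(rcu_unlock_delay);
	}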
@@ -455,24 +461,25 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 	return __this_cpu_read(rcu_data.dynticks_nesting) == 0;
 }
 
-#define DEFAULT_RCU_BLIMIT 10     /* Maximum callbacks per rcu_do_batch ... */
-#define DEFAULT_MAX_RCU_BLIMIT 10000 /* ... even during callback flood. */
+#define DEFAULT_RCU_BLIMIT (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 1000 : 10)
+// Maximum callbacks per rcu_do_batch ...
+#define DEFAULT_MAX_RCU_BLIMIT 10000 // ... even during callback flood.
 static long blimit = DEFAULT_RCU_BLIMIT;
-#define DEFAULT_RCU_QHIMARK 10000 /* If this many pending, ignore blimit. */
+#define DEFAULT_RCU_QHIMARK 10000 // If this many pending, ignore blimit.
 static long qhimark = DEFAULT_RCU_QHIMARK;
-#define DEFAULT_RCU_QLOMARK 100   /* Once only this many pending, use blimit. */
+#define DEFAULT_RCU_QLOMARK 100   // Once only this many pending, use blimit.
 static long qlowmark = DEFAULT_RCU_QLOMARK;
 #define DEFAULT_RCU_QOVLD_MULT 2
 #define DEFAULT_RCU_QOVLD (DEFAULT_RCU_QOVLD_MULT * DEFAULT_RCU_QHIMARK)
-static long qovld = DEFAULT_RCU_QOVLD; /* If this many pending, hammer QS. */
-static long qovld_calc = -1;      /* No pre-initialization lock acquisitions! */
+static long qovld = DEFAULT_RCU_QOVLD; // If this many pending, hammer QS.
+static long qovld_calc = -1; // No pre-initialization lock acquisitions!
 
 module_param(blimit, long, 0444);
 module_param(qhimark, long, 0444);
 module_param(qlowmark, long, 0444);
 module_param(qovld, long, 0444);
 
-static ulong jiffies_till_first_fqs = ULONG_MAX;
+static ulong jiffies_till_first_fqs = IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ? 0 : ULONG_MAX;
 static ulong jiffies_till_next_fqs = ULONG_MAX;
 static bool rcu_kick_kthreads;
 static int rcu_divisor = 7;
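
Two defaults change for strict kernels here: the per-batch callback limit
rises from 10 to 1000, and jiffies_till_first_fqs drops from the ULONG_MAX
"auto-tune" sentinel to 0 so the grace-period kthread begins forcing
quiescent states without delay. Both use IS_ENABLED() rather than #ifdef:
IS_ENABLED(CONFIG_FOO) expands to a compile-time 1 or 0, so it can appear
inside an ordinary C expression, and the disabled branch is still parsed and
type-checked before being folded away. A minimal illustration with a
hypothetical config symbol:

	#include <linux/kconfig.h>

	/* 1000 with CONFIG_DEMO_STRICT=y, otherwise 10, decided at compile
	 * time; unlike #ifdef, a typo in the disabled arm still breaks the
	 * build. */
	#define DEMO_BLIMIT (IS_ENABLED(CONFIG_DEMO_STRICT) ? 1000 : 10)

	static long demo_blimit = DEMO_BLIMIT;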
@@ -1571,6 +1578,19 @@ static void __maybe_unused rcu_advance_cbs_nowake(struct rcu_node *rnp,
 	raw_spin_unlock_rcu_node(rnp);
 }
 
+/*
+ * In CONFIG_RCU_STRICT_GRACE_PERIOD=y kernels, attempt to generate a
+ * quiescent state. This is intended to be invoked when the CPU notices
+ * a new grace period.
+ */
+static void rcu_strict_gp_check_qs(void)
+{
+	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD)) {
+		rcu_read_lock();
+		rcu_read_unlock();
+	}
+}
+
 /*
  * Update CPU-local rcu_data state to record the beginnings and ends of
  * grace periods. The caller must hold the ->lock of the leaf rcu_node
@@ -1641,6 +1661,7 @@ static void note_gp_changes(struct rcu_data *rdp)
 	}
 	needwake = __note_gp_changes(rnp, rdp);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
+	rcu_strict_gp_check_qs();
 	if (needwake)
 		rcu_gp_kthread_wake();
 }
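
The empty rcu_read_lock()/rcu_read_unlock() pair is not a no-op on strict
kernels: the outermost rcu_read_unlock() is exactly where such kernels hook
in to report a quiescent state (compare the rcu_read_unlock_strict() sketch
after the first hunk, and strict_work_handler() later in this patch, which
reuses the same trick). Note also that note_gp_changes() invokes
rcu_strict_gp_check_qs() only after dropping rnp->lock; this ordering
matters, presumably because reporting the quiescent state reacquires that
same rcu_node lock, as rcu_report_qs_rdp() below shows.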
@@ -1678,6 +1699,15 @@ static void rcu_gp_torture_wait(void)
 	}
 }
 
+/*
+ * Handler for on_each_cpu() to invoke the target CPU's RCU core
+ * processing.
+ */
+static void rcu_strict_gp_boundary(void *unused)
+{
+	invoke_rcu_core();
+}
+
 /*
  * Initialize a new grace period. Return false if no grace period required.
  */
@@ -1809,6 +1839,10 @@ static bool rcu_gp_init(void)
 		WRITE_ONCE(rcu_state.gp_activity, jiffies);
 	}
 
+	// If strict, make all CPUs aware of new grace period.
+	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
+
 	return true;
 }
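
on_each_cpu(func, info, wait) runs func on every online CPU, via IPI on the
remote ones; the trailing 0 means "do not wait for the handlers to finish",
so grace-period initialization is not serialized behind slow CPUs. The same
notification is repeated in rcu_gp_cleanup() below, giving strict kernels a
kick at both ends of every grace period. A small self-contained module
showing the call shape (names hypothetical):

	#include <linux/module.h>
	#include <linux/smp.h>

	/* Runs once per CPU; smp_processor_id() is safe here because
	 * on_each_cpu() handlers run with preemption disabled. */
	static void demo_ping(void *unused)
	{
		pr_info("pinged CPU %d\n", smp_processor_id());
	}

	static int __init demo_init(void)
	{
		on_each_cpu(demo_ping, NULL, 0);	/* 0: don't wait */
		return 0;
	}
	module_init(demo_init);
	MODULE_LICENSE("GPL");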
18141848
@@ -2025,6 +2059,10 @@ static void rcu_gp_cleanup(void)
 			   rcu_state.gp_flags & RCU_GP_FLAG_INIT);
 	}
 	raw_spin_unlock_irq_rcu_node(rnp);
+
+	// If strict, make all CPUs aware of the end of the old grace period.
+	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+		on_each_cpu(rcu_strict_gp_boundary, NULL, 0);
 }
20292067
20302068/*
@@ -2203,7 +2241,7 @@ rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
22032241 * structure. This must be called from the specified CPU.
22042242 */
22052243static void
2206- rcu_report_qs_rdp (int cpu , struct rcu_data * rdp )
2244+ rcu_report_qs_rdp (struct rcu_data * rdp )
22072245{
22082246 unsigned long flags ;
22092247 unsigned long mask ;
@@ -2212,6 +2250,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 			       rcu_segcblist_is_offloaded(&rdp->cblist);
 	struct rcu_node *rnp;
 
+	WARN_ON_ONCE(rdp->cpu != smp_processor_id());
 	rnp = rdp->mynode;
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 	if (rdp->cpu_no_qs.b.norm || rdp->gp_seq != rnp->gp_seq ||
@@ -2228,8 +2267,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_data *rdp)
 		return;
 	}
 	mask = rdp->grpmask;
-	if (rdp->cpu == smp_processor_id())
-		rdp->core_needs_qs = false;
+	rdp->core_needs_qs = false;
 	if ((rnp->qsmask & mask) == 0) {
 		raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	} else {
@@ -2278,7 +2316,7 @@ rcu_check_quiescent_state(struct rcu_data *rdp)
 	 * Tell RCU we are done (but rcu_report_qs_rdp() will be the
 	 * judge of that).
 	 */
-	rcu_report_qs_rdp(rdp);
 }
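
The dropped cpu argument was redundant: rcu_report_qs_rdp() must run on
rdp's own CPU, and its caller passed rdp->cpu anyway. The new WARN_ON_ONCE()
turns that implicit precondition into a checked one, which is what lets the
old "if (rdp->cpu == smp_processor_id())" guard around clearing
->core_needs_qs become unconditional. The general pattern, with hypothetical
names:

	#include <linux/bug.h>
	#include <linux/smp.h>

	struct demo_data {
		int cpu;		/* owning CPU */
		bool core_needs_qs;
	};

	/* Must be called on dp->cpu; assert the precondition once, then
	 * rely on it instead of re-checking at each use. */
	static void demo_report_qs(struct demo_data *dp)
	{
		WARN_ON_ONCE(dp->cpu != smp_processor_id());
		dp->core_needs_qs = false;
	}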
22832321
22842322/*
@@ -2621,6 +2659,14 @@ void rcu_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
 
+// Workqueue handler for an RCU reader for kernels enforcing strict RCU
+// grace periods.
+static void strict_work_handler(struct work_struct *work)
+{
+	rcu_read_lock();
+	rcu_read_unlock();
+}
+
 /* Perform RCU core processing work for the current CPU. */
 static __latent_entropy void rcu_core(void)
 {
@@ -2665,6 +2711,10 @@ static __latent_entropy void rcu_core(void)
 	/* Do any needed deferred wakeups of rcuo kthreads. */
 	do_nocb_deferred_wakeup(rdp);
 	trace_rcu_utilization(TPS("End RCU core"));
+
+	// If strict GPs, schedule an RCU reader in a clean environment.
+	if (IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD))
+		queue_work_on(rdp->cpu, rcu_gp_wq, &rdp->strict_work);
 }
 
 static void rcu_core_si(struct softirq_action *h)
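
strict_work_handler() is the same empty-reader trick again, but deferred
through the workqueue machinery so the reader runs in a clean environment
(process context, no locks held) shortly after rcu_core() returns, and
queue_work_on() pins it to rdp's own CPU. A minimal self-contained module
exercising the same INIT_WORK()/queue_work_on() lifecycle (hypothetical
names, queued on system_wq rather than rcu_gp_wq):

	#include <linux/module.h>
	#include <linux/smp.h>
	#include <linux/workqueue.h>

	static struct work_struct demo_work;

	/* Work items run preemptibly, so use raw_smp_processor_id(). */
	static void demo_handler(struct work_struct *work)
	{
		pr_info("work ran on CPU %d\n", raw_smp_processor_id());
	}

	static int __init demo_init(void)
	{
		INIT_WORK(&demo_work, demo_handler);
		queue_work_on(0, system_wq, &demo_work);	/* pin to CPU 0 */
		return 0;
	}

	static void __exit demo_exit(void)
	{
		flush_work(&demo_work);	/* don't unload with work pending */
	}
	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");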
@@ -3862,6 +3912,7 @@ rcu_boot_init_percpu_data(int cpu)
 
 	/* Set up local state, ensuring consistent view of global state. */
 	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
+	INIT_WORK(&rdp->strict_work, strict_work_handler);
 	WARN_ON_ONCE(rdp->dynticks_nesting != 1);
 	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp)));
 	rdp->rcu_ofl_gp_seq = rcu_state.gp_seq;
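
The INIT_WORK() here presumes a strict_work member in struct rcu_data; the
companion declaration lives in kernel/rcu/tree.h and is not part of this
excerpt. Roughly:

	/* kernel/rcu/tree.h (companion hunk, not shown above): */
	#include <linux/workqueue.h>

	struct rcu_data {
		/* ... existing fields ... */
		struct work_struct strict_work;	/* Schedule readers for strict GPs. */
	};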