Skip to content

Commit cd290ec

Browse files
melver authored and paulmckrcu committed
kcsan: Use tracing-safe version of prandom
In the core runtime, we must minimize any calls to external library functions to avoid any kind of recursion. This can happen even though instrumentation is disabled for called functions, but tracing is enabled. Most recently, prandom_u32() added a tracepoint, which can cause problems for KCSAN even if the rcuidle variant is used. For example: kcsan -> prandom_u32() -> trace_prandom_u32_rcuidle -> srcu_read_lock_notrace -> __srcu_read_lock -> kcsan ... While we could disable KCSAN in kcsan_setup_watchpoint(), this does not solve other unexpected behaviour we may get due recursing into functions that may not be tolerant to such recursion: __srcu_read_lock -> kcsan -> ... -> __srcu_read_lock Therefore, switch to using prandom_u32_state(), which is uninstrumented, and does not have a tracepoint. Link: https://lkml.kernel.org/r/20200821063043.1949509-1-elver@google.com Link: https://lkml.kernel.org/r/20200820172046.GA177701@elver.google.com Signed-off-by: Marco Elver <elver@google.com> Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent 068df05 commit cd290ec

1 file changed

Lines changed: 29 additions & 6 deletions

File tree

kernel/kcsan/core.c

Lines changed: 29 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -100,6 +100,9 @@ static atomic_long_t watchpoints[CONFIG_KCSAN_NUM_WATCHPOINTS + NUM_SLOTS-1];
100100
*/
101101
static DEFINE_PER_CPU(long, kcsan_skip);
102102

103+
/* For kcsan_prandom_u32_max(). */
104+
static DEFINE_PER_CPU(struct rnd_state, kcsan_rand_state);
105+
103106
static __always_inline atomic_long_t *find_watchpoint(unsigned long addr,
104107
size_t size,
105108
bool expect_write,
@@ -271,11 +274,28 @@ should_watch(const volatile void *ptr, size_t size, int type, struct kcsan_ctx *
271274
return true;
272275
}
273276

277+
/*
278+
* Returns a pseudo-random number in interval [0, ep_ro). See prandom_u32_max()
279+
* for more details.
280+
*
281+
* The open-coded version here is using only safe primitives for all contexts
282+
* where we can have KCSAN instrumentation. In particular, we cannot use
283+
* prandom_u32() directly, as its tracepoint could cause recursion.
284+
*/
285+
static u32 kcsan_prandom_u32_max(u32 ep_ro)
286+
{
287+
struct rnd_state *state = &get_cpu_var(kcsan_rand_state);
288+
const u32 res = prandom_u32_state(state);
289+
290+
put_cpu_var(kcsan_rand_state);
291+
return (u32)(((u64) res * ep_ro) >> 32);
292+
}
293+
274294
static inline void reset_kcsan_skip(void)
275295
{
276296
long skip_count = kcsan_skip_watch -
277297
(IS_ENABLED(CONFIG_KCSAN_SKIP_WATCH_RANDOMIZE) ?
278-
prandom_u32_max(kcsan_skip_watch) :
298+
kcsan_prandom_u32_max(kcsan_skip_watch) :
279299
0);
280300
this_cpu_write(kcsan_skip, skip_count);
281301
}
@@ -285,16 +305,18 @@ static __always_inline bool kcsan_is_enabled(void)
285305
return READ_ONCE(kcsan_enabled) && get_ctx()->disable_count == 0;
286306
}
287307

288-
static inline unsigned int get_delay(int type)
308+
/* Introduce delay depending on context and configuration. */
309+
static void delay_access(int type)
289310
{
290311
unsigned int delay = in_task() ? kcsan_udelay_task : kcsan_udelay_interrupt;
291312
/* For certain access types, skew the random delay to be longer. */
292313
unsigned int skew_delay_order =
293314
(type & (KCSAN_ACCESS_COMPOUND | KCSAN_ACCESS_ASSERT)) ? 1 : 0;
294315

295-
return delay - (IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
296-
prandom_u32_max(delay >> skew_delay_order) :
297-
0);
316+
delay -= IS_ENABLED(CONFIG_KCSAN_DELAY_RANDOMIZE) ?
317+
kcsan_prandom_u32_max(delay >> skew_delay_order) :
318+
0;
319+
udelay(delay);
298320
}
299321

300322
void kcsan_save_irqtrace(struct task_struct *task)
@@ -476,7 +498,7 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
476498
* Delay this thread, to increase probability of observing a racy
477499
* conflicting access.
478500
*/
479-
udelay(get_delay(type));
501+
delay_access(type);
480502

481503
/*
482504
* Re-read value, and check if it is as expected; if not, we infer a
@@ -620,6 +642,7 @@ void __init kcsan_init(void)
620642
BUG_ON(!in_task());
621643

622644
kcsan_debugfs_init();
645+
prandom_seed_full_state(&kcsan_rand_state);
623646

624647
/*
625648
* We are in the init task, and no other tasks should be running;

0 commit comments

Comments
 (0)