Skip to content

Commit c51f8f8

Browse files
George Spelvin authored and wtarreau (Willy Tarreau) committed
random32: make prandom_u32() output unpredictable
Non-cryptographic PRNGs may have great statistical properties, but are usually trivially predictable to someone who knows the algorithm, given a small sample of their output. An LFSR like prandom_u32() is particularly simple, even if the sample is widely scattered bits. It turns out the network stack uses prandom_u32() for some things like random port numbers which it would prefer are *not* trivially predictable. Predictability led to a practical DNS spoofing attack. Oops. This patch replaces the LFSR with a homebrew cryptographic PRNG based on the SipHash round function, which is in turn seeded with 128 bits of strong random key. (The authors of SipHash have *not* been consulted about this abuse of their algorithm.) Speed is prioritized over security; attacks are rare, while performance is always wanted. Replacing all callers of prandom_u32() is the quick fix. Whether to reinstate a weaker PRNG for uses which can tolerate it is an open question. Commit f227e3e ("random32: update the net random state on interrupt and activity") was an earlier attempt at a solution. This patch replaces it. Reported-by: Amit Klein <aksecurity@gmail.com> Cc: Willy Tarreau <w@1wt.eu> Cc: Eric Dumazet <edumazet@google.com> Cc: "Jason A. 
Donenfeld" <Jason@zx2c4.com> Cc: Andy Lutomirski <luto@kernel.org> Cc: Kees Cook <keescook@chromium.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: tytso@mit.edu Cc: Florian Westphal <fw@strlen.de> Cc: Marc Plumb <lkml.mplumb@gmail.com> Fixes: f227e3e ("random32: update the net random state on interrupt and activity") Signed-off-by: George Spelvin <lkml@sdf.org> Link: https://lore.kernel.org/netdev/20200808152628.GA27941@SDF.ORG/ [ willy: partial reversal of f227e3e; moved SIPROUND definitions to prandom.h for later use; merged George's prandom_seed() proposal; inlined siprand_u32(); replaced the net_rand_state[] array with 4 members to fix a build issue; cosmetic cleanups to make checkpatch happy; fixed RANDOM32_SELFTEST build ] Signed-off-by: Willy Tarreau <w@1wt.eu>
1 parent f11901e commit c51f8f8

4 files changed

Lines changed: 318 additions & 190 deletions

File tree

drivers/char/random.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1277,7 +1277,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
@@ -1277,7 +1277,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
 
 	fast_mix(fast_pool);
 	add_interrupt_bench(cycles);
-	this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
 
 	if (unlikely(crng_init == 0)) {
 		if ((fast_pool->count >= 64) &&

include/linux/prandom.h

Lines changed: 34 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,44 @@ void prandom_bytes(void *buf, size_t nbytes);
 void prandom_seed(u32 seed);
 void prandom_reseed_late(void);
 
+#if BITS_PER_LONG == 64
+/*
+ * The core SipHash round function.  Each line can be executed in
+ * parallel given enough CPU resources.
+ */
+#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+	v0 += v1, v1 = rol64(v1, 13),  v2 += v3, v3 = rol64(v3, 16), \
+	v1 ^= v0, v0 = rol64(v0, 32),  v3 ^= v2, \
+	v0 += v3, v3 = rol64(v3, 21),  v2 += v1, v1 = rol64(v1, 17), \
+	v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
+)
+
+#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
+#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)
+
+#elif BITS_PER_LONG == 32
+/*
+ * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
+ * This is weaker, but 32-bit machines are not used for high-traffic
+ * applications, so there is less output for an attacker to analyze.
+ */
+#define PRND_SIPROUND(v0, v1, v2, v3) ( \
+	v0 += v1, v1 = rol32(v1, 5),  v2 += v3, v3 = rol32(v3, 8), \
+	v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
+	v0 += v3, v3 = rol32(v3, 7),  v2 += v1, v1 = rol32(v1, 13), \
+	v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
+)
+#define PRND_K0 0x6c796765
+#define PRND_K1 0x74656462
+
+#else
+#error Unsupported BITS_PER_LONG
+#endif
+
 struct rnd_state {
 	__u32 s1, s2, s3, s4;
 };
 
-DECLARE_PER_CPU(struct rnd_state, net_rand_state);
-
 u32 prandom_u32_state(struct rnd_state *state);
 void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
 void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);

kernel/time/timer.c

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1717,13 +1717,6 @@ void update_process_times(int user_tick)
 	scheduler_tick();
 	if (IS_ENABLED(CONFIG_POSIX_TIMERS))
 		run_posix_cpu_timers();
-
-	/* The current CPU might make use of net randoms without receiving IRQs
-	 * to renew them often enough. Let's update the net_rand_state from a
-	 * non-constant value that's not affine to the number of calls to make
-	 * sure it's updated when there's some activity (we don't care in idle).
-	 */
-	this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
 }
 
 /**

0 commit comments

Comments
 (0)