Skip to content

Commit 91f28da

Browse files
committed
Merge tag '20201024-v4-5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/prandom
Pull random32 updates from Willy Tarreau: "Make prandom_u32() less predictable. This is the cleanup of the latest series of prandom_u32 experimentations consisting in using SipHash instead of Tausworthe to produce the randoms used by the network stack. The changes to the files were kept minimal, and the controversial commit that used to take noise from the fast_pool (f227e3e) was reverted. Instead, a dedicated "net_rand_noise" per_cpu variable is fed from various sources of activities (networking, scheduling) to perturb the SipHash state using fast, non-trivially predictable data, instead of keeping it fully deterministic. The goal is essentially to make any occasional memory leakage or brute-force attempt useless. The resulting code was verified to be very slightly faster on x86_64 than what it was with the controversial commit above, though this remains barely above measurement noise. It was also tested on i386 and arm, and build-tested only on arm64" Link: https://lore.kernel.org/netdev/20200808152628.GA27941@SDF.ORG/ * tag '20201024-v4-5.10' of git://git.kernel.org/pub/scm/linux/kernel/git/wtarreau/prandom: random32: add a selftest for the prandom32 code random32: add noise from network and scheduling activity random32: make prandom_u32() output unpredictable
2 parents d769139 + c6e169b commit 91f28da

5 files changed

Lines changed: 404 additions & 190 deletions

File tree

drivers/char/random.c

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1277,7 +1277,6 @@ void add_interrupt_randomness(int irq, int irq_flags)
12771277

12781278
fast_mix(fast_pool);
12791279
add_interrupt_bench(cycles);
1280-
this_cpu_add(net_rand_state.s1, fast_pool->pool[cycles & 3]);
12811280

12821281
if (unlikely(crng_init == 0)) {
12831282
if ((fast_pool->count >= 64) &&

include/linux/prandom.h

Lines changed: 53 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -16,12 +16,62 @@ void prandom_bytes(void *buf, size_t nbytes);
1616
void prandom_seed(u32 seed);
1717
void prandom_reseed_late(void);
1818

19+
/* Per-CPU noise word stirred into the prandom SipHash state (defined elsewhere). */
DECLARE_PER_CPU(unsigned long, net_rand_noise);

/*
 * Convenience wrapper: fold four arbitrary values (pointers, counters, ...)
 * into the per-CPU noise word.  Arguments are only cast, never dereferenced.
 */
#define PRANDOM_ADD_NOISE(a, b, c, d) \
	prandom_u32_add_noise((unsigned long)(a), (unsigned long)(b), \
			      (unsigned long)(c), (unsigned long)(d))

#if BITS_PER_LONG == 64
/*
 * The core SipHash round function. Each line can be executed in
 * parallel given enough CPU resources.
 */
#define PRND_SIPROUND(v0, v1, v2, v3) ( \
	v0 += v1, v1 = rol64(v1, 13), v2 += v3, v3 = rol64(v3, 16), \
	v1 ^= v0, v0 = rol64(v0, 32), v3 ^= v2, \
	v0 += v3, v3 = rol64(v3, 21), v2 += v1, v1 = rol64(v1, 17), \
	v3 ^= v0, v1 ^= v2, v2 = rol64(v2, 32) \
)

/* XOR-folded pairs of the standard SipHash initialization constants. */
#define PRND_K0 (0x736f6d6570736575 ^ 0x6c7967656e657261)
#define PRND_K1 (0x646f72616e646f6d ^ 0x7465646279746573)

#elif BITS_PER_LONG == 32
/*
 * On 32-bit machines, we use HSipHash, a reduced-width version of SipHash.
 * This is weaker, but 32-bit machines are not used for high-traffic
 * applications, so there is less output for an attacker to analyze.
 */
#define PRND_SIPROUND(v0, v1, v2, v3) ( \
	v0 += v1, v1 = rol32(v1, 5), v2 += v3, v3 = rol32(v3, 8), \
	v1 ^= v0, v0 = rol32(v0, 16), v3 ^= v2, \
	v0 += v3, v3 = rol32(v3, 7), v2 += v1, v1 = rol32(v1, 13), \
	v3 ^= v0, v1 ^= v2, v2 = rol32(v2, 16) \
)
/* HSipHash halves of the initialization constants. */
#define PRND_K0 0x6c796765
#define PRND_K1 0x74656462

#else
#error Unsupported BITS_PER_LONG
#endif

/*
 * Mix four caller-supplied words into this CPU's net_rand_noise.
 *
 * The current noise word is XORed into @a, one SipHash round is run over
 * (a, b, c, d), and the resulting @d becomes the new noise word.  Uses
 * raw_cpu_read/raw_cpu_write, so callers tolerate the (harmless) race of
 * being preempted between the read and the write.
 */
static inline void prandom_u32_add_noise(unsigned long a, unsigned long b,
					 unsigned long c, unsigned long d)
{
	/*
	 * This is not used cryptographically; it's just
	 * a convenient 4-word hash function. (3 xor, 2 add, 2 rol)
	 */
	a ^= raw_cpu_read(net_rand_noise);
	PRND_SIPROUND(a, b, c, d);
	raw_cpu_write(net_rand_noise, d);
}
70+
1971
struct rnd_state {
2072
__u32 s1, s2, s3, s4;
2173
};
2274

23-
DECLARE_PER_CPU(struct rnd_state, net_rand_state);
24-
2575
u32 prandom_u32_state(struct rnd_state *state);
2676
void prandom_bytes_state(struct rnd_state *state, void *buf, size_t nbytes);
2777
void prandom_seed_full_state(struct rnd_state __percpu *pcpu_state);
@@ -67,6 +117,7 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
67117
state->s2 = __seed(i, 8U);
68118
state->s3 = __seed(i, 16U);
69119
state->s4 = __seed(i, 128U);
120+
PRANDOM_ADD_NOISE(state, i, 0, 0);
70121
}
71122

72123
/* Pseudo random number generator from numerical recipes. */

kernel/time/timer.c

Lines changed: 2 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -1706,6 +1706,8 @@ void update_process_times(int user_tick)
17061706
{
17071707
struct task_struct *p = current;
17081708

1709+
PRANDOM_ADD_NOISE(jiffies, user_tick, p, 0);
1710+
17091711
/* Note: this timer irq context must be accounted for as well. */
17101712
account_process_tick(p, user_tick);
17111713
run_local_timers();
@@ -1717,13 +1719,6 @@ void update_process_times(int user_tick)
17171719
scheduler_tick();
17181720
if (IS_ENABLED(CONFIG_POSIX_TIMERS))
17191721
run_posix_cpu_timers();
1720-
1721-
/* The current CPU might make use of net randoms without receiving IRQs
1722-
* to renew them often enough. Let's update the net_rand_state from a
1723-
* non-constant value that's not affine to the number of calls to make
1724-
* sure it's updated when there's some activity (we don't care in idle).
1725-
*/
1726-
this_cpu_add(net_rand_state.s1, rol32(jiffies, 24) + user_tick);
17271722
}
17281723

17291724
/**

0 commit comments

Comments
 (0)