Skip to content

Commit b93e21a

Browse files
committed
scftorture: Add smp_call_function_single() memory-ordering checks
This commit adds checks for memory misordering across calls to
smp_call_function_single() and also across returns in the case where
the caller waits.  Misordering results in a splat.

[ paulmck: s/GFP_KERNEL/GFP_ATOMIC/ per kernel test robot feedback. ]

Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
1 parent dba3142 commit b93e21a

1 file changed

Lines changed: 48 additions & 8 deletions

File tree

kernel/scftorture.c

Lines changed: 48 additions & 8 deletions
Original file line number | Diff line number | Diff line change
@@ -117,9 +117,20 @@ static struct scf_selector scf_sel_array[SCF_NPRIMS];
117117
static int scf_sel_array_len;
118118
static unsigned long scf_sel_totweight;
119119

120+
// Communicate between caller and handler.
121+
struct scf_check {
122+
bool scfc_in;
123+
bool scfc_out;
124+
int scfc_cpu; // -1 for not _single().
125+
bool scfc_wait;
126+
};
127+
120128
// Use to wait for all threads to start.
121129
static atomic_t n_started;
122130
static atomic_t n_errs;
131+
static atomic_t n_mb_in_errs;
132+
static atomic_t n_mb_out_errs;
133+
static atomic_t n_alloc_errs;
123134
static bool scfdone;
124135

125136
DEFINE_TORTURE_RANDOM_PERCPU(scf_torture_rand);
@@ -222,24 +233,27 @@ static struct scf_selector *scf_sel_rand(struct torture_random_state *trsp)
222233
// Update statistics and occasionally burn up mass quantities of CPU time,
223234
// if told to do so via scftorture.longwait. Otherwise, occasionally burn
224235
// a little bit.
225-
static void scf_handler(void *unused)
236+
static void scf_handler(void *scfc_in)
226237
{
227238
int i;
228239
int j;
229240
unsigned long r = torture_random(this_cpu_ptr(&scf_torture_rand));
241+
struct scf_check *scfcp = scfc_in;
230242

243+
if (likely(scfcp) && WARN_ON_ONCE(unlikely(!READ_ONCE(scfcp->scfc_in))))
244+
atomic_inc(&n_mb_in_errs);
231245
this_cpu_inc(scf_invoked_count);
232246
if (longwait <= 0) {
233247
if (!(r & 0xffc0))
234248
udelay(r & 0x3f);
235-
return;
249+
goto out;
236250
}
237251
if (r & 0xfff)
238-
return;
252+
goto out;
239253
r = (r >> 12);
240254
if (longwait <= 0) {
241255
udelay((r & 0xff) + 1);
242-
return;
256+
goto out;
243257
}
244258
r = r % longwait + 1;
245259
for (i = 0; i < r; i++) {
@@ -248,21 +262,32 @@ static void scf_handler(void *unused)
248262
cpu_relax();
249263
}
250264
}
265+
out:
266+
if (unlikely(!scfcp))
267+
return;
268+
if (scfcp->scfc_wait)
269+
WRITE_ONCE(scfcp->scfc_out, true);
270+
else
271+
kfree(scfcp);
251272
}
252273

253274
// As above, but check for correct CPU.
254-
static void scf_handler_1(void *me)
275+
static void scf_handler_1(void *scfc_in)
255276
{
256-
if (WARN_ON_ONCE(smp_processor_id() != (uintptr_t)me))
277+
struct scf_check *scfcp = scfc_in;
278+
279+
if (likely(scfcp) && WARN_ONCE(smp_processor_id() != scfcp->scfc_cpu, "%s: Wanted CPU %d got CPU %d\n", __func__, scfcp->scfc_cpu, smp_processor_id())) {
257280
atomic_inc(&n_errs);
258-
scf_handler(NULL);
281+
}
282+
scf_handler(scfcp);
259283
}
260284

261285
// Randomly do an smp_call_function*() invocation.
262286
static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_random_state *trsp)
263287
{
264288
uintptr_t cpu;
265289
int ret;
290+
struct scf_check *scfcp = NULL;
266291
struct scf_selector *scfsp = scf_sel_rand(trsp);
267292

268293
if (use_cpus_read_lock)
@@ -271,17 +296,32 @@ static void scftorture_invoke_one(struct scf_statistics *scfp, struct torture_ra
271296
preempt_disable();
272297
switch (scfsp->scfs_prim) {
273298
case SCF_PRIM_SINGLE:
299+
scfcp = kmalloc(sizeof(*scfcp), GFP_ATOMIC);
300+
if (WARN_ON_ONCE(!scfcp))
301+
atomic_inc(&n_alloc_errs);
274302
cpu = torture_random(trsp) % nr_cpu_ids;
275303
if (scfsp->scfs_wait)
276304
scfp->n_single_wait++;
277305
else
278306
scfp->n_single++;
279-
ret = smp_call_function_single(cpu, scf_handler_1, (void *)cpu, scfsp->scfs_wait);
307+
if (scfcp) {
308+
scfcp->scfc_cpu = cpu;
309+
scfcp->scfc_wait = scfsp->scfs_wait;
310+
scfcp->scfc_out = false;
311+
scfcp->scfc_in = true;
312+
}
313+
ret = smp_call_function_single(cpu, scf_handler_1, (void *)scfcp, scfsp->scfs_wait);
280314
if (ret) {
281315
if (scfsp->scfs_wait)
282316
scfp->n_single_wait_ofl++;
283317
else
284318
scfp->n_single_ofl++;
319+
kfree(scfcp);
320+
} else if (scfcp && scfsp->scfs_wait) {
321+
if (WARN_ON_ONCE(!scfcp->scfc_out))
322+
atomic_inc(&n_mb_out_errs); // Leak rather than trash!
323+
else
324+
kfree(scfcp);
285325
}
286326
break;
287327
case SCF_PRIM_MANY:

0 commit comments

Comments (0)