Skip to content

Commit d13027b

Browse files
committed
Revert "arm64: initialize per-cpu offsets earlier"
This reverts commit 353e228.

Qian Cai reports that TX2 no longer boots with his .config as it appears
that task_cpu() gets instrumented and used before KASAN has been
initialised. Although Mark has a proposed fix, let's take the safe
option of reverting this for now and sorting it out properly later.

Link: https://lore.kernel.org/r/711bc57a314d8d646b41307008db2845b7537b3d.camel@redhat.com
Reported-by: Qian Cai <cai@redhat.com>
Tested-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
1 parent a82e4ef commit d13027b

4 files changed

Lines changed: 11 additions & 19 deletions

File tree

arch/arm64/include/asm/cpu.h

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -68,6 +68,4 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info);
6868
void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
6969
struct cpuinfo_arm64 *boot);
7070

71-
void init_this_cpu_offset(void);
72-
7371
#endif /* __ASM_CPU_H */

arch/arm64/kernel/head.S

Lines changed: 0 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -448,8 +448,6 @@ SYM_FUNC_START_LOCAL(__primary_switched)
448448
bl __pi_memset
449449
dsb ishst // Make zero page visible to PTW
450450

451-
bl init_this_cpu_offset
452-
453451
#ifdef CONFIG_KASAN
454452
bl kasan_early_init
455453
#endif
@@ -756,7 +754,6 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
756754
ptrauth_keys_init_cpu x2, x3, x4, x5
757755
#endif
758756

759-
bl init_this_cpu_offset
760757
b secondary_start_kernel
761758
SYM_FUNC_END(__secondary_switched)
762759

arch/arm64/kernel/setup.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -87,6 +87,12 @@ void __init smp_setup_processor_id(void)
8787
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
8888
set_cpu_logical_map(0, mpidr);
8989

90+
/*
91+
* clear __my_cpu_offset on boot CPU to avoid hang caused by
92+
* using percpu variable early, for example, lockdep will
93+
* access percpu variable inside lock_release
94+
*/
95+
set_my_cpu_offset(0);
9096
pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
9197
(unsigned long)mpidr, read_cpuid_id());
9298
}
@@ -276,12 +282,6 @@ u64 cpu_logical_map(int cpu)
276282
}
277283
EXPORT_SYMBOL_GPL(cpu_logical_map);
278284

279-
void noinstr init_this_cpu_offset(void)
280-
{
281-
unsigned int cpu = task_cpu(current);
282-
set_my_cpu_offset(per_cpu_offset(cpu));
283-
}
284-
285285
void __init __no_sanitize_address setup_arch(char **cmdline_p)
286286
{
287287
init_mm.start_code = (unsigned long) _text;

arch/arm64/kernel/smp.c

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,10 @@ asmlinkage notrace void secondary_start_kernel(void)
192192
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
193193
struct mm_struct *mm = &init_mm;
194194
const struct cpu_operations *ops;
195-
unsigned int cpu = smp_processor_id();
195+
unsigned int cpu;
196+
197+
cpu = task_cpu(current);
198+
set_my_cpu_offset(per_cpu_offset(cpu));
196199

197200
/*
198201
* All kernel threads share the same mm context; grab a
@@ -432,13 +435,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
432435

433436
void __init smp_prepare_boot_cpu(void)
434437
{
435-
/*
436-
* Now that setup_per_cpu_areas() has allocated the runtime per-cpu
437-
* areas it is only safe to read the CPU0 boot-time area, and we must
438-
* reinitialize the offset to point to the runtime area.
439-
*/
440-
init_this_cpu_offset();
441-
438+
set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
442439
cpuinfo_store_boot_cpu();
443440

444441
/*

0 commit comments

Comments (0)