Skip to content

Commit 30f3f68

Browse files
committed
Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux
Pull arm64 fixes from Will Deacon:
 "Here's the weekly batch of fixes for arm64. Not an awful lot here, but
  there are still a few unresolved issues relating to CPU hotplug, RCU
  and IRQ tracing that I hope to queue fixes for next week.

  Summary:

   - Fix early use of kprobes

   - Fix kernel placement in kexec_file_load()

   - Bump maximum number of NUMA nodes"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: kexec_file: try more regions if loading segments fails
  arm64: kprobes: Use BRK instead of single-step when executing instructions out-of-line
  arm64: NUMA: Kconfig: Increase NODES_SHIFT to 4
2 parents 4257087 + 108aa50 commit 30f3f68

7 files changed

Lines changed: 67 additions & 59 deletions

File tree

arch/arm64/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1002,7 +1002,7 @@ config NUMA
10021002
config NODES_SHIFT
10031003
int "Maximum NUMA Nodes (as a power of 2)"
10041004
range 1 10
1005-
default "2"
1005+
default "4"
10061006
depends on NEED_MULTIPLE_NODES
10071007
help
10081008
Specify the maximum number of NUMA Nodes available on the target

arch/arm64/include/asm/brk-imm.h

Lines changed: 2 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -10,6 +10,7 @@
1010
* #imm16 values used for BRK instruction generation
1111
* 0x004: for installing kprobes
1212
* 0x005: for installing uprobes
13+
* 0x006: for kprobe software single-step
1314
* Allowed values for kgdb are 0x400 - 0x7ff
1415
* 0x100: for triggering a fault on purpose (reserved)
1516
* 0x400: for dynamic BRK instruction
@@ -19,6 +20,7 @@
1920
*/
2021
#define KPROBES_BRK_IMM 0x004
2122
#define UPROBES_BRK_IMM 0x005
23+
#define KPROBES_BRK_SS_IMM 0x006
2224
#define FAULT_BRK_IMM 0x100
2325
#define KGDB_DYN_DBG_BRK_IMM 0x400
2426
#define KGDB_COMPILED_DBG_BRK_IMM 0x401

arch/arm64/include/asm/debug-monitors.h

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -53,6 +53,7 @@
5353

5454
/* kprobes BRK opcodes with ESR encoding */
5555
#define BRK64_OPCODE_KPROBES (AARCH64_BREAK_MON | (KPROBES_BRK_IMM << 5))
56+
#define BRK64_OPCODE_KPROBES_SS (AARCH64_BREAK_MON | (KPROBES_BRK_SS_IMM << 5))
5657
/* uprobes BRK opcodes with ESR encoding */
5758
#define BRK64_OPCODE_UPROBES (AARCH64_BREAK_MON | (UPROBES_BRK_IMM << 5))
5859

arch/arm64/include/asm/kprobes.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -16,7 +16,7 @@
1616
#include <linux/percpu.h>
1717

1818
#define __ARCH_WANT_KPROBES_INSN_SLOT
19-
#define MAX_INSN_SIZE 1
19+
#define MAX_INSN_SIZE 2
2020

2121
#define flush_insn_slot(p) do { } while (0)
2222
#define kretprobe_blacklist_size 0

arch/arm64/kernel/kexec_image.c

Lines changed: 31 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -43,7 +43,7 @@ static void *image_load(struct kimage *image,
4343
u64 flags, value;
4444
bool be_image, be_kernel;
4545
struct kexec_buf kbuf;
46-
unsigned long text_offset;
46+
unsigned long text_offset, kernel_segment_number;
4747
struct kexec_segment *kernel_segment;
4848
int ret;
4949

@@ -88,11 +88,37 @@ static void *image_load(struct kimage *image,
8888
/* Adjust kernel segment with TEXT_OFFSET */
8989
kbuf.memsz += text_offset;
9090

91-
ret = kexec_add_buffer(&kbuf);
92-
if (ret)
91+
kernel_segment_number = image->nr_segments;
92+
93+
/*
94+
* The location of the kernel segment may make it impossible to satisfy
95+
* the other segment requirements, so we try repeatedly to find a
96+
* location that will work.
97+
*/
98+
while ((ret = kexec_add_buffer(&kbuf)) == 0) {
99+
/* Try to load additional data */
100+
kernel_segment = &image->segment[kernel_segment_number];
101+
ret = load_other_segments(image, kernel_segment->mem,
102+
kernel_segment->memsz, initrd,
103+
initrd_len, cmdline);
104+
if (!ret)
105+
break;
106+
107+
/*
108+
* We couldn't find space for the other segments; erase the
109+
* kernel segment and try the next available hole.
110+
*/
111+
image->nr_segments -= 1;
112+
kbuf.buf_min = kernel_segment->mem + kernel_segment->memsz;
113+
kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;
114+
}
115+
116+
if (ret) {
117+
pr_err("Could not find any suitable kernel location!");
93118
return ERR_PTR(ret);
119+
}
94120

95-
kernel_segment = &image->segment[image->nr_segments - 1];
121+
kernel_segment = &image->segment[kernel_segment_number];
96122
kernel_segment->mem += text_offset;
97123
kernel_segment->memsz -= text_offset;
98124
image->start = kernel_segment->mem;
@@ -101,12 +127,7 @@ static void *image_load(struct kimage *image,
101127
kernel_segment->mem, kbuf.bufsz,
102128
kernel_segment->memsz);
103129

104-
/* Load additional data */
105-
ret = load_other_segments(image,
106-
kernel_segment->mem, kernel_segment->memsz,
107-
initrd, initrd_len, cmdline);
108-
109-
return ERR_PTR(ret);
130+
return 0;
110131
}
111132

112133
#ifdef CONFIG_KEXEC_IMAGE_VERIFY_SIG

arch/arm64/kernel/machine_kexec_file.c

Lines changed: 8 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -240,6 +240,11 @@ static int prepare_elf_headers(void **addr, unsigned long *sz)
240240
return ret;
241241
}
242242

243+
/*
244+
* Tries to add the initrd and DTB to the image. If it is not possible to find
245+
* valid locations, this function will undo changes to the image and return non
246+
* zero.
247+
*/
243248
int load_other_segments(struct kimage *image,
244249
unsigned long kernel_load_addr,
245250
unsigned long kernel_size,
@@ -248,7 +253,8 @@ int load_other_segments(struct kimage *image,
248253
{
249254
struct kexec_buf kbuf;
250255
void *headers, *dtb = NULL;
251-
unsigned long headers_sz, initrd_load_addr = 0, dtb_len;
256+
unsigned long headers_sz, initrd_load_addr = 0, dtb_len,
257+
orig_segments = image->nr_segments;
252258
int ret = 0;
253259

254260
kbuf.image = image;
@@ -334,6 +340,7 @@ int load_other_segments(struct kimage *image,
334340
return 0;
335341

336342
out_err:
343+
image->nr_segments = orig_segments;
337344
vfree(dtb);
338345
return ret;
339346
}

arch/arm64/kernel/probes/kprobes.c

Lines changed: 23 additions & 46 deletions
Original file line number | Diff line number | Diff line change
@@ -36,25 +36,16 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
3636
static void __kprobes
3737
post_kprobe_handler(struct kprobe_ctlblk *, struct pt_regs *);
3838

39-
static int __kprobes patch_text(kprobe_opcode_t *addr, u32 opcode)
40-
{
41-
void *addrs[1];
42-
u32 insns[1];
43-
44-
addrs[0] = addr;
45-
insns[0] = opcode;
46-
47-
return aarch64_insn_patch_text(addrs, insns, 1);
48-
}
49-
5039
static void __kprobes arch_prepare_ss_slot(struct kprobe *p)
5140
{
41+
kprobe_opcode_t *addr = p->ainsn.api.insn;
42+
void *addrs[] = {addr, addr + 1};
43+
u32 insns[] = {p->opcode, BRK64_OPCODE_KPROBES_SS};
44+
5245
/* prepare insn slot */
53-
patch_text(p->ainsn.api.insn, p->opcode);
46+
aarch64_insn_patch_text(addrs, insns, 2);
5447

55-
flush_icache_range((uintptr_t) (p->ainsn.api.insn),
56-
(uintptr_t) (p->ainsn.api.insn) +
57-
MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
48+
flush_icache_range((uintptr_t)addr, (uintptr_t)(addr + MAX_INSN_SIZE));
5849

5950
/*
6051
* Needs restoring of return address after stepping xol.
@@ -128,13 +119,18 @@ void *alloc_insn_page(void)
128119
/* arm kprobe: install breakpoint in text */
129120
void __kprobes arch_arm_kprobe(struct kprobe *p)
130121
{
131-
patch_text(p->addr, BRK64_OPCODE_KPROBES);
122+
void *addr = p->addr;
123+
u32 insn = BRK64_OPCODE_KPROBES;
124+
125+
aarch64_insn_patch_text(&addr, &insn, 1);
132126
}
133127

134128
/* disarm kprobe: remove breakpoint from text */
135129
void __kprobes arch_disarm_kprobe(struct kprobe *p)
136130
{
137-
patch_text(p->addr, p->opcode);
131+
void *addr = p->addr;
132+
133+
aarch64_insn_patch_text(&addr, &p->opcode, 1);
138134
}
139135

140136
void __kprobes arch_remove_kprobe(struct kprobe *p)
@@ -163,20 +159,15 @@ static void __kprobes set_current_kprobe(struct kprobe *p)
163159
}
164160

165161
/*
166-
* Interrupts need to be disabled before single-step mode is set, and not
167-
* reenabled until after single-step mode ends.
168-
* Without disabling interrupt on local CPU, there is a chance of
169-
* interrupt occurrence in the period of exception return and start of
170-
* out-of-line single-step, that result in wrongly single stepping
171-
* into the interrupt handler.
162+
* Mask all of DAIF while executing the instruction out-of-line, to keep things
163+
* simple and avoid nesting exceptions. Interrupts do have to be disabled since
164+
* the kprobe state is per-CPU and doesn't get migrated.
172165
*/
173166
static void __kprobes kprobes_save_local_irqflag(struct kprobe_ctlblk *kcb,
174167
struct pt_regs *regs)
175168
{
176169
kcb->saved_irqflag = regs->pstate & DAIF_MASK;
177-
regs->pstate |= PSR_I_BIT;
178-
/* Unmask PSTATE.D for enabling software step exceptions. */
179-
regs->pstate &= ~PSR_D_BIT;
170+
regs->pstate |= DAIF_MASK;
180171
}
181172

182173
static void __kprobes kprobes_restore_local_irqflag(struct kprobe_ctlblk *kcb,
@@ -219,10 +210,7 @@ static void __kprobes setup_singlestep(struct kprobe *p,
219210
slot = (unsigned long)p->ainsn.api.insn;
220211

221212
set_ss_context(kcb, slot); /* mark pending ss */
222-
223-
/* IRQs and single stepping do not mix well. */
224213
kprobes_save_local_irqflag(kcb, regs);
225-
kernel_enable_single_step(regs);
226214
instruction_pointer_set(regs, slot);
227215
} else {
228216
/* insn simulation */
@@ -273,12 +261,8 @@ post_kprobe_handler(struct kprobe_ctlblk *kcb, struct pt_regs *regs)
273261
}
274262
/* call post handler */
275263
kcb->kprobe_status = KPROBE_HIT_SSDONE;
276-
if (cur->post_handler) {
277-
/* post_handler can hit breakpoint and single step
278-
* again, so we enable D-flag for recursive exception.
279-
*/
264+
if (cur->post_handler)
280265
cur->post_handler(cur, regs, 0);
281-
}
282266

283267
reset_current_kprobe();
284268
}
@@ -302,8 +286,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr)
302286
if (!instruction_pointer(regs))
303287
BUG();
304288

305-
kernel_disable_single_step();
306-
307289
if (kcb->kprobe_status == KPROBE_REENTER)
308290
restore_previous_kprobe(kcb);
309291
else
@@ -365,10 +347,6 @@ static void __kprobes kprobe_handler(struct pt_regs *regs)
365347
* pre-handler and it returned non-zero, it will
366348
* modify the execution path and no need to single
367349
* stepping. Let's just reset current kprobe and exit.
368-
*
369-
* pre_handler can hit a breakpoint and can step thru
370-
* before return, keep PSTATE D-flag enabled until
371-
* pre_handler return back.
372350
*/
373351
if (!p->pre_handler || !p->pre_handler(p, regs)) {
374352
setup_singlestep(p, regs, kcb, 0);
@@ -399,7 +377,7 @@ kprobe_ss_hit(struct kprobe_ctlblk *kcb, unsigned long addr)
399377
}
400378

401379
static int __kprobes
402-
kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
380+
kprobe_breakpoint_ss_handler(struct pt_regs *regs, unsigned int esr)
403381
{
404382
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
405383
int retval;
@@ -409,16 +387,15 @@ kprobe_single_step_handler(struct pt_regs *regs, unsigned int esr)
409387

410388
if (retval == DBG_HOOK_HANDLED) {
411389
kprobes_restore_local_irqflag(kcb, regs);
412-
kernel_disable_single_step();
413-
414390
post_kprobe_handler(kcb, regs);
415391
}
416392

417393
return retval;
418394
}
419395

420-
static struct step_hook kprobes_step_hook = {
421-
.fn = kprobe_single_step_handler,
396+
static struct break_hook kprobes_break_ss_hook = {
397+
.imm = KPROBES_BRK_SS_IMM,
398+
.fn = kprobe_breakpoint_ss_handler,
422399
};
423400

424401
static int __kprobes
@@ -486,7 +463,7 @@ int __kprobes arch_trampoline_kprobe(struct kprobe *p)
486463
int __init arch_init_kprobes(void)
487464
{
488465
register_kernel_break_hook(&kprobes_break_hook);
489-
register_kernel_step_hook(&kprobes_step_hook);
466+
register_kernel_break_hook(&kprobes_break_ss_hook);
490467

491468
return 0;
492469
}

0 commit comments

Comments (0)