Skip to content

Commit ad14c19

Browse files
nixiaoming authored and willdeacon committed
arm64: fix some spelling mistakes in the comments by codespell
arch/arm64/include/asm/cpu_ops.h:24: necesary ==> necessary arch/arm64/include/asm/kvm_arm.h:69: maintainance ==> maintenance arch/arm64/include/asm/cpufeature.h:361: capabilties ==> capabilities arch/arm64/kernel/perf_regs.c:19: compatability ==> compatibility arch/arm64/kernel/smp_spin_table.c:86: endianess ==> endianness arch/arm64/kernel/smp_spin_table.c:88: endianess ==> endianness arch/arm64/kvm/vgic/vgic-mmio-v3.c:1004: targetting ==> targeting arch/arm64/kvm/vgic/vgic-mmio-v3.c:1005: targetting ==> targeting Signed-off-by: Xiaoming Ni <nixiaoming@huawei.com> Link: https://lore.kernel.org/r/20200828031822.35928-1-nixiaoming@huawei.com Signed-off-by: Will Deacon <will@kernel.org>
1 parent f75aef3 commit ad14c19

6 files changed

Lines changed: 8 additions & 8 deletions

File tree

arch/arm64/include/asm/cpu_ops.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,7 +21,7 @@
2121
* mechanism for doing so, tests whether it is possible to boot
2222
* the given CPU.
2323
* @cpu_boot: Boots a cpu into the kernel.
24-
* @cpu_postboot: Optionally, perform any post-boot cleanup or necesary
24+
* @cpu_postboot: Optionally, perform any post-boot cleanup or necessary
2525
* synchronisation. Called from the cpu being booted.
2626
* @cpu_can_disable: Determines whether a CPU can be disabled based on
2727
* mechanism-specific information.

arch/arm64/include/asm/cpufeature.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -358,7 +358,7 @@ static inline int cpucap_default_scope(const struct arm64_cpu_capabilities *cap)
358358
}
359359

360360
/*
361-
* Generic helper for handling capabilties with multiple (match,enable) pairs
361+
* Generic helper for handling capabilities with multiple (match,enable) pairs
362362
* of call backs, sharing the same capability bit.
363363
* Iterate over each entry to see if at least one matches.
364364
*/

arch/arm64/include/asm/kvm_arm.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@
6666
* TWI: Trap WFI
6767
* TIDCP: Trap L2CTLR/L2ECTLR
6868
* BSU_IS: Upgrade barriers to the inner shareable domain
69-
* FB: Force broadcast of all maintainance operations
69+
* FB: Force broadcast of all maintenance operations
7070
* AMO: Override CPSR.A and enable signaling with VA
7171
* IMO: Override CPSR.I and enable signaling with VI
7272
* FMO: Override CPSR.F and enable signaling with VF

arch/arm64/kernel/perf_regs.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -16,7 +16,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
1616

1717
/*
1818
* Our handling of compat tasks (PERF_SAMPLE_REGS_ABI_32) is weird, but
19-
* we're stuck with it for ABI compatability reasons.
19+
* we're stuck with it for ABI compatibility reasons.
2020
*
2121
* For a 32-bit consumer inspecting a 32-bit task, then it will look at
2222
* the first 16 registers (see arch/arm/include/uapi/asm/perf_regs.h).

arch/arm64/kernel/smp_spin_table.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -83,9 +83,9 @@ static int smp_spin_table_cpu_prepare(unsigned int cpu)
8383

8484
/*
8585
* We write the release address as LE regardless of the native
86-
* endianess of the kernel. Therefore, any boot-loaders that
86+
* endianness of the kernel. Therefore, any boot-loaders that
8787
* read this address need to convert this address to the
88-
* boot-loader's endianess before jumping. This is mandated by
88+
* boot-loader's endianness before jumping. This is mandated by
8989
* the boot protocol.
9090
*/
9191
writeq_relaxed(__pa_symbol(secondary_holding_pen), release_addr);

arch/arm64/kvm/vgic/vgic-mmio-v3.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1001,8 +1001,8 @@ void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1)
10011001
raw_spin_lock_irqsave(&irq->irq_lock, flags);
10021002

10031003
/*
1004-
* An access targetting Group0 SGIs can only generate
1005-
* those, while an access targetting Group1 SGIs can
1004+
* An access targeting Group0 SGIs can only generate
1005+
* those, while an access targeting Group1 SGIs can
10061006
* generate interrupts of either group.
10071007
*/
10081008
if (!irq->group || allow_group1) {

0 commit comments

Comments (0)