Commit d0d5b40

Merge branch '7.0/fixes' into 7.0/base

Signed-off-by: Eric Naim <dnaim@cachyos.org>

2 parents: 9cf6ab5 + c27a982

33 files changed: 332 additions and 195 deletions

File tree

Documentation/admin-guide/kernel-parameters.txt

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -1750,8 +1750,8 @@ Kernel parameters
17501750
fred= [X86-64]
17511751
Enable/disable Flexible Return and Event Delivery.
17521752
Format: { on | off }
1753-
on: enable FRED when it's present.
1754-
off: disable FRED, the default setting.
1753+
on: enable FRED when it's present, the default setting.
1754+
off: disable FRED.
17551755

17561756
ftrace=[tracer]
17571757
[FTRACE] will set and start the specified tracer

arch/arm/include/asm/mmu_context.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -80,7 +80,7 @@ static inline void check_and_switch_context(struct mm_struct *mm,
8080
#ifndef MODULE
8181
#define finish_arch_post_lock_switch \
8282
finish_arch_post_lock_switch
83-
static inline void finish_arch_post_lock_switch(void)
83+
static __always_inline void finish_arch_post_lock_switch(void)
8484
{
8585
struct mm_struct *mm = current->mm;
8686

arch/riscv/include/asm/sync_core.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -6,7 +6,7 @@
66
* RISC-V implements return to user-space through an xRET instruction,
77
* which is not core serializing.
88
*/
9-
static inline void sync_core_before_usermode(void)
9+
static __always_inline void sync_core_before_usermode(void)
1010
{
1111
asm volatile ("fence.i" ::: "memory");
1212
}

arch/s390/include/asm/mmu_context.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -93,7 +93,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
9393
}
9494

9595
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
96-
static inline void finish_arch_post_lock_switch(void)
96+
static __always_inline void finish_arch_post_lock_switch(void)
9797
{
9898
struct task_struct *tsk = current;
9999
struct mm_struct *mm = tsk->mm;

arch/sparc/include/asm/mmu_context_64.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -160,7 +160,7 @@ static inline void arch_start_context_switch(struct task_struct *prev)
160160
}
161161

162162
#define finish_arch_post_lock_switch finish_arch_post_lock_switch
163-
static inline void finish_arch_post_lock_switch(void)
163+
static __always_inline void finish_arch_post_lock_switch(void)
164164
{
165165
/* Restore the state of MCDPER register for the new process
166166
* just switched to.

arch/x86/Kconfig

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -557,7 +557,7 @@ config X86_FRED
557557
bool "Flexible Return and Event Delivery"
558558
depends on X86_64
559559
help
560-
When enabled, try to use Flexible Return and Event Delivery
560+
When enabled, use Flexible Return and Event Delivery
561561
instead of the legacy SYSCALL/SYSENTER/IDT architecture for
562562
ring transitions and exception/interrupt handling if the
563563
system supports it.

arch/x86/include/asm/mmu_context.h

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -136,9 +136,6 @@ static inline void mm_reset_untag_mask(struct mm_struct *mm)
136136
}
137137
#endif
138138

139-
#define enter_lazy_tlb enter_lazy_tlb
140-
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk);
141-
142139
extern void mm_init_global_asid(struct mm_struct *mm);
143140
extern void mm_free_global_asid(struct mm_struct *mm);
144141

arch/x86/include/asm/sync_core.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -93,7 +93,7 @@ static __always_inline void sync_core(void)
9393
* to user-mode. x86 implements return to user-space through sysexit,
9494
* sysrel, and sysretq, which are not core serializing.
9595
*/
96-
static inline void sync_core_before_usermode(void)
96+
static __always_inline void sync_core_before_usermode(void)
9797
{
9898
/* With PTI, we unconditionally serialize before running user code. */
9999
if (static_cpu_has(X86_FEATURE_PTI))

arch/x86/include/asm/tlbflush.h

Lines changed: 26 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -172,6 +172,28 @@ struct tlb_state_shared {
172172
};
173173
DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
174174

175+
/*
176+
* Please ignore the name of this function. It should be called
177+
* switch_to_kernel_thread().
178+
*
179+
* enter_lazy_tlb() is a hint from the scheduler that we are entering a
180+
* kernel thread or other context without an mm. Acceptable implementations
181+
* include doing nothing whatsoever, switching to init_mm, or various clever
182+
* lazy tricks to try to minimize TLB flushes.
183+
*
184+
* The scheduler reserves the right to call enter_lazy_tlb() several times
185+
* in a row. It will notify us that we're going back to a real mm by
186+
* calling switch_mm_irqs_off().
187+
*/
188+
#define enter_lazy_tlb enter_lazy_tlb
189+
static __always_inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
190+
{
191+
if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
192+
return;
193+
194+
this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
195+
}
196+
175197
bool nmi_uaccess_okay(void);
176198
#define nmi_uaccess_okay nmi_uaccess_okay
177199

@@ -480,6 +502,10 @@ static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask)
480502
{
481503
}
482504
#endif
505+
#else /* !MODULE */
506+
#define enter_lazy_tlb enter_lazy_tlb
507+
extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
508+
__compiletime_error("enter_lazy_tlb() should not be used in modules");
483509
#endif /* !MODULE */
484510

485511
static inline void __native_tlb_flush_global(unsigned long cr4)

arch/x86/kernel/cpu/common.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -1759,7 +1759,7 @@ static void __init cpu_parse_early_param(void)
17591759

17601760
/* Minimize the gap between FRED is available and available but disabled. */
17611761
arglen = cmdline_find_option(boot_command_line, "fred", arg, sizeof(arg));
1762-
if (arglen != 2 || strncmp(arg, "on", 2))
1762+
if (arglen > 0 && (arglen != 2 || strncmp(arg, "on", 2)))
17631763
setup_clear_cpu_cap(X86_FEATURE_FRED);
17641764

17651765
arglen = cmdline_find_option(boot_command_line, "clearcpuid", arg, sizeof(arg));

Comments (0)