Skip to content

Commit b92071c

Browse files
committed
Revert "sched/fair: Proportional newidle balance"
This reverts commit 33cf66d. Signed-off-by: Eric Naim <dnaim@cachyos.org>
1 parent 9502983 commit b92071c

6 files changed

Lines changed: 4 additions & 64 deletions

File tree

include/linux/sched/topology.h

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -92,9 +92,6 @@ struct sched_domain {
9292
unsigned int nr_balance_failed; /* initialise to 0 */
9393

9494
/* idle_balance() stats */
95-
unsigned int newidle_call;
96-
unsigned int newidle_success;
97-
unsigned int newidle_ratio;
9895
u64 max_newidle_lb_cost;
9996
unsigned long last_decay_max_lb_cost;
10097

kernel/sched/core.c

Lines changed: 0 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -124,7 +124,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_exit_tp);
124124
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_set_need_resched_tp);
125125

126126
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
127-
DEFINE_PER_CPU(struct rnd_state, sched_rnd_state);
128127

129128
#ifdef CONFIG_SCHED_PROXY_EXEC
130129
DEFINE_STATIC_KEY_TRUE(__sched_proxy_exec);
@@ -8545,8 +8544,6 @@ void __init sched_init_smp(void)
85458544
{
85468545
sched_init_numa(NUMA_NO_NODE);
85478546

8548-
prandom_init_once(&sched_rnd_state);
8549-
85508547
/*
85518548
* There's no userspace yet to cause hotplug operations; hence all the
85528549
* CPU masks are stable and all blatant races in the below code cannot

kernel/sched/fair.c

Lines changed: 4 additions & 40 deletions
Original file line number | Diff line number | Diff line change
@@ -12279,27 +12279,11 @@ void update_max_interval(void)
1227912279
max_load_balance_interval = HZ*num_online_cpus()/10;
1228012280
}
1228112281

12282-
static inline void update_newidle_stats(struct sched_domain *sd, unsigned int success)
12283-
{
12284-
sd->newidle_call++;
12285-
sd->newidle_success += success;
12286-
12287-
if (sd->newidle_call >= 1024) {
12288-
sd->newidle_ratio = sd->newidle_success;
12289-
sd->newidle_call /= 2;
12290-
sd->newidle_success /= 2;
12291-
}
12292-
}
12293-
12294-
static inline bool
12295-
update_newidle_cost(struct sched_domain *sd, u64 cost, unsigned int success)
12282+
static inline bool update_newidle_cost(struct sched_domain *sd, u64 cost)
1229612283
{
1229712284
unsigned long next_decay = sd->last_decay_max_lb_cost + HZ;
1229812285
unsigned long now = jiffies;
1229912286

12300-
if (cost)
12301-
update_newidle_stats(sd, success);
12302-
1230312287
if (cost > sd->max_newidle_lb_cost) {
1230412288
/*
1230512289
* Track max cost of a domain to make sure to not delay the
@@ -12347,7 +12331,7 @@ static void sched_balance_domains(struct rq *rq, enum cpu_idle_type idle)
1234712331
* Decay the newidle max times here because this is a regular
1234812332
* visit to all the domains.
1234912333
*/
12350-
need_decay = update_newidle_cost(sd, 0, 0);
12334+
need_decay = update_newidle_cost(sd, 0);
1235112335
max_cost += sd->max_newidle_lb_cost;
1235212336

1235312337
/*
@@ -12990,37 +12974,17 @@ static int sched_balance_newidle(struct rq *this_rq, struct rq_flags *rf)
1299012974
break;
1299112975

1299212976
if (sd->flags & SD_BALANCE_NEWIDLE) {
12993-
unsigned int weight = 1;
12994-
12995-
if (sched_feat(NI_RANDOM)) {
12996-
/*
12997-
* Throw a 1k sided dice; and only run
12998-
* newidle_balance according to the success
12999-
* rate.
13000-
*/
13001-
u32 d1k = sched_rng() % 1024;
13002-
weight = 1 + sd->newidle_ratio;
13003-
if (d1k > weight) {
13004-
update_newidle_stats(sd, 0);
13005-
continue;
13006-
}
13007-
weight = (1024 + weight/2) / weight;
13008-
}
1300912977

1301012978
pulled_task = sched_balance_rq(this_cpu, this_rq,
1301112979
sd, CPU_NEWLY_IDLE,
1301212980
&continue_balancing);
1301312981

1301412982
t1 = sched_clock_cpu(this_cpu);
1301512983
domain_cost = t1 - t0;
12984+
update_newidle_cost(sd, domain_cost);
12985+
1301612986
curr_cost += domain_cost;
1301712987
t0 = t1;
13018-
13019-
/*
13020-
* Track max cost of a domain to make sure to not delay the
13021-
* next wakeup on the CPU.
13022-
*/
13023-
update_newidle_cost(sd, domain_cost, weight * !!pulled_task);
1302412988
}
1302512989

1302612990
/*

kernel/sched/features.h

Lines changed: 0 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -121,8 +121,3 @@ SCHED_FEAT(WA_BIAS, true)
121121
SCHED_FEAT(UTIL_EST, true)
122122

123123
SCHED_FEAT(LATENCY_WARN, false)
124-
125-
/*
126-
* Do newidle balancing proportional to its success rate using randomization.
127-
*/
128-
SCHED_FEAT(NI_RANDOM, true)

kernel/sched/sched.h

Lines changed: 0 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -5,7 +5,6 @@
55
#ifndef _KERNEL_SCHED_SCHED_H
66
#define _KERNEL_SCHED_SCHED_H
77

8-
#include <linux/prandom.h>
98
#include <linux/sched/affinity.h>
109
#include <linux/sched/autogroup.h>
1110
#include <linux/sched/cpufreq.h>
@@ -1375,12 +1374,6 @@ static inline bool is_migration_disabled(struct task_struct *p)
13751374
}
13761375

13771376
DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
1378-
DECLARE_PER_CPU(struct rnd_state, sched_rnd_state);
1379-
1380-
static inline u32 sched_rng(void)
1381-
{
1382-
return prandom_u32_state(this_cpu_ptr(&sched_rnd_state));
1383-
}
13841377

13851378
static __always_inline struct rq *__this_rq(void)
13861379
{

kernel/sched/topology.c

Lines changed: 0 additions & 6 deletions
Original file line number | Diff line number | Diff line change
@@ -1674,12 +1674,6 @@ sd_init(struct sched_domain_topology_level *tl,
16741674

16751675
.last_balance = jiffies,
16761676
.balance_interval = sd_weight,
1677-
1678-
/* 50% success rate */
1679-
.newidle_call = 512,
1680-
.newidle_success = 256,
1681-
.newidle_ratio = 512,
1682-
16831677
.max_newidle_lb_cost = 0,
16841678
.last_decay_max_lb_cost = jiffies,
16851679
.child = child,

0 commit comments

Comments (0)