Skip to content

Commit 2ab99ad

Browse files
committed
Merge tag 'sched-urgent-2026-04-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull scheduler fixes from Ingo Molnar:

 - Fix zero_vruntime tracking again (Peter Zijlstra)

 - Fix avg_vruntime() usage in sched_debug (Peter Zijlstra)

* tag 'sched-urgent-2026-04-05' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  sched/debug: Fix avg_vruntime() usage
  sched/fair: Fix zero_vruntime tracking fix
2 parents 7bba6c8 + e08d007 commit 2ab99ad

2 files changed

Lines changed: 6 additions & 8 deletions

File tree

kernel/sched/debug.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -902,6 +902,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
902902
void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
903903
{
904904
s64 left_vruntime = -1, zero_vruntime, right_vruntime = -1, left_deadline = -1, spread;
905+
u64 avruntime;
905906
struct sched_entity *last, *first, *root;
906907
struct rq *rq = cpu_rq(cpu);
907908
unsigned long flags;
@@ -925,6 +926,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
925926
if (last)
926927
right_vruntime = last->vruntime;
927928
zero_vruntime = cfs_rq->zero_vruntime;
929+
avruntime = avg_vruntime(cfs_rq);
928930
raw_spin_rq_unlock_irqrestore(rq, flags);
929931

930932
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "left_deadline",
@@ -934,7 +936,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
934936
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "zero_vruntime",
935937
SPLIT_NS(zero_vruntime));
936938
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "avg_vruntime",
937-
SPLIT_NS(avg_vruntime(cfs_rq)));
939+
SPLIT_NS(avruntime));
938940
SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "right_vruntime",
939941
SPLIT_NS(right_vruntime));
940942
spread = right_vruntime - left_vruntime;

kernel/sched/fair.c

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -707,7 +707,7 @@ void update_zero_vruntime(struct cfs_rq *cfs_rq, s64 delta)
707707
* Called in:
708708
* - place_entity() -- before enqueue
709709
* - update_entity_lag() -- before dequeue
710-
* - entity_tick()
710+
* - update_deadline() -- slice expiration
711711
*
712712
* This means it is one entry 'behind' but that puts it close enough to where
713713
* the bound on entity_key() is at most two lag bounds.
@@ -1131,6 +1131,7 @@ static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
11311131
* EEVDF: vd_i = ve_i + r_i / w_i
11321132
*/
11331133
se->deadline = se->vruntime + calc_delta_fair(se->slice, se);
1134+
avg_vruntime(cfs_rq);
11341135

11351136
/*
11361137
* The task has consumed its request, reschedule.
@@ -5593,11 +5594,6 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
55935594
update_load_avg(cfs_rq, curr, UPDATE_TG);
55945595
update_cfs_group(curr);
55955596

5596-
/*
5597-
* Pulls along cfs_rq::zero_vruntime.
5598-
*/
5599-
avg_vruntime(cfs_rq);
5600-
56015597
#ifdef CONFIG_SCHED_HRTICK
56025598
/*
56035599
* queued ticks are scheduled to match the slice, so don't bother
@@ -9128,7 +9124,7 @@ static void yield_task_fair(struct rq *rq)
91289124
*/
91299125
if (entity_eligible(cfs_rq, se)) {
91309126
se->vruntime = se->deadline;
9131-
se->deadline += calc_delta_fair(se->slice, se);
9127+
update_deadline(cfs_rq, se);
91329128
}
91339129
}
91349130

0 commit comments

Comments (0)