@@ -831,7 +831,7 @@ void init_entity_runnable_average(struct sched_entity *se)
 void post_init_entity_util_avg(struct task_struct *p)
 {
 }
-static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+static void update_tg_load_avg(struct cfs_rq *cfs_rq)
 {
 }
 #endif /* CONFIG_SMP */
@@ -3293,7 +3293,6 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
 /**
  * update_tg_load_avg - update the tg's load avg
  * @cfs_rq: the cfs_rq whose avg changed
- * @force: update regardless of how small the difference
  *
  * This function 'ensures': tg->load_avg := \Sum tg->cfs_rq[]->avg.load.
  * However, because tg->load_avg is a global value there are performance
@@ -3305,7 +3304,7 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
  *
  * Updating tg's load_avg is necessary before update_cfs_share().
  */
-static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq)
 {
 	long delta = cfs_rq->avg.load_avg - cfs_rq->tg_load_avg_contrib;
 
@@ -3315,7 +3314,7 @@ static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force)
 	if (cfs_rq->tg == &root_task_group)
 		return;
 
-	if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
+	if (abs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
 		atomic_long_add(delta, &cfs_rq->tg->load_avg);
 		cfs_rq->tg_load_avg_contrib = cfs_rq->avg.load_avg;
 	}
@@ -3617,7 +3616,7 @@ static inline bool skip_blocked_update(struct sched_entity *se)
 
 #else /* CONFIG_FAIR_GROUP_SCHED */
 
-static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {}
+static inline void update_tg_load_avg(struct cfs_rq *cfs_rq) {}
 
 static inline int propagate_entity_load_avg(struct sched_entity *se)
 {
@@ -3805,13 +3804,13 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 		 * IOW we're enqueueing a task on a new CPU.
 		 */
 		attach_entity_load_avg(cfs_rq, se);
-		update_tg_load_avg(cfs_rq, 0);
+		update_tg_load_avg(cfs_rq);
 
 	} else if (decayed) {
 		cfs_rq_util_change(cfs_rq, 0);
 
 		if (flags & UPDATE_TG)
-			update_tg_load_avg(cfs_rq, 0);
+			update_tg_load_avg(cfs_rq);
 	}
 }
 
@@ -7898,7 +7897,7 @@ static bool __update_blocked_fair(struct rq *rq, bool *done)
 		struct sched_entity *se;
 
 		if (update_cfs_rq_load_avg(cfs_rq_clock_pelt(cfs_rq), cfs_rq)) {
-			update_tg_load_avg(cfs_rq, 0);
+			update_tg_load_avg(cfs_rq);
 
 			if (cfs_rq == &rq->cfs)
 				decayed = true;
@@ -10797,7 +10796,7 @@ static void detach_entity_cfs_rq(struct sched_entity *se)
 	/* Catch up with the cfs_rq and remove our load when we leave */
 	update_load_avg(cfs_rq, se, 0);
 	detach_entity_load_avg(cfs_rq, se);
-	update_tg_load_avg(cfs_rq, false);
+	update_tg_load_avg(cfs_rq);
 	propagate_entity_cfs_rq(se);
 }
 
@@ -10816,7 +10815,7 @@ static void attach_entity_cfs_rq(struct sched_entity *se)
 	/* Synchronize entity with its cfs_rq */
 	update_load_avg(cfs_rq, se, sched_feat(ATTACH_AGE_LOAD) ? 0 : SKIP_AGE_LOAD);
 	attach_entity_load_avg(cfs_rq, se);
-	update_tg_load_avg(cfs_rq, false);
+	update_tg_load_avg(cfs_rq);
 	propagate_entity_cfs_rq(se);
 }
 
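For readers outside the kernel tree, the sketch below is a minimal stand-alone illustration of the threshold filter that update_tg_load_avg() keeps after this change: a cfs_rq's contribution is only folded into the shared tg->load_avg when it has drifted by more than 1/64 of the last published value, which limits traffic on the group-wide counter. The struct names and the demo_* helper are hypothetical stand-ins, not the kernel's types or API.

/*
 * Hypothetical user-space sketch (not kernel code) of the 1/64 drift
 * filter retained by update_tg_load_avg() once the force path is gone.
 */
#include <stdio.h>
#include <stdlib.h>

struct demo_tg {
	long load_avg;                    /* shared, group-wide sum */
};

struct demo_cfs_rq {
	struct demo_tg *tg;
	long load_avg;                    /* this runqueue's current average */
	long tg_load_avg_contrib;         /* last value folded into tg->load_avg */
};

static void demo_update_tg_load_avg(struct demo_cfs_rq *cfs_rq)
{
	long delta = cfs_rq->load_avg - cfs_rq->tg_load_avg_contrib;

	/* Only touch the shared counter when we drifted by more than 1/64. */
	if (labs(delta) > cfs_rq->tg_load_avg_contrib / 64) {
		cfs_rq->tg->load_avg += delta;
		cfs_rq->tg_load_avg_contrib = cfs_rq->load_avg;
	}
}

int main(void)
{
	struct demo_tg tg = { .load_avg = 0 };
	struct demo_cfs_rq rq = { .tg = &tg, .load_avg = 0, .tg_load_avg_contrib = 0 };

	rq.load_avg = 1024;               /* large change: gets published */
	demo_update_tg_load_avg(&rq);
	printf("tg.load_avg = %ld\n", tg.load_avg);   /* 1024 */

	rq.load_avg = 1030;               /* drift of 6 < 1024/64 = 16: skipped */
	demo_update_tg_load_avg(&rq);
	printf("tg.load_avg = %ld\n", tg.load_avg);   /* still 1024 */
	return 0;
}

With the force parameter removed, every remaining caller goes through this same filter, which is exactly what the call-site hunks above reduce to.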