Skip to content

Commit fcb3a1a

Browse files
committed
cpufreq: intel_pstate: Take CPUFREQ_GOV_STRICT_TARGET into account
Make intel_pstate take the new CPUFREQ_GOV_STRICT_TARGET governor flag into account when it operates in the passive mode with HWP enabled, so as to fix the "powersave" governor behavior in that case (currently, HWP is allowed to scale the performance all the way up to the policy max limit when the "powersave" governor is used, but it should be constrained to the policy min limit then).

Fixes: f6ebbcf ("cpufreq: intel_pstate: Implement passive mode with HWP enabled")
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Cc: 5.9+ <stable@vger.kernel.org> # 5.9+: 9a2a9eb cpufreq: Introduce governor flags
Cc: 5.9+ <stable@vger.kernel.org> # 5.9+: 218f668 cpufreq: Introduce CPUFREQ_GOV_STRICT_TARGET
Cc: 5.9+ <stable@vger.kernel.org> # 5.9+: ea9364b cpufreq: Add strict_target to struct cpufreq_policy
1 parent ea9364b commit fcb3a1a

1 file changed

Lines changed: 9 additions & 7 deletions

File tree

drivers/cpufreq/intel_pstate.c

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -2527,7 +2527,7 @@ static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, in
25272527
}
25282528

25292529
static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
2530-
bool fast_switch)
2530+
bool strict, bool fast_switch)
25312531
{
25322532
u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
25332533

@@ -2539,7 +2539,7 @@ static void intel_cpufreq_adjust_hwp(struct cpudata *cpu, u32 target_pstate,
25392539
* field in it, so opportunistically update the max too if needed.
25402540
*/
25412541
value &= ~HWP_MAX_PERF(~0L);
2542-
value |= HWP_MAX_PERF(cpu->max_perf_ratio);
2542+
value |= HWP_MAX_PERF(strict ? target_pstate : cpu->max_perf_ratio);
25432543

25442544
if (value == prev)
25452545
return;
@@ -2562,14 +2562,16 @@ static void intel_cpufreq_adjust_perf_ctl(struct cpudata *cpu,
25622562
pstate_funcs.get_val(cpu, target_pstate));
25632563
}
25642564

2565-
static int intel_cpufreq_update_pstate(struct cpudata *cpu, int target_pstate,
2566-
bool fast_switch)
2565+
static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
2566+
int target_pstate, bool fast_switch)
25672567
{
2568+
struct cpudata *cpu = all_cpu_data[policy->cpu];
25682569
int old_pstate = cpu->pstate.current_pstate;
25692570

25702571
target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
25712572
if (hwp_active) {
2572-
intel_cpufreq_adjust_hwp(cpu, target_pstate, fast_switch);
2573+
intel_cpufreq_adjust_hwp(cpu, target_pstate,
2574+
policy->strict_target, fast_switch);
25732575
cpu->pstate.current_pstate = target_pstate;
25742576
} else if (target_pstate != old_pstate) {
25752577
intel_cpufreq_adjust_perf_ctl(cpu, target_pstate, fast_switch);
@@ -2609,7 +2611,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
26092611
break;
26102612
}
26112613

2612-
target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, false);
2614+
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
26132615

26142616
freqs.new = target_pstate * cpu->pstate.scaling;
26152617

@@ -2628,7 +2630,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
26282630

26292631
target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
26302632

2631-
target_pstate = intel_cpufreq_update_pstate(cpu, target_pstate, true);
2633+
target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
26322634

26332635
return target_pstate * cpu->pstate.scaling;
26342636
}

0 commit comments

Comments (0)