author     David S. Miller <davem@davemloft.net>   2013-11-04 13:48:30 -0500
committer  David S. Miller <davem@davemloft.net>   2013-11-04 13:48:30 -0500
commit     394efd19d5fcae936261bd48e5b33b21897aacf8 (patch)
tree       c48cf3ddbb07fd87309f1abdf31a27c71330e587 /drivers/cpufreq/intel_pstate.c
parent     f421436a591d34fa5279b54a96ac07d70250cc8d (diff)
parent     be408cd3e1fef73e9408b196a79b9934697fe3b1 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ethernet/emulex/benet/be.h
drivers/net/netconsole.c
net/bridge/br_private.h
Three mostly trivial conflicts.
The net/bridge/br_private.h conflict was a function signature (argument
addition) change overlapping with the extern removals from Joe Perches.
In drivers/net/netconsole.c we had one change adjusting a printk message
whilst another changed "printk(KERN_INFO" into "pr_info(".
Lastly, the emulex change was a new inline function addition overlapping
with Joe Perches's extern removals.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'drivers/cpufreq/intel_pstate.c')
-rw-r--r--  drivers/cpufreq/intel_pstate.c  38
1 file changed, 18 insertions(+), 20 deletions(-)
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index badf6206b2b2..eb3fdc755000 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -48,7 +48,7 @@ static inline int32_t div_fp(int32_t x, int32_t y)
 }
 
 struct sample {
-        int core_pct_busy;
+        int32_t core_pct_busy;
         u64 aperf;
         u64 mperf;
         int freq;
@@ -68,7 +68,7 @@ struct _pid {
         int32_t i_gain;
         int32_t d_gain;
         int deadband;
-        int last_err;
+        int32_t last_err;
 };
 
 struct cpudata {
@@ -153,16 +153,15 @@ static inline void pid_d_gain_set(struct _pid *pid, int percent)
         pid->d_gain = div_fp(int_tofp(percent), int_tofp(100));
 }
 
-static signed int pid_calc(struct _pid *pid, int busy)
+static signed int pid_calc(struct _pid *pid, int32_t busy)
 {
-        signed int err, result;
+        signed int result;
         int32_t pterm, dterm, fp_error;
         int32_t integral_limit;
 
-        err = pid->setpoint - busy;
-        fp_error = int_tofp(err);
+        fp_error = int_tofp(pid->setpoint) - busy;
 
-        if (abs(err) <= pid->deadband)
+        if (abs(fp_error) <= int_tofp(pid->deadband))
                 return 0;
 
         pterm = mul_fp(pid->p_gain, fp_error);
@@ -176,8 +175,8 @@ static signed int pid_calc(struct _pid *pid, int busy)
         if (pid->integral < -integral_limit)
                 pid->integral = -integral_limit;
 
-        dterm = mul_fp(pid->d_gain, (err - pid->last_err));
-        pid->last_err = err;
+        dterm = mul_fp(pid->d_gain, fp_error - pid->last_err);
+        pid->last_err = fp_error;
 
         result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
 
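The pid_calc() hunks above are the core of the change: the busy error now stays in the driver's fixed-point format instead of being truncated to a whole integer before the deadband check and the derivative term. Below is a minimal user-space sketch of that arithmetic. It is not the driver's code: the 8 fractional bits (FRAC_BITS), the gain, deadband and busy values are all assumptions made up for illustration.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define FRAC_BITS 8                              /* assumed; illustrative only      */
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

/* multiply two fixed-point values, result stays in fixed point */
static int32_t mul_fp(int32_t x, int32_t y)
{
        return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
}

int main(void)
{
        int32_t setpoint = 97;                   /* integer percent, made up        */
        int32_t busy     = int_tofp(97) + 128;   /* 97.5% busy, already fixed point */
        int32_t p_gain   = 51;                   /* ~0.2 in 24.8 fixed point        */
        int32_t deadband = 0;

        /*
         * New style: the error keeps its fractional part.  The old code would
         * have truncated busy to 97 first and seen an error of exactly 0.
         */
        int32_t fp_error = int_tofp(setpoint) - busy;        /* -128, i.e. -0.5% */
        if (abs(fp_error) <= int_tofp(deadband))
                return 0;
        int32_t pterm = mul_fp(p_gain, fp_error);

        printf("fp_error = %d (%.2f%%), pterm = %d\n",
               fp_error, fp_error / 256.0, pterm);
        return 0;
}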
@@ -367,12 +366,13 @@ static int intel_pstate_turbo_pstate(void)
 static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
 {
         int max_perf = cpu->pstate.turbo_pstate;
+        int max_perf_adj;
         int min_perf;
         if (limits.no_turbo)
                 max_perf = cpu->pstate.max_pstate;
 
-        max_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
-        *max = clamp_t(int, max_perf,
+        max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
+        *max = clamp_t(int, max_perf_adj,
                         cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
         min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits.min_perf));
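To illustrate the intel_pstate_get_min_max() hunk with made-up numbers: with cpu->pstate.turbo_pstate = 32 and both limits.max_perf and limits.min_perf at 0.5 in fixed point, the old code overwrote max_perf with the scaled value (32 -> 16) and then derived min_perf from it (16 * 0.5 = 8), compounding the two limits; with the new max_perf_adj temporary, min_perf is derived from the unscaled max_perf (32 * 0.5 = 16).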
@@ -436,8 +436,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu,
                                         struct sample *sample)
 {
         u64 core_pct;
-        core_pct = div64_u64(sample->aperf * 100, sample->mperf);
-        sample->freq = cpu->pstate.max_pstate * core_pct * 1000;
+        core_pct = div64_u64(int_tofp(sample->aperf * 100),
+                        sample->mperf);
+        sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
 
         sample->core_pct_busy = core_pct;
 }
@@ -469,22 +470,19 @@ static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
         mod_timer_pinned(&cpu->timer, jiffies + delay);
 }
 
-static inline int intel_pstate_get_scaled_busy(struct cpudata *cpu)
+static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
 {
-        int32_t busy_scaled;
         int32_t core_busy, max_pstate, current_pstate;
 
-        core_busy = int_tofp(cpu->samples[cpu->sample_ptr].core_pct_busy);
+        core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
         max_pstate = int_tofp(cpu->pstate.max_pstate);
         current_pstate = int_tofp(cpu->pstate.current_pstate);
-        busy_scaled = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
-
-        return fp_toint(busy_scaled);
+        return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
 }
 
 static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
 {
-        int busy_scaled;
+        int32_t busy_scaled;
         struct _pid *pid;
         signed int ctl = 0;
         int steps;
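Together, the last two hunks keep core_pct_busy and the scaled busy value in fixed point all the way into pid_calc(), so the APERF/MPERF ratio is no longer rounded to a whole percent before the frequency estimate. A small stand-alone worked example of the intel_pstate_calc_busy() arithmetic after this change follows; FRAC_BITS = 8 is assumed, the counter deltas and P-state value are made up, and a plain division stands in for div64_u64().

#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 8                               /* assumed; illustrative only       */
#define int_tofp(X) ((uint64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)

int main(void)
{
        uint64_t aperf = 18000, mperf = 24000;    /* sampled counter deltas (made up) */
        uint64_t max_pstate = 24;                 /* illustrative P-state, ~2.4 GHz   */

        /* core_pct now keeps 8 fractional bits instead of truncating to a percent */
        uint64_t core_pct = int_tofp(aperf * 100) / mperf;       /* 19200 == 75.00% */
        uint64_t freq = fp_toint(max_pstate * core_pct * 1000);  /* 1800000 kHz     */

        printf("core_pct = %llu (%.2f%%), freq = %llu kHz\n",
               (unsigned long long)core_pct, core_pct / 256.0,
               (unsigned long long)freq);
        return 0;
}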