 arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c |  2
 arch/x86/kernel/cpu/cpufreq/longrun.c         |  4
 drivers/cpufreq/cpufreq.c                     |  4
 drivers/cpufreq/cpufreq_ondemand.c            | 42
 4 files changed, 46 insertions(+), 6 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
index 733093d60436..141abebc4516 100644
--- a/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
+++ b/arch/x86/kernel/cpu/cpufreq/cpufreq-nforce2.c
@@ -393,7 +393,7 @@ static struct cpufreq_driver nforce2_driver = {
  * Detects nForce2 A2 and C1 stepping
  *
  */
-static unsigned int nforce2_detect_chipset(void)
+static int nforce2_detect_chipset(void)
 {
	nforce2_dev = pci_get_subsys(PCI_VENDOR_ID_NVIDIA,
				     PCI_DEVICE_ID_NVIDIA_NFORCE2,
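The return-type fix above matters because the detection routine reports failure with a negative errno-style value, and a caller that tests the result with "< 0" can never see that failure when the return type is unsigned. A minimal userspace sketch of the failure mode; detect_unsigned(), detect_signed() and the -ENODEV return value are illustrative stand-ins, not code from the driver:

#include <stdio.h>
#include <errno.h>

/* Hypothetical detection helpers: 0 on success, -ENODEV on failure. */
static unsigned int detect_unsigned(void) { return -ENODEV; }
static int detect_signed(void) { return -ENODEV; }

int main(void)
{
	/* -ENODEV wraps around to a large positive value, so this
	 * "is it negative?" check is always false. */
	if (detect_unsigned() < 0)
		printf("unsigned return: failure detected\n");
	else
		printf("unsigned return: failure silently missed\n");

	/* With a signed return type the error is caught as intended. */
	if (detect_signed() < 0)
		printf("signed return: failure detected\n");
	return 0;
}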
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index fc09f142d94d..d9f51367666b 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -35,7 +35,7 @@ static unsigned int longrun_low_freq, longrun_high_freq;
  * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
  * and MSR_TMTA_LONGRUN_CTRL
  */
-static void __init longrun_get_policy(struct cpufreq_policy *policy)
+static void __cpuinit longrun_get_policy(struct cpufreq_policy *policy)
 {
	u32 msr_lo, msr_hi;
 
@@ -165,7 +165,7 @@ static unsigned int longrun_get(unsigned int cpu)
  * TMTA rules:
  * performance_pctg = (target_freq - low_freq)/(high_freq - low_freq)
  */
-static unsigned int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
+static int __cpuinit longrun_determine_freqs(unsigned int *low_freq,
					      unsigned int *high_freq)
 {
	u32 msr_lo, msr_hi;
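The longrun hunks are the same kind of cleanup: the return type becomes a plain int so callers can check for errors, and the section annotation moves from __init to __cpuinit because longrun_get_policy() is called from the driver's CPU init path, which can run again when a CPU is brought online after boot, by which point init-only code has already been freed. A rough illustration of what the two annotations mean, using simplified stand-ins rather than the real definitions from <linux/init.h>:

#include <stdio.h>

/* Simplified stand-ins for the kernel's section annotations. */
#define __init    /* discarded once boot completes */
#define __cpuinit /* kept resident so CPU-hotplug paths can still call it */

static void __init boot_only_setup(void)
{
	printf("runs once during boot, then its memory is freed\n");
}

/* Mirrors the longrun_get_policy() situation: it may run again when a
 * CPU is onlined later, so it must not live in the init section. */
static void __cpuinit policy_readback(void)
{
	printf("may run again on CPU hotplug\n");
}

int main(void)
{
	boot_only_setup();
	policy_readback();
	return 0;
}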
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 199dcb9f0b83..c63a43823744 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -918,8 +918,8 @@ static int cpufreq_add_dev_interface(unsigned int cpu,
 
	spin_lock_irqsave(&cpufreq_driver_lock, flags);
	for_each_cpu(j, policy->cpus) {
-	if (!cpu_online(j))
-		continue;
+		if (!cpu_online(j))
+			continue;
		per_cpu(cpufreq_cpu_data, j) = policy;
		per_cpu(cpufreq_policy_cpu, j) = policy->cpu;
	}
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7b5093664e49..c631f27a3dcc 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -30,6 +30,8 @@
 
 #define DEF_FREQUENCY_DOWN_DIFFERENTIAL		(10)
 #define DEF_FREQUENCY_UP_THRESHOLD		(80)
+#define DEF_SAMPLING_DOWN_FACTOR		(1)
+#define MAX_SAMPLING_DOWN_FACTOR		(100000)
 #define MICRO_FREQUENCY_DOWN_DIFFERENTIAL	(3)
 #define MICRO_FREQUENCY_UP_THRESHOLD		(95)
 #define MICRO_FREQUENCY_MIN_SAMPLE_RATE		(10000)
@@ -82,6 +84,7 @@ struct cpu_dbs_info_s {
	unsigned int freq_lo;
	unsigned int freq_lo_jiffies;
	unsigned int freq_hi_jiffies;
+	unsigned int rate_mult;
	int cpu;
	unsigned int sample_type:1;
	/*
@@ -108,10 +111,12 @@ static struct dbs_tuners {
	unsigned int up_threshold;
	unsigned int down_differential;
	unsigned int ignore_nice;
+	unsigned int sampling_down_factor;
	unsigned int powersave_bias;
	unsigned int io_is_busy;
 } dbs_tuners_ins = {
	.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+	.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
	.down_differential = DEF_FREQUENCY_DOWN_DIFFERENTIAL,
	.ignore_nice = 0,
	.powersave_bias = 0,
@@ -259,6 +264,7 @@ static ssize_t show_##file_name \
 show_one(sampling_rate, sampling_rate);
 show_one(io_is_busy, io_is_busy);
 show_one(up_threshold, up_threshold);
+show_one(sampling_down_factor, sampling_down_factor);
 show_one(ignore_nice_load, ignore_nice);
 show_one(powersave_bias, powersave_bias);
 
@@ -340,6 +346,29 @@ static ssize_t store_up_threshold(struct kobject *a, struct attribute *b,
	return count;
 }
 
+static ssize_t store_sampling_down_factor(struct kobject *a,
+			struct attribute *b, const char *buf, size_t count)
+{
+	unsigned int input, j;
+	int ret;
+	ret = sscanf(buf, "%u", &input);
+
+	if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+		return -EINVAL;
+	mutex_lock(&dbs_mutex);
+	dbs_tuners_ins.sampling_down_factor = input;
+
+	/* Reset down sampling multiplier in case it was active */
+	for_each_online_cpu(j) {
+		struct cpu_dbs_info_s *dbs_info;
+		dbs_info = &per_cpu(od_cpu_dbs_info, j);
+		dbs_info->rate_mult = 1;
+	}
+	mutex_unlock(&dbs_mutex);
+
+	return count;
+}
+
 static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
 {
@@ -401,6 +430,7 @@ static ssize_t store_powersave_bias(struct kobject *a, struct attribute *b,
 define_one_global_rw(sampling_rate);
 define_one_global_rw(io_is_busy);
 define_one_global_rw(up_threshold);
+define_one_global_rw(sampling_down_factor);
 define_one_global_rw(ignore_nice_load);
 define_one_global_rw(powersave_bias);
 
@@ -409,6 +439,7 @@ static struct attribute *dbs_attributes[] = {
	&sampling_rate_min.attr,
	&sampling_rate.attr,
	&up_threshold.attr,
+	&sampling_down_factor.attr,
	&ignore_nice_load.attr,
	&powersave_bias.attr,
	&io_is_busy.attr,
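With the attribute wired into dbs_attributes[], the new knob shows up in sysfs next to the existing global ondemand tunables. A small userspace sketch of how it could be set; the path below is the usual location of the global ondemand tunables on kernels of this vintage, but it is an assumption worth verifying on the target system:

#include <stdio.h>
#include <stdlib.h>

/* Assumed sysfs directory for the global ondemand tunables. */
#define ONDEMAND_DIR "/sys/devices/system/cpu/cpufreq/ondemand"

int main(void)
{
	FILE *f = fopen(ONDEMAND_DIR "/sampling_down_factor", "w");

	if (!f) {
		perror("sampling_down_factor");
		return EXIT_FAILURE;
	}
	/* Values outside 1..MAX_SAMPLING_DOWN_FACTOR are rejected with
	 * -EINVAL by store_sampling_down_factor(); 10 means "re-evaluate
	 * 10x less often while the CPU is held at its maximum frequency". */
	fprintf(f, "%u\n", 10u);
	fclose(f);
	return EXIT_SUCCESS;
}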
@@ -562,6 +593,10 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
 
	/* Check for frequency increase */
	if (max_load_freq > dbs_tuners_ins.up_threshold * policy->cur) {
+		/* If switching to max speed, apply sampling_down_factor */
+		if (policy->cur < policy->max)
+			this_dbs_info->rate_mult =
+				dbs_tuners_ins.sampling_down_factor;
		dbs_freq_increase(policy, policy->max);
		return;
	}
@@ -584,6 +619,9 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
				(dbs_tuners_ins.up_threshold -
				 dbs_tuners_ins.down_differential);
 
+		/* No longer fully busy, reset rate_mult */
+		this_dbs_info->rate_mult = 1;
+
		if (freq_next < policy->min)
			freq_next = policy->min;
 
@@ -607,7 +645,8 @@ static void do_dbs_timer(struct work_struct *work)
	int sample_type = dbs_info->sample_type;
 
	/* We want all CPUs to do sampling nearly on same jiffy */
-	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+	int delay = usecs_to_jiffies(dbs_tuners_ins.sampling_rate
+		* dbs_info->rate_mult);
 
	if (num_online_cpus() > 1)
		delay -= jiffies % delay;
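The do_dbs_timer() change above is where the tunable actually bites: the reschedule delay is the sampling rate multiplied by rate_mult, and rate_mult is only raised to sampling_down_factor while the governor is sitting at policy->max. A back-of-the-envelope sketch of the arithmetic; HZ=1000 and the example tunable values are assumptions, not values taken from the patch:

#include <stdio.h>

/* Crude stand-in for the kernel's usecs_to_jiffies() at HZ=1000. */
static unsigned int usecs_to_jiffies_hz1000(unsigned int us)
{
	return (us + 999) / 1000;
}

int main(void)
{
	unsigned int sampling_rate = 10000;       /* 10 ms, in microseconds */
	unsigned int sampling_down_factor = 10;
	unsigned int rate_mult;

	/* Normal operation: rate_mult is 1, load is re-checked every 10 ms. */
	rate_mult = 1;
	printf("normal delay: %u jiffies\n",
	       usecs_to_jiffies_hz1000(sampling_rate * rate_mult));

	/* Pegged at policy->max: rate_mult becomes sampling_down_factor,
	 * so the next evaluation happens only after ~100 ms. */
	rate_mult = sampling_down_factor;
	printf("at max speed: %u jiffies\n",
	       usecs_to_jiffies_hz1000(sampling_rate * rate_mult));
	return 0;
}

Stretching only the at-max interval cuts governor overhead under sustained load, while the ramp-up path stays as responsive as before because up-switch decisions are never delayed.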
@@ -711,6 +750,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
		}
	}
	this_dbs_info->cpu = cpu;
+	this_dbs_info->rate_mult = 1;
	ondemand_powersave_bias_init_cpu(cpu);
	/*
	 * Start the timerschedule work, when this governor
