diff options
author | Konsta Holtta <kholtta@nvidia.com> | 2017-03-29 08:19:18 -0400 |
---|---|---|
committer | mobile promotions <svcmobile_promotions@nvidia.com> | 2018-07-05 12:58:47 -0400 |
commit | 643e69a1c288bdece8a621a9d7cd81015a1535be (patch) | |
tree | c449f2945256268f7bf4f9e1a4c8c5bdc931bd05 | |
parent | 80dca368b8741a216ba904ca12fd3f3adbe63678 (diff) |
devfreq: remove throughput hints from podgov
The CONFIG_TEGRA_THROUGHPUT-dependent throughput hint feature is dead,
so remove the code from podgov that used it.
Bug 1853519
Change-Id: Icb36eb0b2baa98757b3ce419080b43978384bbc9
Signed-off-by: Konsta Holtta <kholtta@nvidia.com>
Reviewed-on: http://git-master/r/1454591
(cherry picked from linux-4.9 commit 96fcf87d98618ddf9a1e23a017294d201040859b)
Reviewed-on: https://git-master.nvidia.com/r/1770147
Reviewed-by: Mikko Perttunen <mperttunen@nvidia.com>
GVS: Gerrit_Virtual_Submit
Reviewed-by: Timo Alho <talho@nvidia.com>
Tested-by: Timo Alho <talho@nvidia.com>
Reviewed-by: mobile promotions <svcmobile_promotions@nvidia.com>
Tested-by: mobile promotions <svcmobile_promotions@nvidia.com>
-rw-r--r-- | drivers/devfreq/governor_pod_scaling.c | 159 |
1 files changed, 7 insertions, 152 deletions
diff --git a/drivers/devfreq/governor_pod_scaling.c b/drivers/devfreq/governor_pod_scaling.c index cca369997..351cfd6ee 100644 --- a/drivers/devfreq/governor_pod_scaling.c +++ b/drivers/devfreq/governor_pod_scaling.c | |||
@@ -22,13 +22,9 @@ | |||
22 | * Power-on-demand clock scaling for nvhost devices | 22 | * Power-on-demand clock scaling for nvhost devices |
23 | * | 23 | * |
24 | * devfreq calls nvhost_pod_estimate_freq() for estimating the new | 24 | * devfreq calls nvhost_pod_estimate_freq() for estimating the new |
25 | * frequency for the device. The clocking is done using two properties: | 25 | * frequency for the device. The clocking is done using the load of the device |
26 | * | 26 | * is estimated using the busy times from the device profile. This information |
27 | * (1) Usually the governor receives actively throughput hints that indicate | 27 | * indicates if the device frequency should be altered. |
28 | * whether scaling up or down is required. | ||
29 | * (2) The load of the device is estimated using the busy times from the | ||
30 | * device profile. This information indicates if the device frequency | ||
31 | * should be altered. | ||
32 | * | 28 | * |
33 | */ | 29 | */ |
34 | 30 | ||
@@ -42,12 +38,6 @@ | |||
42 | #include <linux/tegra-soc.h> | 38 | #include <linux/tegra-soc.h> |
43 | #include <linux/module.h> | 39 | #include <linux/module.h> |
44 | 40 | ||
45 | #include <linux/notifier.h> | ||
46 | #include <linux/tegra-throughput.h> | ||
47 | |||
48 | #include <linux/notifier.h> | ||
49 | #include <linux/tegra-throughput.h> | ||
50 | |||
51 | #define CREATE_TRACE_POINTS | 41 | #define CREATE_TRACE_POINTS |
52 | #include <trace/events/nvhost_podgov.h> | 42 | #include <trace/events/nvhost_podgov.h> |
53 | 43 | ||
@@ -58,16 +48,10 @@ | |||
58 | 48 | ||
59 | #define GET_TARGET_FREQ_DONTSCALE 1 | 49 | #define GET_TARGET_FREQ_DONTSCALE 1 |
60 | 50 | ||
61 | /* time frame for load and hint tracking - when events come in at a larger | 51 | /* the number of frames to use in the running average of load estimates. |
62 | * interval, this probably indicates the current estimates are stale | 52 | * Choosing 6 frames targets a window of about 100 msec. Large fluctuations |
63 | */ | 53 | * in frame times require a window that's large enough to prevent spiky scaling |
64 | #define GR3D_TIMEFRAME 1000000 /* 1 sec */ | 54 | * behavior, which in turn exacerbates frame rate instability. |
65 | |||
66 | /* the number of frames to use in the running average of load estimates and | ||
67 | * throughput hints. Choosing 6 frames targets a window of about 100 msec. | ||
68 | * Large fluctuations in frame times require a window that's large enough to | ||
69 | * prevent spiky scaling behavior, which in turn exacerbates frame rate | ||
70 | * instability. | ||
71 | */ | 55 | */ |
72 | 56 | ||
73 | static void podgov_enable(struct devfreq *df, int enable); | 57 | static void podgov_enable(struct devfreq *df, int enable); |
@@ -84,14 +68,12 @@ struct podgov_info_rec { | |||
84 | int enable; | 68 | int enable; |
85 | int init; | 69 | int init; |
86 | 70 | ||
87 | ktime_t last_throughput_hint; | ||
88 | ktime_t last_scale; | 71 | ktime_t last_scale; |
89 | 72 | ||
90 | struct delayed_work idle_timer; | 73 | struct delayed_work idle_timer; |
91 | 74 | ||
92 | unsigned int p_slowdown_delay; | 75 | unsigned int p_slowdown_delay; |
93 | unsigned int p_block_window; | 76 | unsigned int p_block_window; |
94 | unsigned int p_use_throughput_hint; | ||
95 | unsigned int p_hint_lo_limit; | 77 | unsigned int p_hint_lo_limit; |
96 | unsigned int p_hint_hi_limit; | 78 | unsigned int p_hint_hi_limit; |
97 | unsigned int p_scaleup_limit; | 79 | unsigned int p_scaleup_limit; |
@@ -124,8 +106,6 @@ struct podgov_info_rec { | |||
124 | struct kobj_attribute enable_3d_scaling_attr; | 106 | struct kobj_attribute enable_3d_scaling_attr; |
125 | struct kobj_attribute user_attr; | 107 | struct kobj_attribute user_attr; |
126 | struct kobj_attribute freq_request_attr; | 108 | struct kobj_attribute freq_request_attr; |
127 | |||
128 | struct notifier_block throughput_hint_notifier; | ||
129 | }; | 109 | }; |
130 | 110 | ||
131 | /******************************************************************************* | 111 | /******************************************************************************* |
@@ -437,110 +417,6 @@ static void podgov_idle_handler(struct work_struct *work) | |||
437 | mutex_unlock(&df->lock); | 417 | mutex_unlock(&df->lock); |
438 | } | 418 | } |
439 | 419 | ||
440 | #ifdef CONFIG_TEGRA_THROUGHPUT | ||
441 | /******************************************************************************* | ||
442 | * freqlist_down(podgov, target, steps) | ||
443 | * | ||
444 | * This function determines the frequency that is "steps" frequency steps | ||
445 | * lower compared to the target frequency. | ||
446 | ******************************************************************************/ | ||
447 | |||
448 | static int freqlist_down(struct podgov_info_rec *podgov, unsigned long target, | ||
449 | int steps) | ||
450 | { | ||
451 | int i, pos; | ||
452 | |||
453 | for (i = podgov->freq_count - 1; i >= 0; i--) | ||
454 | if (podgov->freqlist[i] <= target) | ||
455 | break; | ||
456 | |||
457 | pos = max(0, i - steps); | ||
458 | return podgov->freqlist[pos]; | ||
459 | } | ||
460 | |||
461 | /******************************************************************************* | ||
462 | * nvhost_scale_emc_set_throughput_hint(hint) | ||
463 | * | ||
464 | * This function can be used to request scaling up or down based on the | ||
465 | * required throughput | ||
466 | ******************************************************************************/ | ||
467 | |||
468 | static int nvhost_scale_emc_set_throughput_hint(struct notifier_block *nb, | ||
469 | unsigned long action, void *data) | ||
470 | { | ||
471 | struct podgov_info_rec *podgov = | ||
472 | container_of(nb, struct podgov_info_rec, | ||
473 | throughput_hint_notifier); | ||
474 | struct devfreq *df = podgov->power_manager; | ||
475 | struct device *dev = df->dev.parent; | ||
476 | int hint = tegra_throughput_get_hint(); | ||
477 | long idle; | ||
478 | unsigned long curr, target; | ||
479 | int avg_idle, avg_hint, scale_score; | ||
480 | unsigned int smooth; | ||
481 | |||
482 | /* make sure the device is alive before doing any scaling */ | ||
483 | pm_runtime_get_noresume(dev); | ||
484 | if (!pm_runtime_active(dev)) { | ||
485 | pm_runtime_put(dev); | ||
486 | return 0; | ||
487 | } | ||
488 | |||
489 | mutex_lock(&df->lock); | ||
490 | |||
491 | podgov->block--; | ||
492 | |||
493 | if (!podgov->enable || | ||
494 | !podgov->p_use_throughput_hint || | ||
495 | podgov->block > 0) | ||
496 | goto exit_unlock; | ||
497 | |||
498 | trace_podgov_hint(df->dev.parent, podgov->idle, hint); | ||
499 | podgov->last_throughput_hint = ktime_get(); | ||
500 | |||
501 | curr = df->previous_freq; | ||
502 | idle = podgov->idle; | ||
503 | avg_idle = podgov->idle_avg; | ||
504 | smooth = podgov->p_smooth; | ||
505 | |||
506 | /* compute averages usings exponential-moving-average */ | ||
507 | avg_hint = ((smooth*podgov->hint_avg + hint)/(smooth+1)); | ||
508 | podgov->hint_avg = avg_hint; | ||
509 | |||
510 | /* set the target using avg_hint and avg_idle */ | ||
511 | target = curr; | ||
512 | if (avg_hint < podgov->p_hint_lo_limit) { | ||
513 | target = freqlist_up(podgov, curr, 1); | ||
514 | } else { | ||
515 | scale_score = avg_idle + avg_hint; | ||
516 | if (scale_score > podgov->p_scaledown_limit) | ||
517 | target = freqlist_down(podgov, curr, 1); | ||
518 | else if (scale_score < podgov->p_scaleup_limit | ||
519 | && hint < podgov->p_hint_hi_limit) | ||
520 | target = freqlist_up(podgov, curr, 1); | ||
521 | } | ||
522 | |||
523 | /* clamp and apply target */ | ||
524 | scaling_limit(df, &target); | ||
525 | if (target != curr) { | ||
526 | podgov->block = podgov->p_smooth; | ||
527 | trace_podgov_do_scale(df->dev.parent, | ||
528 | df->previous_freq, target); | ||
529 | podgov->adjustment_frequency = target; | ||
530 | podgov->adjustment_type = ADJUSTMENT_LOCAL; | ||
531 | update_devfreq(df); | ||
532 | } | ||
533 | |||
534 | trace_podgov_print_target(df->dev.parent, idle, avg_idle, | ||
535 | curr / 1000000, target, hint, avg_hint); | ||
536 | |||
537 | exit_unlock: | ||
538 | mutex_unlock(&df->lock); | ||
539 | pm_runtime_put(dev); | ||
540 | return NOTIFY_OK; | ||
541 | } | ||
542 | #endif | ||
543 | |||
544 | /******************************************************************************* | 420 | /******************************************************************************* |
545 | * debugfs interface for controlling 3d clock scaling on the fly | 421 | * debugfs interface for controlling 3d clock scaling on the fly |
546 | ******************************************************************************/ | 422 | ******************************************************************************/ |
@@ -580,7 +456,6 @@ static void nvhost_scale_emc_debug_init(struct devfreq *df) | |||
580 | CREATE_PODGOV_FILE(load_target); | 456 | CREATE_PODGOV_FILE(load_target); |
581 | CREATE_PODGOV_FILE(bias); | 457 | CREATE_PODGOV_FILE(bias); |
582 | CREATE_PODGOV_FILE(damp); | 458 | CREATE_PODGOV_FILE(damp); |
583 | CREATE_PODGOV_FILE(use_throughput_hint); | ||
584 | CREATE_PODGOV_FILE(hint_hi_limit); | 459 | CREATE_PODGOV_FILE(hint_hi_limit); |
585 | CREATE_PODGOV_FILE(hint_lo_limit); | 460 | CREATE_PODGOV_FILE(hint_lo_limit); |
586 | CREATE_PODGOV_FILE(scaleup_limit); | 461 | CREATE_PODGOV_FILE(scaleup_limit); |
@@ -784,11 +659,6 @@ static int nvhost_pod_estimate_freq(struct devfreq *df, | |||
784 | podgov->idle; | 659 | podgov->idle; |
785 | podgov->idle_avg = podgov->idle_avg / (podgov->p_smooth + 1); | 660 | podgov->idle_avg = podgov->idle_avg / (podgov->p_smooth + 1); |
786 | 661 | ||
787 | /* if throughput hint enabled, and last hint is recent enough, return */ | ||
788 | if (podgov->p_use_throughput_hint && | ||
789 | ktime_us_delta(now, podgov->last_throughput_hint) < 1000000) | ||
790 | return GET_TARGET_FREQ_DONTSCALE; | ||
791 | |||
792 | if (dev_stat.busy) { | 662 | if (dev_stat.busy) { |
793 | cancel_delayed_work(&podgov->idle_timer); | 663 | cancel_delayed_work(&podgov->idle_timer); |
794 | *freq = scaling_state_check(df, now); | 664 | *freq = scaling_state_check(df, now); |
@@ -842,7 +712,6 @@ static int nvhost_pod_init(struct devfreq *df) | |||
842 | /* Set scaling parameter defaults */ | 712 | /* Set scaling parameter defaults */ |
843 | podgov->enable = 1; | 713 | podgov->enable = 1; |
844 | podgov->block = 0; | 714 | podgov->block = 0; |
845 | podgov->p_use_throughput_hint = 1; | ||
846 | 715 | ||
847 | if (!strcmp(d->name, "vic03.0")) { | 716 | if (!strcmp(d->name, "vic03.0")) { |
848 | podgov->p_load_max = 990; | 717 | podgov->p_load_max = 990; |
@@ -871,7 +740,6 @@ static int nvhost_pod_init(struct devfreq *df) | |||
871 | podgov->p_scaledown_limit = 1300; | 740 | podgov->p_scaledown_limit = 1300; |
872 | podgov->p_smooth = 10; | 741 | podgov->p_smooth = 10; |
873 | podgov->p_damp = 7; | 742 | podgov->p_damp = 7; |
874 | podgov->p_use_throughput_hint = 0; | ||
875 | break; | 743 | break; |
876 | default: | 744 | default: |
877 | pr_err("%s: un-supported chip id\n", __func__); | 745 | pr_err("%s: un-supported chip id\n", __func__); |
@@ -886,7 +754,6 @@ static int nvhost_pod_init(struct devfreq *df) | |||
886 | podgov->p_user = 0; | 754 | podgov->p_user = 0; |
887 | 755 | ||
888 | /* Reset clock counters */ | 756 | /* Reset clock counters */ |
889 | podgov->last_throughput_hint = now; | ||
890 | podgov->last_scale = now; | 757 | podgov->last_scale = now; |
891 | 758 | ||
892 | podgov->power_manager = df; | 759 | podgov->power_manager = df; |
@@ -937,14 +804,6 @@ static int nvhost_pod_init(struct devfreq *df) | |||
937 | 804 | ||
938 | nvhost_scale_emc_debug_init(df); | 805 | nvhost_scale_emc_debug_init(df); |
939 | 806 | ||
940 | /* register the governor to throughput hint notifier chain */ | ||
941 | #ifdef CONFIG_TEGRA_THROUGHPUT | ||
942 | podgov->throughput_hint_notifier.notifier_call = | ||
943 | &nvhost_scale_emc_set_throughput_hint; | ||
944 | blocking_notifier_chain_register(&throughput_notifier_list, | ||
945 | &podgov->throughput_hint_notifier); | ||
946 | #endif | ||
947 | |||
948 | return 0; | 807 | return 0; |
949 | 808 | ||
950 | err_get_freqs: | 809 | err_get_freqs: |
@@ -973,10 +832,6 @@ static void nvhost_pod_exit(struct devfreq *df) | |||
973 | { | 832 | { |
974 | struct podgov_info_rec *podgov = df->data; | 833 | struct podgov_info_rec *podgov = df->data; |
975 | 834 | ||
976 | #ifdef CONFIG_TEGRA_THROUGHPUT | ||
977 | blocking_notifier_chain_unregister(&throughput_notifier_list, | ||
978 | &podgov->throughput_hint_notifier); | ||
979 | #endif | ||
980 | cancel_delayed_work(&podgov->idle_timer); | 835 | cancel_delayed_work(&podgov->idle_timer); |
981 | 836 | ||
982 | sysfs_remove_file(&df->dev.parent->kobj, &podgov->user_attr.attr); | 837 | sysfs_remove_file(&df->dev.parent->kobj, &podgov->user_attr.attr); |