diff options
| -rw-r--r-- | drivers/acpi/processor_idle.c | 48 | ||||
| -rw-r--r-- | drivers/cpuidle/cpuidle.c | 94 | ||||
| -rw-r--r-- | drivers/idle/intel_idle.c | 179 | ||||
| -rw-r--r-- | include/linux/cpuidle.h | 13 | ||||
| -rw-r--r-- | include/linux/suspend.h | 16 | ||||
| -rw-r--r-- | include/linux/tick.h | 6 | ||||
| -rw-r--r-- | kernel/power/suspend.c | 43 | ||||
| -rw-r--r-- | kernel/sched/idle.c | 16 | ||||
| -rw-r--r-- | kernel/time/tick-common.c | 50 | ||||
| -rw-r--r-- | kernel/time/timekeeping.c | 48 | ||||
| -rw-r--r-- | kernel/time/timekeeping.h | 2 |
11 files changed, 407 insertions, 108 deletions
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index c256bd7fbd78..c6bb9f1257c9 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c | |||
| @@ -732,9 +732,8 @@ static int acpi_idle_play_dead(struct cpuidle_device *dev, int index) | |||
| 732 | 732 | ||
| 733 | static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) | 733 | static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr) |
| 734 | { | 734 | { |
| 735 | return IS_ENABLED(CONFIG_HOTPLUG_CPU) && num_online_cpus() > 1 && | 735 | return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst && |
| 736 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED) && | 736 | !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED); |
| 737 | !pr->flags.has_cst; | ||
| 738 | } | 737 | } |
| 739 | 738 | ||
| 740 | static int c3_cpu_count; | 739 | static int c3_cpu_count; |
| @@ -744,9 +743,10 @@ static DEFINE_RAW_SPINLOCK(c3_lock); | |||
| 744 | * acpi_idle_enter_bm - enters C3 with proper BM handling | 743 | * acpi_idle_enter_bm - enters C3 with proper BM handling |
| 745 | * @pr: Target processor | 744 | * @pr: Target processor |
| 746 | * @cx: Target state context | 745 | * @cx: Target state context |
| 746 | * @timer_bc: Whether or not to change timer mode to broadcast | ||
| 747 | */ | 747 | */ |
| 748 | static void acpi_idle_enter_bm(struct acpi_processor *pr, | 748 | static void acpi_idle_enter_bm(struct acpi_processor *pr, |
| 749 | struct acpi_processor_cx *cx) | 749 | struct acpi_processor_cx *cx, bool timer_bc) |
| 750 | { | 750 | { |
| 751 | acpi_unlazy_tlb(smp_processor_id()); | 751 | acpi_unlazy_tlb(smp_processor_id()); |
| 752 | 752 | ||
| @@ -754,7 +754,8 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr, | |||
| 754 | * Must be done before busmaster disable as we might need to | 754 | * Must be done before busmaster disable as we might need to |
| 755 | * access HPET ! | 755 | * access HPET ! |
| 756 | */ | 756 | */ |
| 757 | lapic_timer_state_broadcast(pr, cx, 1); | 757 | if (timer_bc) |
| 758 | lapic_timer_state_broadcast(pr, cx, 1); | ||
| 758 | 759 | ||
| 759 | /* | 760 | /* |
| 760 | * disable bus master | 761 | * disable bus master |
| @@ -784,7 +785,8 @@ static void acpi_idle_enter_bm(struct acpi_processor *pr, | |||
| 784 | raw_spin_unlock(&c3_lock); | 785 | raw_spin_unlock(&c3_lock); |
| 785 | } | 786 | } |
| 786 | 787 | ||
| 787 | lapic_timer_state_broadcast(pr, cx, 0); | 788 | if (timer_bc) |
| 789 | lapic_timer_state_broadcast(pr, cx, 0); | ||
| 788 | } | 790 | } |
| 789 | 791 | ||
| 790 | static int acpi_idle_enter(struct cpuidle_device *dev, | 792 | static int acpi_idle_enter(struct cpuidle_device *dev, |
| @@ -798,12 +800,12 @@ static int acpi_idle_enter(struct cpuidle_device *dev, | |||
| 798 | return -EINVAL; | 800 | return -EINVAL; |
| 799 | 801 | ||
| 800 | if (cx->type != ACPI_STATE_C1) { | 802 | if (cx->type != ACPI_STATE_C1) { |
| 801 | if (acpi_idle_fallback_to_c1(pr)) { | 803 | if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) { |
| 802 | index = CPUIDLE_DRIVER_STATE_START; | 804 | index = CPUIDLE_DRIVER_STATE_START; |
| 803 | cx = per_cpu(acpi_cstate[index], dev->cpu); | 805 | cx = per_cpu(acpi_cstate[index], dev->cpu); |
| 804 | } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) { | 806 | } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) { |
| 805 | if (cx->bm_sts_skip || !acpi_idle_bm_check()) { | 807 | if (cx->bm_sts_skip || !acpi_idle_bm_check()) { |
| 806 | acpi_idle_enter_bm(pr, cx); | 808 | acpi_idle_enter_bm(pr, cx, true); |
| 807 | return index; | 809 | return index; |
| 808 | } else if (drv->safe_state_index >= 0) { | 810 | } else if (drv->safe_state_index >= 0) { |
| 809 | index = drv->safe_state_index; | 811 | index = drv->safe_state_index; |
| @@ -827,6 +829,27 @@ static int acpi_idle_enter(struct cpuidle_device *dev, | |||
| 827 | return index; | 829 | return index; |
| 828 | } | 830 | } |
| 829 | 831 | ||
| 832 | static void acpi_idle_enter_freeze(struct cpuidle_device *dev, | ||
| 833 | struct cpuidle_driver *drv, int index) | ||
| 834 | { | ||
| 835 | struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu); | ||
| 836 | |||
| 837 | if (cx->type == ACPI_STATE_C3) { | ||
| 838 | struct acpi_processor *pr = __this_cpu_read(processors); | ||
| 839 | |||
| 840 | if (unlikely(!pr)) | ||
| 841 | return; | ||
| 842 | |||
| 843 | if (pr->flags.bm_check) { | ||
| 844 | acpi_idle_enter_bm(pr, cx, false); | ||
| 845 | return; | ||
| 846 | } else { | ||
| 847 | ACPI_FLUSH_CPU_CACHE(); | ||
| 848 | } | ||
| 849 | } | ||
| 850 | acpi_idle_do_entry(cx); | ||
| 851 | } | ||
| 852 | |||
| 830 | struct cpuidle_driver acpi_idle_driver = { | 853 | struct cpuidle_driver acpi_idle_driver = { |
| 831 | .name = "acpi_idle", | 854 | .name = "acpi_idle", |
| 832 | .owner = THIS_MODULE, | 855 | .owner = THIS_MODULE, |
| @@ -925,6 +948,15 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr) | |||
| 925 | state->enter_dead = acpi_idle_play_dead; | 948 | state->enter_dead = acpi_idle_play_dead; |
| 926 | drv->safe_state_index = count; | 949 | drv->safe_state_index = count; |
| 927 | } | 950 | } |
| 951 | /* | ||
| 952 | * Halt-induced C1 is not good for ->enter_freeze, because it | ||
| 953 | * re-enables interrupts on exit. Moreover, C1 is generally not | ||
| 954 | * particularly interesting from the suspend-to-idle angle, so | ||
| 955 | * avoid C1 and the situations in which we may need to fall back | ||
| 956 | * to it altogether. | ||
| 957 | */ | ||
| 958 | if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr)) | ||
| 959 | state->enter_freeze = acpi_idle_enter_freeze; | ||
| 928 | 960 | ||
| 929 | count++; | 961 | count++; |
| 930 | if (count == CPUIDLE_STATE_MAX) | 962 | if (count == CPUIDLE_STATE_MAX) |
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 125150dc6e81..4d534582514e 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c | |||
| @@ -19,6 +19,8 @@ | |||
| 19 | #include <linux/ktime.h> | 19 | #include <linux/ktime.h> |
| 20 | #include <linux/hrtimer.h> | 20 | #include <linux/hrtimer.h> |
| 21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
| 22 | #include <linux/suspend.h> | ||
| 23 | #include <linux/tick.h> | ||
| 22 | #include <trace/events/power.h> | 24 | #include <trace/events/power.h> |
| 23 | 25 | ||
| 24 | #include "cpuidle.h" | 26 | #include "cpuidle.h" |
| @@ -32,7 +34,6 @@ LIST_HEAD(cpuidle_detected_devices); | |||
| 32 | static int enabled_devices; | 34 | static int enabled_devices; |
| 33 | static int off __read_mostly; | 35 | static int off __read_mostly; |
| 34 | static int initialized __read_mostly; | 36 | static int initialized __read_mostly; |
| 35 | static bool use_deepest_state __read_mostly; | ||
| 36 | 37 | ||
| 37 | int cpuidle_disabled(void) | 38 | int cpuidle_disabled(void) |
| 38 | { | 39 | { |
| @@ -66,36 +67,23 @@ int cpuidle_play_dead(void) | |||
| 66 | } | 67 | } |
| 67 | 68 | ||
| 68 | /** | 69 | /** |
| 69 | * cpuidle_use_deepest_state - Enable/disable the "deepest idle" mode. | 70 | * cpuidle_find_deepest_state - Find deepest state meeting specific conditions. |
| 70 | * @enable: Whether enable or disable the feature. | 71 | * @drv: cpuidle driver for the given CPU. |
| 71 | * | 72 | * @dev: cpuidle device for the given CPU. |
| 72 | * If the "deepest idle" mode is enabled, cpuidle will ignore the governor and | 73 | * @freeze: Whether or not the state should be suitable for suspend-to-idle. |
| 73 | * always use the state with the greatest exit latency (out of the states that | ||
| 74 | * are not disabled). | ||
| 75 | * | ||
| 76 | * This function can only be called after cpuidle_pause() to avoid races. | ||
| 77 | */ | ||
| 78 | void cpuidle_use_deepest_state(bool enable) | ||
| 79 | { | ||
| 80 | use_deepest_state = enable; | ||
| 81 | } | ||
| 82 | |||
| 83 | /** | ||
| 84 | * cpuidle_find_deepest_state - Find the state of the greatest exit latency. | ||
| 85 | * @drv: cpuidle driver for a given CPU. | ||
| 86 | * @dev: cpuidle device for a given CPU. | ||
| 87 | */ | 74 | */ |
| 88 | static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | 75 | static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, |
| 89 | struct cpuidle_device *dev) | 76 | struct cpuidle_device *dev, bool freeze) |
| 90 | { | 77 | { |
| 91 | unsigned int latency_req = 0; | 78 | unsigned int latency_req = 0; |
| 92 | int i, ret = CPUIDLE_DRIVER_STATE_START - 1; | 79 | int i, ret = freeze ? -1 : CPUIDLE_DRIVER_STATE_START - 1; |
| 93 | 80 | ||
| 94 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { | 81 | for (i = CPUIDLE_DRIVER_STATE_START; i < drv->state_count; i++) { |
| 95 | struct cpuidle_state *s = &drv->states[i]; | 82 | struct cpuidle_state *s = &drv->states[i]; |
| 96 | struct cpuidle_state_usage *su = &dev->states_usage[i]; | 83 | struct cpuidle_state_usage *su = &dev->states_usage[i]; |
| 97 | 84 | ||
| 98 | if (s->disabled || su->disable || s->exit_latency <= latency_req) | 85 | if (s->disabled || su->disable || s->exit_latency <= latency_req |
| 86 | || (freeze && !s->enter_freeze)) | ||
| 99 | continue; | 87 | continue; |
| 100 | 88 | ||
| 101 | latency_req = s->exit_latency; | 89 | latency_req = s->exit_latency; |
| @@ -104,6 +92,63 @@ static int cpuidle_find_deepest_state(struct cpuidle_driver *drv, | |||
| 104 | return ret; | 92 | return ret; |
| 105 | } | 93 | } |
| 106 | 94 | ||
| 95 | static void enter_freeze_proper(struct cpuidle_driver *drv, | ||
| 96 | struct cpuidle_device *dev, int index) | ||
| 97 | { | ||
| 98 | tick_freeze(); | ||
| 99 | /* | ||
| 100 | * The state used here cannot be a "coupled" one, because the "coupled" | ||
| 101 | * cpuidle mechanism enables interrupts and doing that with timekeeping | ||
| 102 | * suspended is generally unsafe. | ||
| 103 | */ | ||
| 104 | drv->states[index].enter_freeze(dev, drv, index); | ||
| 105 | WARN_ON(!irqs_disabled()); | ||
| 106 | /* | ||
| 107 | * timekeeping_resume() that will be called by tick_unfreeze() for the | ||
| 108 | * last CPU executing it calls functions containing RCU read-side | ||
| 109 | * critical sections, so tell RCU about that. | ||
| 110 | */ | ||
| 111 | RCU_NONIDLE(tick_unfreeze()); | ||
| 112 | } | ||
| 113 | |||
| 114 | /** | ||
| 115 | * cpuidle_enter_freeze - Enter an idle state suitable for suspend-to-idle. | ||
| 116 | * | ||
| 117 | * If there are states with the ->enter_freeze callback, find the deepest of | ||
| 118 | * them and enter it with frozen tick. Otherwise, find the deepest state | ||
| 119 | * available and enter it normally. | ||
| 120 | */ | ||
| 121 | void cpuidle_enter_freeze(void) | ||
| 122 | { | ||
| 123 | struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices); | ||
| 124 | struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); | ||
| 125 | int index; | ||
| 126 | |||
| 127 | /* | ||
| 128 | * Find the deepest state with ->enter_freeze present, which guarantees | ||
| 129 | * that interrupts won't be enabled when it exits and allows the tick to | ||
| 130 | * be frozen safely. | ||
| 131 | */ | ||
| 132 | index = cpuidle_find_deepest_state(drv, dev, true); | ||
| 133 | if (index >= 0) { | ||
| 134 | enter_freeze_proper(drv, dev, index); | ||
| 135 | return; | ||
| 136 | } | ||
| 137 | |||
| 138 | /* | ||
| 139 | * It is not safe to freeze the tick, find the deepest state available | ||
| 140 | * at all and try to enter it normally. | ||
| 141 | */ | ||
| 142 | index = cpuidle_find_deepest_state(drv, dev, false); | ||
| 143 | if (index >= 0) | ||
| 144 | cpuidle_enter(drv, dev, index); | ||
| 145 | else | ||
| 146 | arch_cpu_idle(); | ||
| 147 | |||
| 148 | /* Interrupts are enabled again here. */ | ||
| 149 | local_irq_disable(); | ||
| 150 | } | ||
| 151 | |||
| 107 | /** | 152 | /** |
| 108 | * cpuidle_enter_state - enter the state and update stats | 153 | * cpuidle_enter_state - enter the state and update stats |
| 109 | * @dev: cpuidle device for this cpu | 154 | * @dev: cpuidle device for this cpu |
| @@ -166,9 +211,6 @@ int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev) | |||
| 166 | if (!drv || !dev || !dev->enabled) | 211 | if (!drv || !dev || !dev->enabled) |
| 167 | return -EBUSY; | 212 | return -EBUSY; |
| 168 | 213 | ||
| 169 | if (unlikely(use_deepest_state)) | ||
| 170 | return cpuidle_find_deepest_state(drv, dev); | ||
| 171 | |||
| 172 | return cpuidle_curr_governor->select(drv, dev); | 214 | return cpuidle_curr_governor->select(drv, dev); |
| 173 | } | 215 | } |
| 174 | 216 | ||
| @@ -200,7 +242,7 @@ int cpuidle_enter(struct cpuidle_driver *drv, struct cpuidle_device *dev, | |||
| 200 | */ | 242 | */ |
| 201 | void cpuidle_reflect(struct cpuidle_device *dev, int index) | 243 | void cpuidle_reflect(struct cpuidle_device *dev, int index) |
| 202 | { | 244 | { |
| 203 | if (cpuidle_curr_governor->reflect && !unlikely(use_deepest_state)) | 245 | if (cpuidle_curr_governor->reflect) |
| 204 | cpuidle_curr_governor->reflect(dev, index); | 246 | cpuidle_curr_governor->reflect(dev, index); |
| 205 | } | 247 | } |
| 206 | 248 | ||
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c index 1bc0c170f12a..b0e58522780d 100644 --- a/drivers/idle/intel_idle.c +++ b/drivers/idle/intel_idle.c | |||
| @@ -97,6 +97,8 @@ static const struct idle_cpu *icpu; | |||
| 97 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; | 97 | static struct cpuidle_device __percpu *intel_idle_cpuidle_devices; |
| 98 | static int intel_idle(struct cpuidle_device *dev, | 98 | static int intel_idle(struct cpuidle_device *dev, |
| 99 | struct cpuidle_driver *drv, int index); | 99 | struct cpuidle_driver *drv, int index); |
| 100 | static void intel_idle_freeze(struct cpuidle_device *dev, | ||
| 101 | struct cpuidle_driver *drv, int index); | ||
| 100 | static int intel_idle_cpu_init(int cpu); | 102 | static int intel_idle_cpu_init(int cpu); |
| 101 | 103 | ||
| 102 | static struct cpuidle_state *cpuidle_state_table; | 104 | static struct cpuidle_state *cpuidle_state_table; |
| @@ -131,28 +133,32 @@ static struct cpuidle_state nehalem_cstates[] = { | |||
| 131 | .flags = MWAIT2flg(0x00), | 133 | .flags = MWAIT2flg(0x00), |
| 132 | .exit_latency = 3, | 134 | .exit_latency = 3, |
| 133 | .target_residency = 6, | 135 | .target_residency = 6, |
| 134 | .enter = &intel_idle }, | 136 | .enter = &intel_idle, |
| 137 | .enter_freeze = intel_idle_freeze, }, | ||
| 135 | { | 138 | { |
| 136 | .name = "C1E-NHM", | 139 | .name = "C1E-NHM", |
| 137 | .desc = "MWAIT 0x01", | 140 | .desc = "MWAIT 0x01", |
| 138 | .flags = MWAIT2flg(0x01), | 141 | .flags = MWAIT2flg(0x01), |
| 139 | .exit_latency = 10, | 142 | .exit_latency = 10, |
| 140 | .target_residency = 20, | 143 | .target_residency = 20, |
| 141 | .enter = &intel_idle }, | 144 | .enter = &intel_idle, |
| 145 | .enter_freeze = intel_idle_freeze, }, | ||
| 142 | { | 146 | { |
| 143 | .name = "C3-NHM", | 147 | .name = "C3-NHM", |
| 144 | .desc = "MWAIT 0x10", | 148 | .desc = "MWAIT 0x10", |
| 145 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, | 149 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 146 | .exit_latency = 20, | 150 | .exit_latency = 20, |
| 147 | .target_residency = 80, | 151 | .target_residency = 80, |
| 148 | .enter = &intel_idle }, | 152 | .enter = &intel_idle, |
| 153 | .enter_freeze = intel_idle_freeze, }, | ||
| 149 | { | 154 | { |
| 150 | .name = "C6-NHM", | 155 | .name = "C6-NHM", |
| 151 | .desc = "MWAIT 0x20", | 156 | .desc = "MWAIT 0x20", |
| 152 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 157 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 153 | .exit_latency = 200, | 158 | .exit_latency = 200, |
| 154 | .target_residency = 800, | 159 | .target_residency = 800, |
| 155 | .enter = &intel_idle }, | 160 | .enter = &intel_idle, |
| 161 | .enter_freeze = intel_idle_freeze, }, | ||
| 156 | { | 162 | { |
| 157 | .enter = NULL } | 163 | .enter = NULL } |
| 158 | }; | 164 | }; |
| @@ -164,35 +170,40 @@ static struct cpuidle_state snb_cstates[] = { | |||
| 164 | .flags = MWAIT2flg(0x00), | 170 | .flags = MWAIT2flg(0x00), |
| 165 | .exit_latency = 2, | 171 | .exit_latency = 2, |
| 166 | .target_residency = 2, | 172 | .target_residency = 2, |
| 167 | .enter = &intel_idle }, | 173 | .enter = &intel_idle, |
| 174 | .enter_freeze = intel_idle_freeze, }, | ||
| 168 | { | 175 | { |
| 169 | .name = "C1E-SNB", | 176 | .name = "C1E-SNB", |
| 170 | .desc = "MWAIT 0x01", | 177 | .desc = "MWAIT 0x01", |
| 171 | .flags = MWAIT2flg(0x01), | 178 | .flags = MWAIT2flg(0x01), |
| 172 | .exit_latency = 10, | 179 | .exit_latency = 10, |
| 173 | .target_residency = 20, | 180 | .target_residency = 20, |
| 174 | .enter = &intel_idle }, | 181 | .enter = &intel_idle, |
| 182 | .enter_freeze = intel_idle_freeze, }, | ||
| 175 | { | 183 | { |
| 176 | .name = "C3-SNB", | 184 | .name = "C3-SNB", |
| 177 | .desc = "MWAIT 0x10", | 185 | .desc = "MWAIT 0x10", |
| 178 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, | 186 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 179 | .exit_latency = 80, | 187 | .exit_latency = 80, |
| 180 | .target_residency = 211, | 188 | .target_residency = 211, |
| 181 | .enter = &intel_idle }, | 189 | .enter = &intel_idle, |
| 190 | .enter_freeze = intel_idle_freeze, }, | ||
| 182 | { | 191 | { |
| 183 | .name = "C6-SNB", | 192 | .name = "C6-SNB", |
| 184 | .desc = "MWAIT 0x20", | 193 | .desc = "MWAIT 0x20", |
| 185 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 194 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 186 | .exit_latency = 104, | 195 | .exit_latency = 104, |
| 187 | .target_residency = 345, | 196 | .target_residency = 345, |
| 188 | .enter = &intel_idle }, | 197 | .enter = &intel_idle, |
| 198 | .enter_freeze = intel_idle_freeze, }, | ||
| 189 | { | 199 | { |
| 190 | .name = "C7-SNB", | 200 | .name = "C7-SNB", |
| 191 | .desc = "MWAIT 0x30", | 201 | .desc = "MWAIT 0x30", |
| 192 | .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, | 202 | .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 193 | .exit_latency = 109, | 203 | .exit_latency = 109, |
| 194 | .target_residency = 345, | 204 | .target_residency = 345, |
| 195 | .enter = &intel_idle }, | 205 | .enter = &intel_idle, |
| 206 | .enter_freeze = intel_idle_freeze, }, | ||
| 196 | { | 207 | { |
| 197 | .enter = NULL } | 208 | .enter = NULL } |
| 198 | }; | 209 | }; |
| @@ -204,42 +215,48 @@ static struct cpuidle_state byt_cstates[] = { | |||
| 204 | .flags = MWAIT2flg(0x00), | 215 | .flags = MWAIT2flg(0x00), |
| 205 | .exit_latency = 1, | 216 | .exit_latency = 1, |
| 206 | .target_residency = 1, | 217 | .target_residency = 1, |
| 207 | .enter = &intel_idle }, | 218 | .enter = &intel_idle, |
| 219 | .enter_freeze = intel_idle_freeze, }, | ||
| 208 | { | 220 | { |
| 209 | .name = "C1E-BYT", | 221 | .name = "C1E-BYT", |
| 210 | .desc = "MWAIT 0x01", | 222 | .desc = "MWAIT 0x01", |
| 211 | .flags = MWAIT2flg(0x01), | 223 | .flags = MWAIT2flg(0x01), |
| 212 | .exit_latency = 15, | 224 | .exit_latency = 15, |
| 213 | .target_residency = 30, | 225 | .target_residency = 30, |
| 214 | .enter = &intel_idle }, | 226 | .enter = &intel_idle, |
| 227 | .enter_freeze = intel_idle_freeze, }, | ||
| 215 | { | 228 | { |
| 216 | .name = "C6N-BYT", | 229 | .name = "C6N-BYT", |
| 217 | .desc = "MWAIT 0x58", | 230 | .desc = "MWAIT 0x58", |
| 218 | .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, | 231 | .flags = MWAIT2flg(0x58) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 219 | .exit_latency = 40, | 232 | .exit_latency = 40, |
| 220 | .target_residency = 275, | 233 | .target_residency = 275, |
| 221 | .enter = &intel_idle }, | 234 | .enter = &intel_idle, |
| 235 | .enter_freeze = intel_idle_freeze, }, | ||
| 222 | { | 236 | { |
| 223 | .name = "C6S-BYT", | 237 | .name = "C6S-BYT", |
| 224 | .desc = "MWAIT 0x52", | 238 | .desc = "MWAIT 0x52", |
| 225 | .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, | 239 | .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 226 | .exit_latency = 140, | 240 | .exit_latency = 140, |
| 227 | .target_residency = 560, | 241 | .target_residency = 560, |
| 228 | .enter = &intel_idle }, | 242 | .enter = &intel_idle, |
| 243 | .enter_freeze = intel_idle_freeze, }, | ||
| 229 | { | 244 | { |
| 230 | .name = "C7-BYT", | 245 | .name = "C7-BYT", |
| 231 | .desc = "MWAIT 0x60", | 246 | .desc = "MWAIT 0x60", |
| 232 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, | 247 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 233 | .exit_latency = 1200, | 248 | .exit_latency = 1200, |
| 234 | .target_residency = 1500, | 249 | .target_residency = 1500, |
| 235 | .enter = &intel_idle }, | 250 | .enter = &intel_idle, |
| 251 | .enter_freeze = intel_idle_freeze, }, | ||
| 236 | { | 252 | { |
| 237 | .name = "C7S-BYT", | 253 | .name = "C7S-BYT", |
| 238 | .desc = "MWAIT 0x64", | 254 | .desc = "MWAIT 0x64", |
| 239 | .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, | 255 | .flags = MWAIT2flg(0x64) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 240 | .exit_latency = 10000, | 256 | .exit_latency = 10000, |
| 241 | .target_residency = 20000, | 257 | .target_residency = 20000, |
| 242 | .enter = &intel_idle }, | 258 | .enter = &intel_idle, |
| 259 | .enter_freeze = intel_idle_freeze, }, | ||
| 243 | { | 260 | { |
| 244 | .enter = NULL } | 261 | .enter = NULL } |
| 245 | }; | 262 | }; |
| @@ -251,35 +268,40 @@ static struct cpuidle_state ivb_cstates[] = { | |||
| 251 | .flags = MWAIT2flg(0x00), | 268 | .flags = MWAIT2flg(0x00), |
| 252 | .exit_latency = 1, | 269 | .exit_latency = 1, |
| 253 | .target_residency = 1, | 270 | .target_residency = 1, |
| 254 | .enter = &intel_idle }, | 271 | .enter = &intel_idle, |
| 272 | .enter_freeze = intel_idle_freeze, }, | ||
| 255 | { | 273 | { |
| 256 | .name = "C1E-IVB", | 274 | .name = "C1E-IVB", |
| 257 | .desc = "MWAIT 0x01", | 275 | .desc = "MWAIT 0x01", |
| 258 | .flags = MWAIT2flg(0x01), | 276 | .flags = MWAIT2flg(0x01), |
| 259 | .exit_latency = 10, | 277 | .exit_latency = 10, |
| 260 | .target_residency = 20, | 278 | .target_residency = 20, |
| 261 | .enter = &intel_idle }, | 279 | .enter = &intel_idle, |
| 280 | .enter_freeze = intel_idle_freeze, }, | ||
| 262 | { | 281 | { |
| 263 | .name = "C3-IVB", | 282 | .name = "C3-IVB", |
| 264 | .desc = "MWAIT 0x10", | 283 | .desc = "MWAIT 0x10", |
| 265 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, | 284 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 266 | .exit_latency = 59, | 285 | .exit_latency = 59, |
| 267 | .target_residency = 156, | 286 | .target_residency = 156, |
| 268 | .enter = &intel_idle }, | 287 | .enter = &intel_idle, |
| 288 | .enter_freeze = intel_idle_freeze, }, | ||
| 269 | { | 289 | { |
| 270 | .name = "C6-IVB", | 290 | .name = "C6-IVB", |
| 271 | .desc = "MWAIT 0x20", | 291 | .desc = "MWAIT 0x20", |
| 272 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 292 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 273 | .exit_latency = 80, | 293 | .exit_latency = 80, |
| 274 | .target_residency = 300, | 294 | .target_residency = 300, |
| 275 | .enter = &intel_idle }, | 295 | .enter = &intel_idle, |
| 296 | .enter_freeze = intel_idle_freeze, }, | ||
| 276 | { | 297 | { |
| 277 | .name = "C7-IVB", | 298 | .name = "C7-IVB", |
| 278 | .desc = "MWAIT 0x30", | 299 | .desc = "MWAIT 0x30", |
| 279 | .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, | 300 | .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 280 | .exit_latency = 87, | 301 | .exit_latency = 87, |
| 281 | .target_residency = 300, | 302 | .target_residency = 300, |
| 282 | .enter = &intel_idle }, | 303 | .enter = &intel_idle, |
| 304 | .enter_freeze = intel_idle_freeze, }, | ||
| 283 | { | 305 | { |
| 284 | .enter = NULL } | 306 | .enter = NULL } |
| 285 | }; | 307 | }; |
| @@ -291,28 +313,32 @@ static struct cpuidle_state ivt_cstates[] = { | |||
| 291 | .flags = MWAIT2flg(0x00), | 313 | .flags = MWAIT2flg(0x00), |
| 292 | .exit_latency = 1, | 314 | .exit_latency = 1, |
| 293 | .target_residency = 1, | 315 | .target_residency = 1, |
| 294 | .enter = &intel_idle }, | 316 | .enter = &intel_idle, |
| 317 | .enter_freeze = intel_idle_freeze, }, | ||
| 295 | { | 318 | { |
| 296 | .name = "C1E-IVT", | 319 | .name = "C1E-IVT", |
| 297 | .desc = "MWAIT 0x01", | 320 | .desc = "MWAIT 0x01", |
| 298 | .flags = MWAIT2flg(0x01), | 321 | .flags = MWAIT2flg(0x01), |
| 299 | .exit_latency = 10, | 322 | .exit_latency = 10, |
| 300 | .target_residency = 80, | 323 | .target_residency = 80, |
| 301 | .enter = &intel_idle }, | 324 | .enter = &intel_idle, |
| 325 | .enter_freeze = intel_idle_freeze, }, | ||
| 302 | { | 326 | { |
| 303 | .name = "C3-IVT", | 327 | .name = "C3-IVT", |
| 304 | .desc = "MWAIT 0x10", | 328 | .desc = "MWAIT 0x10", |
| 305 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, | 329 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 306 | .exit_latency = 59, | 330 | .exit_latency = 59, |
| 307 | .target_residency = 156, | 331 | .target_residency = 156, |
| 308 | .enter = &intel_idle }, | 332 | .enter = &intel_idle, |
| 333 | .enter_freeze = intel_idle_freeze, }, | ||
| 309 | { | 334 | { |
| 310 | .name = "C6-IVT", | 335 | .name = "C6-IVT", |
| 311 | .desc = "MWAIT 0x20", | 336 | .desc = "MWAIT 0x20", |
| 312 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 337 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 313 | .exit_latency = 82, | 338 | .exit_latency = 82, |
| 314 | .target_residency = 300, | 339 | .target_residency = 300, |
| 315 | .enter = &intel_idle }, | 340 | .enter = &intel_idle, |
| 341 | .enter_freeze = intel_idle_freeze, }, | ||
| 316 | { | 342 | { |
| 317 | .enter = NULL } | 343 | .enter = NULL } |
| 318 | }; | 344 | }; |
| @@ -324,28 +350,32 @@ static struct cpuidle_state ivt_cstates_4s[] = { | |||
| 324 | .flags = MWAIT2flg(0x00), | 350 | .flags = MWAIT2flg(0x00), |
| 325 | .exit_latency = 1, | 351 | .exit_latency = 1, |
| 326 | .target_residency = 1, | 352 | .target_residency = 1, |
| 327 | .enter = &intel_idle }, | 353 | .enter = &intel_idle, |
| 354 | .enter_freeze = intel_idle_freeze, }, | ||
| 328 | { | 355 | { |
| 329 | .name = "C1E-IVT-4S", | 356 | .name = "C1E-IVT-4S", |
| 330 | .desc = "MWAIT 0x01", | 357 | .desc = "MWAIT 0x01", |
| 331 | .flags = MWAIT2flg(0x01), | 358 | .flags = MWAIT2flg(0x01), |
| 332 | .exit_latency = 10, | 359 | .exit_latency = 10, |
| 333 | .target_residency = 250, | 360 | .target_residency = 250, |
| 334 | .enter = &intel_idle }, | 361 | .enter = &intel_idle, |
| 362 | .enter_freeze = intel_idle_freeze, }, | ||
| 335 | { | 363 | { |
| 336 | .name = "C3-IVT-4S", | 364 | .name = "C3-IVT-4S", |
| 337 | .desc = "MWAIT 0x10", | 365 | .desc = "MWAIT 0x10", |
| 338 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, | 366 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 339 | .exit_latency = 59, | 367 | .exit_latency = 59, |
| 340 | .target_residency = 300, | 368 | .target_residency = 300, |
| 341 | .enter = &intel_idle }, | 369 | .enter = &intel_idle, |
| 370 | .enter_freeze = intel_idle_freeze, }, | ||
| 342 | { | 371 | { |
| 343 | .name = "C6-IVT-4S", | 372 | .name = "C6-IVT-4S", |
| 344 | .desc = "MWAIT 0x20", | 373 | .desc = "MWAIT 0x20", |
| 345 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 374 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 346 | .exit_latency = 84, | 375 | .exit_latency = 84, |
| 347 | .target_residency = 400, | 376 | .target_residency = 400, |
| 348 | .enter = &intel_idle }, | 377 | .enter = &intel_idle, |
| 378 | .enter_freeze = intel_idle_freeze, }, | ||
| 349 | { | 379 | { |
| 350 | .enter = NULL } | 380 | .enter = NULL } |
| 351 | }; | 381 | }; |
| @@ -357,28 +387,32 @@ static struct cpuidle_state ivt_cstates_8s[] = { | |||
| 357 | .flags = MWAIT2flg(0x00), | 387 | .flags = MWAIT2flg(0x00), |
| 358 | .exit_latency = 1, | 388 | .exit_latency = 1, |
| 359 | .target_residency = 1, | 389 | .target_residency = 1, |
| 360 | .enter = &intel_idle }, | 390 | .enter = &intel_idle, |
| 391 | .enter_freeze = intel_idle_freeze, }, | ||
| 361 | { | 392 | { |
| 362 | .name = "C1E-IVT-8S", | 393 | .name = "C1E-IVT-8S", |
| 363 | .desc = "MWAIT 0x01", | 394 | .desc = "MWAIT 0x01", |
| 364 | .flags = MWAIT2flg(0x01), | 395 | .flags = MWAIT2flg(0x01), |
| 365 | .exit_latency = 10, | 396 | .exit_latency = 10, |
| 366 | .target_residency = 500, | 397 | .target_residency = 500, |
| 367 | .enter = &intel_idle }, | 398 | .enter = &intel_idle, |
| 399 | .enter_freeze = intel_idle_freeze, }, | ||
| 368 | { | 400 | { |
| 369 | .name = "C3-IVT-8S", | 401 | .name = "C3-IVT-8S", |
| 370 | .desc = "MWAIT 0x10", | 402 | .desc = "MWAIT 0x10", |
| 371 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, | 403 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 372 | .exit_latency = 59, | 404 | .exit_latency = 59, |
| 373 | .target_residency = 600, | 405 | .target_residency = 600, |
| 374 | .enter = &intel_idle }, | 406 | .enter = &intel_idle, |
| 407 | .enter_freeze = intel_idle_freeze, }, | ||
| 375 | { | 408 | { |
| 376 | .name = "C6-IVT-8S", | 409 | .name = "C6-IVT-8S", |
| 377 | .desc = "MWAIT 0x20", | 410 | .desc = "MWAIT 0x20", |
| 378 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 411 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 379 | .exit_latency = 88, | 412 | .exit_latency = 88, |
| 380 | .target_residency = 700, | 413 | .target_residency = 700, |
| 381 | .enter = &intel_idle }, | 414 | .enter = &intel_idle, |
| 415 | .enter_freeze = intel_idle_freeze, }, | ||
| 382 | { | 416 | { |
| 383 | .enter = NULL } | 417 | .enter = NULL } |
| 384 | }; | 418 | }; |
| @@ -390,56 +424,64 @@ static struct cpuidle_state hsw_cstates[] = { | |||
| 390 | .flags = MWAIT2flg(0x00), | 424 | .flags = MWAIT2flg(0x00), |
| 391 | .exit_latency = 2, | 425 | .exit_latency = 2, |
| 392 | .target_residency = 2, | 426 | .target_residency = 2, |
| 393 | .enter = &intel_idle }, | 427 | .enter = &intel_idle, |
| 428 | .enter_freeze = intel_idle_freeze, }, | ||
| 394 | { | 429 | { |
| 395 | .name = "C1E-HSW", | 430 | .name = "C1E-HSW", |
| 396 | .desc = "MWAIT 0x01", | 431 | .desc = "MWAIT 0x01", |
| 397 | .flags = MWAIT2flg(0x01), | 432 | .flags = MWAIT2flg(0x01), |
| 398 | .exit_latency = 10, | 433 | .exit_latency = 10, |
| 399 | .target_residency = 20, | 434 | .target_residency = 20, |
| 400 | .enter = &intel_idle }, | 435 | .enter = &intel_idle, |
| 436 | .enter_freeze = intel_idle_freeze, }, | ||
| 401 | { | 437 | { |
| 402 | .name = "C3-HSW", | 438 | .name = "C3-HSW", |
| 403 | .desc = "MWAIT 0x10", | 439 | .desc = "MWAIT 0x10", |
| 404 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, | 440 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 405 | .exit_latency = 33, | 441 | .exit_latency = 33, |
| 406 | .target_residency = 100, | 442 | .target_residency = 100, |
| 407 | .enter = &intel_idle }, | 443 | .enter = &intel_idle, |
| 444 | .enter_freeze = intel_idle_freeze, }, | ||
| 408 | { | 445 | { |
| 409 | .name = "C6-HSW", | 446 | .name = "C6-HSW", |
| 410 | .desc = "MWAIT 0x20", | 447 | .desc = "MWAIT 0x20", |
| 411 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 448 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 412 | .exit_latency = 133, | 449 | .exit_latency = 133, |
| 413 | .target_residency = 400, | 450 | .target_residency = 400, |
| 414 | .enter = &intel_idle }, | 451 | .enter = &intel_idle, |
| 452 | .enter_freeze = intel_idle_freeze, }, | ||
| 415 | { | 453 | { |
| 416 | .name = "C7s-HSW", | 454 | .name = "C7s-HSW", |
| 417 | .desc = "MWAIT 0x32", | 455 | .desc = "MWAIT 0x32", |
| 418 | .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, | 456 | .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 419 | .exit_latency = 166, | 457 | .exit_latency = 166, |
| 420 | .target_residency = 500, | 458 | .target_residency = 500, |
| 421 | .enter = &intel_idle }, | 459 | .enter = &intel_idle, |
| 460 | .enter_freeze = intel_idle_freeze, }, | ||
| 422 | { | 461 | { |
| 423 | .name = "C8-HSW", | 462 | .name = "C8-HSW", |
| 424 | .desc = "MWAIT 0x40", | 463 | .desc = "MWAIT 0x40", |
| 425 | .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, | 464 | .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 426 | .exit_latency = 300, | 465 | .exit_latency = 300, |
| 427 | .target_residency = 900, | 466 | .target_residency = 900, |
| 428 | .enter = &intel_idle }, | 467 | .enter = &intel_idle, |
| 468 | .enter_freeze = intel_idle_freeze, }, | ||
| 429 | { | 469 | { |
| 430 | .name = "C9-HSW", | 470 | .name = "C9-HSW", |
| 431 | .desc = "MWAIT 0x50", | 471 | .desc = "MWAIT 0x50", |
| 432 | .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, | 472 | .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 433 | .exit_latency = 600, | 473 | .exit_latency = 600, |
| 434 | .target_residency = 1800, | 474 | .target_residency = 1800, |
| 435 | .enter = &intel_idle }, | 475 | .enter = &intel_idle, |
| 476 | .enter_freeze = intel_idle_freeze, }, | ||
| 436 | { | 477 | { |
| 437 | .name = "C10-HSW", | 478 | .name = "C10-HSW", |
| 438 | .desc = "MWAIT 0x60", | 479 | .desc = "MWAIT 0x60", |
| 439 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, | 480 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 440 | .exit_latency = 2600, | 481 | .exit_latency = 2600, |
| 441 | .target_residency = 7700, | 482 | .target_residency = 7700, |
| 442 | .enter = &intel_idle }, | 483 | .enter = &intel_idle, |
| 484 | .enter_freeze = intel_idle_freeze, }, | ||
| 443 | { | 485 | { |
| 444 | .enter = NULL } | 486 | .enter = NULL } |
| 445 | }; | 487 | }; |
| @@ -450,56 +492,64 @@ static struct cpuidle_state bdw_cstates[] = { | |||
| 450 | .flags = MWAIT2flg(0x00), | 492 | .flags = MWAIT2flg(0x00), |
| 451 | .exit_latency = 2, | 493 | .exit_latency = 2, |
| 452 | .target_residency = 2, | 494 | .target_residency = 2, |
| 453 | .enter = &intel_idle }, | 495 | .enter = &intel_idle, |
| 496 | .enter_freeze = intel_idle_freeze, }, | ||
| 454 | { | 497 | { |
| 455 | .name = "C1E-BDW", | 498 | .name = "C1E-BDW", |
| 456 | .desc = "MWAIT 0x01", | 499 | .desc = "MWAIT 0x01", |
| 457 | .flags = MWAIT2flg(0x01), | 500 | .flags = MWAIT2flg(0x01), |
| 458 | .exit_latency = 10, | 501 | .exit_latency = 10, |
| 459 | .target_residency = 20, | 502 | .target_residency = 20, |
| 460 | .enter = &intel_idle }, | 503 | .enter = &intel_idle, |
| 504 | .enter_freeze = intel_idle_freeze, }, | ||
| 461 | { | 505 | { |
| 462 | .name = "C3-BDW", | 506 | .name = "C3-BDW", |
| 463 | .desc = "MWAIT 0x10", | 507 | .desc = "MWAIT 0x10", |
| 464 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, | 508 | .flags = MWAIT2flg(0x10) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 465 | .exit_latency = 40, | 509 | .exit_latency = 40, |
| 466 | .target_residency = 100, | 510 | .target_residency = 100, |
| 467 | .enter = &intel_idle }, | 511 | .enter = &intel_idle, |
| 512 | .enter_freeze = intel_idle_freeze, }, | ||
| 468 | { | 513 | { |
| 469 | .name = "C6-BDW", | 514 | .name = "C6-BDW", |
| 470 | .desc = "MWAIT 0x20", | 515 | .desc = "MWAIT 0x20", |
| 471 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, | 516 | .flags = MWAIT2flg(0x20) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 472 | .exit_latency = 133, | 517 | .exit_latency = 133, |
| 473 | .target_residency = 400, | 518 | .target_residency = 400, |
| 474 | .enter = &intel_idle }, | 519 | .enter = &intel_idle, |
| 520 | .enter_freeze = intel_idle_freeze, }, | ||
| 475 | { | 521 | { |
| 476 | .name = "C7s-BDW", | 522 | .name = "C7s-BDW", |
| 477 | .desc = "MWAIT 0x32", | 523 | .desc = "MWAIT 0x32", |
| 478 | .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, | 524 | .flags = MWAIT2flg(0x32) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 479 | .exit_latency = 166, | 525 | .exit_latency = 166, |
| 480 | .target_residency = 500, | 526 | .target_residency = 500, |
| 481 | .enter = &intel_idle }, | 527 | .enter = &intel_idle, |
| 528 | .enter_freeze = intel_idle_freeze, }, | ||
| 482 | { | 529 | { |
| 483 | .name = "C8-BDW", | 530 | .name = "C8-BDW", |
| 484 | .desc = "MWAIT 0x40", | 531 | .desc = "MWAIT 0x40", |
| 485 | .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, | 532 | .flags = MWAIT2flg(0x40) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 486 | .exit_latency = 300, | 533 | .exit_latency = 300, |
| 487 | .target_residency = 900, | 534 | .target_residency = 900, |
| 488 | .enter = &intel_idle }, | 535 | .enter = &intel_idle, |
| 536 | .enter_freeze = intel_idle_freeze, }, | ||
| 489 | { | 537 | { |
| 490 | .name = "C9-BDW", | 538 | .name = "C9-BDW", |
| 491 | .desc = "MWAIT 0x50", | 539 | .desc = "MWAIT 0x50", |
| 492 | .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, | 540 | .flags = MWAIT2flg(0x50) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 493 | .exit_latency = 600, | 541 | .exit_latency = 600, |
| 494 | .target_residency = 1800, | 542 | .target_residency = 1800, |
| 495 | .enter = &intel_idle }, | 543 | .enter = &intel_idle, |
| 544 | .enter_freeze = intel_idle_freeze, }, | ||
| 496 | { | 545 | { |
| 497 | .name = "C10-BDW", | 546 | .name = "C10-BDW", |
| 498 | .desc = "MWAIT 0x60", | 547 | .desc = "MWAIT 0x60", |
| 499 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, | 548 | .flags = MWAIT2flg(0x60) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 500 | .exit_latency = 2600, | 549 | .exit_latency = 2600, |
| 501 | .target_residency = 7700, | 550 | .target_residency = 7700, |
| 502 | .enter = &intel_idle }, | 551 | .enter = &intel_idle, |
| 552 | .enter_freeze = intel_idle_freeze, }, | ||
| 503 | { | 553 | { |
| 504 | .enter = NULL } | 554 | .enter = NULL } |
| 505 | }; | 555 | }; |
| @@ -511,28 +561,32 @@ static struct cpuidle_state atom_cstates[] = { | |||
| 511 | .flags = MWAIT2flg(0x00), | 561 | .flags = MWAIT2flg(0x00), |
| 512 | .exit_latency = 10, | 562 | .exit_latency = 10, |
| 513 | .target_residency = 20, | 563 | .target_residency = 20, |
| 514 | .enter = &intel_idle }, | 564 | .enter = &intel_idle, |
| 565 | .enter_freeze = intel_idle_freeze, }, | ||
| 515 | { | 566 | { |
| 516 | .name = "C2-ATM", | 567 | .name = "C2-ATM", |
| 517 | .desc = "MWAIT 0x10", | 568 | .desc = "MWAIT 0x10", |
| 518 | .flags = MWAIT2flg(0x10), | 569 | .flags = MWAIT2flg(0x10), |
| 519 | .exit_latency = 20, | 570 | .exit_latency = 20, |
| 520 | .target_residency = 80, | 571 | .target_residency = 80, |
| 521 | .enter = &intel_idle }, | 572 | .enter = &intel_idle, |
| 573 | .enter_freeze = intel_idle_freeze, }, | ||
| 522 | { | 574 | { |
| 523 | .name = "C4-ATM", | 575 | .name = "C4-ATM", |
| 524 | .desc = "MWAIT 0x30", | 576 | .desc = "MWAIT 0x30", |
| 525 | .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, | 577 | .flags = MWAIT2flg(0x30) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 526 | .exit_latency = 100, | 578 | .exit_latency = 100, |
| 527 | .target_residency = 400, | 579 | .target_residency = 400, |
| 528 | .enter = &intel_idle }, | 580 | .enter = &intel_idle, |
| 581 | .enter_freeze = intel_idle_freeze, }, | ||
| 529 | { | 582 | { |
| 530 | .name = "C6-ATM", | 583 | .name = "C6-ATM", |
| 531 | .desc = "MWAIT 0x52", | 584 | .desc = "MWAIT 0x52", |
| 532 | .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, | 585 | .flags = MWAIT2flg(0x52) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 533 | .exit_latency = 140, | 586 | .exit_latency = 140, |
| 534 | .target_residency = 560, | 587 | .target_residency = 560, |
| 535 | .enter = &intel_idle }, | 588 | .enter = &intel_idle, |
| 589 | .enter_freeze = intel_idle_freeze, }, | ||
| 536 | { | 590 | { |
| 537 | .enter = NULL } | 591 | .enter = NULL } |
| 538 | }; | 592 | }; |
| @@ -543,14 +597,16 @@ static struct cpuidle_state avn_cstates[] = { | |||
| 543 | .flags = MWAIT2flg(0x00), | 597 | .flags = MWAIT2flg(0x00), |
| 544 | .exit_latency = 2, | 598 | .exit_latency = 2, |
| 545 | .target_residency = 2, | 599 | .target_residency = 2, |
| 546 | .enter = &intel_idle }, | 600 | .enter = &intel_idle, |
| 601 | .enter_freeze = intel_idle_freeze, }, | ||
| 547 | { | 602 | { |
| 548 | .name = "C6-AVN", | 603 | .name = "C6-AVN", |
| 549 | .desc = "MWAIT 0x51", | 604 | .desc = "MWAIT 0x51", |
| 550 | .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED, | 605 | .flags = MWAIT2flg(0x51) | CPUIDLE_FLAG_TLB_FLUSHED, |
| 551 | .exit_latency = 15, | 606 | .exit_latency = 15, |
| 552 | .target_residency = 45, | 607 | .target_residency = 45, |
| 553 | .enter = &intel_idle }, | 608 | .enter = &intel_idle, |
| 609 | .enter_freeze = intel_idle_freeze, }, | ||
| 554 | { | 610 | { |
| 555 | .enter = NULL } | 611 | .enter = NULL } |
| 556 | }; | 612 | }; |
| @@ -592,6 +648,21 @@ static int intel_idle(struct cpuidle_device *dev, | |||
| 592 | return index; | 648 | return index; |
| 593 | } | 649 | } |
| 594 | 650 | ||
| 651 | /** | ||
| 652 | * intel_idle_freeze - simplified "enter" callback routine for suspend-to-idle | ||
| 653 | * @dev: cpuidle_device | ||
| 654 | * @drv: cpuidle driver | ||
| 655 | * @index: state index | ||
| 656 | */ | ||
| 657 | static void intel_idle_freeze(struct cpuidle_device *dev, | ||
| 658 | struct cpuidle_driver *drv, int index) | ||
| 659 | { | ||
| 660 | unsigned long ecx = 1; /* break on interrupt flag */ | ||
| 661 | unsigned long eax = flg2MWAIT(drv->states[index].flags); | ||
| 662 | |||
| 663 | mwait_idle_with_hints(eax, ecx); | ||
| 664 | } | ||
| 665 | |||
| 595 | static void __setup_broadcast_timer(void *arg) | 666 | static void __setup_broadcast_timer(void *arg) |
| 596 | { | 667 | { |
| 597 | unsigned long reason = (unsigned long)arg; | 668 | unsigned long reason = (unsigned long)arg; |
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h index ab70f3bc44ad..f551a9299ac9 100644 --- a/include/linux/cpuidle.h +++ b/include/linux/cpuidle.h | |||
| @@ -50,6 +50,15 @@ struct cpuidle_state { | |||
| 50 | int index); | 50 | int index); |
| 51 | 51 | ||
| 52 | int (*enter_dead) (struct cpuidle_device *dev, int index); | 52 | int (*enter_dead) (struct cpuidle_device *dev, int index); |
| 53 | |||
| 54 | /* | ||
| 55 | * CPUs execute ->enter_freeze with the local tick or entire timekeeping | ||
| 56 | * suspended, so it must not re-enable interrupts at any point (even | ||
| 57 | * temporarily) or attempt to change states of clock event devices. | ||
| 58 | */ | ||
| 59 | void (*enter_freeze) (struct cpuidle_device *dev, | ||
| 60 | struct cpuidle_driver *drv, | ||
| 61 | int index); | ||
| 53 | }; | 62 | }; |
| 54 | 63 | ||
| 55 | /* Idle State Flags */ | 64 | /* Idle State Flags */ |
| @@ -141,7 +150,7 @@ extern void cpuidle_resume(void); | |||
| 141 | extern int cpuidle_enable_device(struct cpuidle_device *dev); | 150 | extern int cpuidle_enable_device(struct cpuidle_device *dev); |
| 142 | extern void cpuidle_disable_device(struct cpuidle_device *dev); | 151 | extern void cpuidle_disable_device(struct cpuidle_device *dev); |
| 143 | extern int cpuidle_play_dead(void); | 152 | extern int cpuidle_play_dead(void); |
| 144 | extern void cpuidle_use_deepest_state(bool enable); | 153 | extern void cpuidle_enter_freeze(void); |
| 145 | 154 | ||
| 146 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); | 155 | extern struct cpuidle_driver *cpuidle_get_cpu_driver(struct cpuidle_device *dev); |
| 147 | #else | 156 | #else |
| @@ -174,7 +183,7 @@ static inline int cpuidle_enable_device(struct cpuidle_device *dev) | |||
| 174 | {return -ENODEV; } | 183 | {return -ENODEV; } |
| 175 | static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } | 184 | static inline void cpuidle_disable_device(struct cpuidle_device *dev) { } |
| 176 | static inline int cpuidle_play_dead(void) {return -ENODEV; } | 185 | static inline int cpuidle_play_dead(void) {return -ENODEV; } |
| 177 | static inline void cpuidle_use_deepest_state(bool enable) {} | 186 | static inline void cpuidle_enter_freeze(void) { } |
| 178 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( | 187 | static inline struct cpuidle_driver *cpuidle_get_cpu_driver( |
| 179 | struct cpuidle_device *dev) {return NULL; } | 188 | struct cpuidle_device *dev) {return NULL; } |
| 180 | #endif | 189 | #endif |
diff --git a/include/linux/suspend.h b/include/linux/suspend.h index 3388c1b6f7d8..5efe743ce1e8 100644 --- a/include/linux/suspend.h +++ b/include/linux/suspend.h | |||
| @@ -201,6 +201,21 @@ struct platform_freeze_ops { | |||
| 201 | */ | 201 | */ |
| 202 | extern void suspend_set_ops(const struct platform_suspend_ops *ops); | 202 | extern void suspend_set_ops(const struct platform_suspend_ops *ops); |
| 203 | extern int suspend_valid_only_mem(suspend_state_t state); | 203 | extern int suspend_valid_only_mem(suspend_state_t state); |
| 204 | |||
| 205 | /* Suspend-to-idle state machine. */ | ||
| 206 | enum freeze_state { | ||
| 207 | FREEZE_STATE_NONE, /* Not suspended/suspending. */ | ||
| 208 | FREEZE_STATE_ENTER, /* Enter suspend-to-idle. */ | ||
| 209 | FREEZE_STATE_WAKE, /* Wake up from suspend-to-idle. */ | ||
| 210 | }; | ||
| 211 | |||
| 212 | extern enum freeze_state __read_mostly suspend_freeze_state; | ||
| 213 | |||
| 214 | static inline bool idle_should_freeze(void) | ||
| 215 | { | ||
| 216 | return unlikely(suspend_freeze_state == FREEZE_STATE_ENTER); | ||
| 217 | } | ||
| 218 | |||
| 204 | extern void freeze_set_ops(const struct platform_freeze_ops *ops); | 219 | extern void freeze_set_ops(const struct platform_freeze_ops *ops); |
| 205 | extern void freeze_wake(void); | 220 | extern void freeze_wake(void); |
| 206 | 221 | ||
| @@ -228,6 +243,7 @@ extern int pm_suspend(suspend_state_t state); | |||
| 228 | 243 | ||
| 229 | static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} | 244 | static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} |
| 230 | static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } | 245 | static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } |
| 246 | static inline bool idle_should_freeze(void) { return false; } | ||
| 231 | static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} | 247 | static inline void freeze_set_ops(const struct platform_freeze_ops *ops) {} |
| 232 | static inline void freeze_wake(void) {} | 248 | static inline void freeze_wake(void) {} |
| 233 | #endif /* !CONFIG_SUSPEND */ | 249 | #endif /* !CONFIG_SUSPEND */ |
diff --git a/include/linux/tick.h b/include/linux/tick.h index eda850ca757a..9c085dc12ae9 100644 --- a/include/linux/tick.h +++ b/include/linux/tick.h | |||
| @@ -79,6 +79,9 @@ extern void __init tick_init(void); | |||
| 79 | extern int tick_is_oneshot_available(void); | 79 | extern int tick_is_oneshot_available(void); |
| 80 | extern struct tick_device *tick_get_device(int cpu); | 80 | extern struct tick_device *tick_get_device(int cpu); |
| 81 | 81 | ||
| 82 | extern void tick_freeze(void); | ||
| 83 | extern void tick_unfreeze(void); | ||
| 84 | |||
| 82 | # ifdef CONFIG_HIGH_RES_TIMERS | 85 | # ifdef CONFIG_HIGH_RES_TIMERS |
| 83 | extern int tick_init_highres(void); | 86 | extern int tick_init_highres(void); |
| 84 | extern int tick_program_event(ktime_t expires, int force); | 87 | extern int tick_program_event(ktime_t expires, int force); |
| @@ -119,6 +122,8 @@ static inline int tick_oneshot_mode_active(void) { return 0; } | |||
| 119 | 122 | ||
| 120 | #else /* CONFIG_GENERIC_CLOCKEVENTS */ | 123 | #else /* CONFIG_GENERIC_CLOCKEVENTS */ |
| 121 | static inline void tick_init(void) { } | 124 | static inline void tick_init(void) { } |
| 125 | static inline void tick_freeze(void) { } | ||
| 126 | static inline void tick_unfreeze(void) { } | ||
| 122 | static inline void tick_cancel_sched_timer(int cpu) { } | 127 | static inline void tick_cancel_sched_timer(int cpu) { } |
| 123 | static inline void tick_clock_notify(void) { } | 128 | static inline void tick_clock_notify(void) { } |
| 124 | static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } | 129 | static inline int tick_check_oneshot_change(int allow_nohz) { return 0; } |
| @@ -226,5 +231,4 @@ static inline void tick_nohz_task_switch(struct task_struct *tsk) | |||
| 226 | __tick_nohz_task_switch(tsk); | 231 | __tick_nohz_task_switch(tsk); |
| 227 | } | 232 | } |
| 228 | 233 | ||
| 229 | |||
| 230 | #endif | 234 | #endif |
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c index c347e3ce3a55..b7d6b3a721b1 100644 --- a/kernel/power/suspend.c +++ b/kernel/power/suspend.c | |||
| @@ -37,7 +37,9 @@ const char *pm_states[PM_SUSPEND_MAX]; | |||
| 37 | static const struct platform_suspend_ops *suspend_ops; | 37 | static const struct platform_suspend_ops *suspend_ops; |
| 38 | static const struct platform_freeze_ops *freeze_ops; | 38 | static const struct platform_freeze_ops *freeze_ops; |
| 39 | static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); | 39 | static DECLARE_WAIT_QUEUE_HEAD(suspend_freeze_wait_head); |
| 40 | static bool suspend_freeze_wake; | 40 | |
| 41 | enum freeze_state __read_mostly suspend_freeze_state; | ||
| 42 | static DEFINE_SPINLOCK(suspend_freeze_lock); | ||
| 41 | 43 | ||
| 42 | void freeze_set_ops(const struct platform_freeze_ops *ops) | 44 | void freeze_set_ops(const struct platform_freeze_ops *ops) |
| 43 | { | 45 | { |
| @@ -48,22 +50,49 @@ void freeze_set_ops(const struct platform_freeze_ops *ops) | |||
| 48 | 50 | ||
| 49 | static void freeze_begin(void) | 51 | static void freeze_begin(void) |
| 50 | { | 52 | { |
| 51 | suspend_freeze_wake = false; | 53 | suspend_freeze_state = FREEZE_STATE_NONE; |
| 52 | } | 54 | } |
| 53 | 55 | ||
| 54 | static void freeze_enter(void) | 56 | static void freeze_enter(void) |
| 55 | { | 57 | { |
| 56 | cpuidle_use_deepest_state(true); | 58 | spin_lock_irq(&suspend_freeze_lock); |
| 59 | if (pm_wakeup_pending()) | ||
| 60 | goto out; | ||
| 61 | |||
| 62 | suspend_freeze_state = FREEZE_STATE_ENTER; | ||
| 63 | spin_unlock_irq(&suspend_freeze_lock); | ||
| 64 | |||
| 65 | get_online_cpus(); | ||
| 57 | cpuidle_resume(); | 66 | cpuidle_resume(); |
| 58 | wait_event(suspend_freeze_wait_head, suspend_freeze_wake); | 67 | |
| 68 | /* Push all the CPUs into the idle loop. */ | ||
| 69 | wake_up_all_idle_cpus(); | ||
| 70 | pr_debug("PM: suspend-to-idle\n"); | ||
| 71 | /* Make the current CPU wait so it can enter the idle loop too. */ | ||
| 72 | wait_event(suspend_freeze_wait_head, | ||
| 73 | suspend_freeze_state == FREEZE_STATE_WAKE); | ||
| 74 | pr_debug("PM: resume from suspend-to-idle\n"); | ||
| 75 | |||
| 59 | cpuidle_pause(); | 76 | cpuidle_pause(); |
| 60 | cpuidle_use_deepest_state(false); | 77 | put_online_cpus(); |
| 78 | |||
| 79 | spin_lock_irq(&suspend_freeze_lock); | ||
| 80 | |||
| 81 | out: | ||
| 82 | suspend_freeze_state = FREEZE_STATE_NONE; | ||
| 83 | spin_unlock_irq(&suspend_freeze_lock); | ||
| 61 | } | 84 | } |
| 62 | 85 | ||
| 63 | void freeze_wake(void) | 86 | void freeze_wake(void) |
| 64 | { | 87 | { |
| 65 | suspend_freeze_wake = true; | 88 | unsigned long flags; |
| 66 | wake_up(&suspend_freeze_wait_head); | 89 | |
| 90 | spin_lock_irqsave(&suspend_freeze_lock, flags); | ||
| 91 | if (suspend_freeze_state > FREEZE_STATE_NONE) { | ||
| 92 | suspend_freeze_state = FREEZE_STATE_WAKE; | ||
| 93 | wake_up(&suspend_freeze_wait_head); | ||
| 94 | } | ||
| 95 | spin_unlock_irqrestore(&suspend_freeze_lock, flags); | ||
| 67 | } | 96 | } |
| 68 | EXPORT_SYMBOL_GPL(freeze_wake); | 97 | EXPORT_SYMBOL_GPL(freeze_wake); |
| 69 | 98 | ||
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c index aaf1c1d5cf5d..94b2d7b88a27 100644 --- a/kernel/sched/idle.c +++ b/kernel/sched/idle.c | |||
| @@ -7,6 +7,7 @@ | |||
| 7 | #include <linux/tick.h> | 7 | #include <linux/tick.h> |
| 8 | #include <linux/mm.h> | 8 | #include <linux/mm.h> |
| 9 | #include <linux/stackprotector.h> | 9 | #include <linux/stackprotector.h> |
| 10 | #include <linux/suspend.h> | ||
| 10 | 11 | ||
| 11 | #include <asm/tlb.h> | 12 | #include <asm/tlb.h> |
| 12 | 13 | ||
| @@ -105,6 +106,21 @@ static void cpuidle_idle_call(void) | |||
| 105 | rcu_idle_enter(); | 106 | rcu_idle_enter(); |
| 106 | 107 | ||
| 107 | /* | 108 | /* |
| 109 | * Suspend-to-idle ("freeze") is a system state in which all user space | ||
| 110 | * has been frozen, all I/O devices have been suspended and the only | ||
| 111 | * activity happens here and in interrupts (if any). In that case bypass | ||
| 112 | * the cpuidle governor and go straight for the deepest idle state | ||
| 113 | * available. Possibly also suspend the local tick and the entire | ||
| 114 | * timekeeping to prevent timer interrupts from kicking us out of idle | ||
| 115 | * until a proper wakeup interrupt happens. | ||
| 116 | */ | ||
| 117 | if (idle_should_freeze()) { | ||
| 118 | cpuidle_enter_freeze(); | ||
| 119 | local_irq_enable(); | ||
| 120 | goto exit_idle; | ||
| 121 | } | ||
| 122 | |||
| 123 | /* | ||
| 108 | * Ask the cpuidle framework to choose a convenient idle state. | 124 | * Ask the cpuidle framework to choose a convenient idle state. |
| 109 | * Fall back to the default arch idle method on errors. | 125 | * Fall back to the default arch idle method on errors. |
| 110 | */ | 126 | */ |
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 7efeedf53ebd..f7c515595b42 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c | |||
| @@ -394,6 +394,56 @@ void tick_resume(void) | |||
| 394 | } | 394 | } |
| 395 | } | 395 | } |
| 396 | 396 | ||
| 397 | static DEFINE_RAW_SPINLOCK(tick_freeze_lock); | ||
| 398 | static unsigned int tick_freeze_depth; | ||
| 399 | |||
| 400 | /** | ||
| 401 | * tick_freeze - Suspend the local tick and (possibly) timekeeping. | ||
| 402 | * | ||
| 403 | * Check if this is the last online CPU executing the function and if so, | ||
| 404 | * suspend timekeeping. Otherwise suspend the local tick. | ||
| 405 | * | ||
| 406 | * Call with interrupts disabled. Must be balanced with %tick_unfreeze(). | ||
| 407 | * Interrupts must not be enabled before the subsequent %tick_unfreeze(). | ||
| 408 | */ | ||
| 409 | void tick_freeze(void) | ||
| 410 | { | ||
| 411 | raw_spin_lock(&tick_freeze_lock); | ||
| 412 | |||
| 413 | tick_freeze_depth++; | ||
| 414 | if (tick_freeze_depth == num_online_cpus()) { | ||
| 415 | timekeeping_suspend(); | ||
| 416 | } else { | ||
| 417 | tick_suspend(); | ||
| 418 | tick_suspend_broadcast(); | ||
| 419 | } | ||
| 420 | |||
| 421 | raw_spin_unlock(&tick_freeze_lock); | ||
| 422 | } | ||
| 423 | |||
| 424 | /** | ||
| 425 | * tick_unfreeze - Resume the local tick and (possibly) timekeeping. | ||
| 426 | * | ||
| 427 | * Check if this is the first CPU executing the function and if so, resume | ||
| 428 | * timekeeping. Otherwise resume the local tick. | ||
| 429 | * | ||
| 430 | * Call with interrupts disabled. Must be balanced with %tick_freeze(). | ||
| 431 | * Interrupts must not be enabled after the preceding %tick_freeze(). | ||
| 432 | */ | ||
| 433 | void tick_unfreeze(void) | ||
| 434 | { | ||
| 435 | raw_spin_lock(&tick_freeze_lock); | ||
| 436 | |||
| 437 | if (tick_freeze_depth == num_online_cpus()) | ||
| 438 | timekeeping_resume(); | ||
| 439 | else | ||
| 440 | tick_resume(); | ||
| 441 | |||
| 442 | tick_freeze_depth--; | ||
| 443 | |||
| 444 | raw_spin_unlock(&tick_freeze_lock); | ||
| 445 | } | ||
| 446 | |||
| 397 | /** | 447 | /** |
| 398 | * tick_init - initialize the tick control | 448 | * tick_init - initialize the tick control |
| 399 | */ | 449 | */ |
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c index b124af259800..91db94136c10 100644 --- a/kernel/time/timekeeping.c +++ b/kernel/time/timekeeping.c | |||
| @@ -230,9 +230,7 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) | |||
| 230 | 230 | ||
| 231 | /** | 231 | /** |
| 232 | * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper. | 232 | * update_fast_timekeeper - Update the fast and NMI safe monotonic timekeeper. |
| 233 | * @tk: The timekeeper from which we take the update | 233 | * @tkr: Timekeeping readout base from which we take the update |
| 234 | * @tkf: The fast timekeeper to update | ||
| 235 | * @tbase: The time base for the fast timekeeper (mono/raw) | ||
| 236 | * | 234 | * |
| 237 | * We want to use this from any context including NMI and tracing / | 235 | * We want to use this from any context including NMI and tracing / |
| 238 | * instrumenting the timekeeping code itself. | 236 | * instrumenting the timekeeping code itself. |
| @@ -244,11 +242,11 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) | |||
| 244 | * smp_wmb(); <- Ensure that the last base[1] update is visible | 242 | * smp_wmb(); <- Ensure that the last base[1] update is visible |
| 245 | * tkf->seq++; | 243 | * tkf->seq++; |
| 246 | * smp_wmb(); <- Ensure that the seqcount update is visible | 244 | * smp_wmb(); <- Ensure that the seqcount update is visible |
| 247 | * update(tkf->base[0], tk); | 245 | * update(tkf->base[0], tkr); |
| 248 | * smp_wmb(); <- Ensure that the base[0] update is visible | 246 | * smp_wmb(); <- Ensure that the base[0] update is visible |
| 249 | * tkf->seq++; | 247 | * tkf->seq++; |
| 250 | * smp_wmb(); <- Ensure that the seqcount update is visible | 248 | * smp_wmb(); <- Ensure that the seqcount update is visible |
| 251 | * update(tkf->base[1], tk); | 249 | * update(tkf->base[1], tkr); |
| 252 | * | 250 | * |
| 253 | * The reader side does: | 251 | * The reader side does: |
| 254 | * | 252 | * |
| @@ -269,7 +267,7 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk) | |||
| 269 | * slightly wrong timestamp (a few nanoseconds). See | 267 | * slightly wrong timestamp (a few nanoseconds). See |
| 270 | * @ktime_get_mono_fast_ns. | 268 | * @ktime_get_mono_fast_ns. |
| 271 | */ | 269 | */ |
| 272 | static void update_fast_timekeeper(struct timekeeper *tk) | 270 | static void update_fast_timekeeper(struct tk_read_base *tkr) |
| 273 | { | 271 | { |
| 274 | struct tk_read_base *base = tk_fast_mono.base; | 272 | struct tk_read_base *base = tk_fast_mono.base; |
| 275 | 273 | ||
| @@ -277,7 +275,7 @@ static void update_fast_timekeeper(struct timekeeper *tk) | |||
| 277 | raw_write_seqcount_latch(&tk_fast_mono.seq); | 275 | raw_write_seqcount_latch(&tk_fast_mono.seq); |
| 278 | 276 | ||
| 279 | /* Update base[0] */ | 277 | /* Update base[0] */ |
| 280 | memcpy(base, &tk->tkr, sizeof(*base)); | 278 | memcpy(base, tkr, sizeof(*base)); |
| 281 | 279 | ||
| 282 | /* Force readers back to base[0] */ | 280 | /* Force readers back to base[0] */ |
| 283 | raw_write_seqcount_latch(&tk_fast_mono.seq); | 281 | raw_write_seqcount_latch(&tk_fast_mono.seq); |
| @@ -334,6 +332,35 @@ u64 notrace ktime_get_mono_fast_ns(void) | |||
| 334 | } | 332 | } |
| 335 | EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns); | 333 | EXPORT_SYMBOL_GPL(ktime_get_mono_fast_ns); |
| 336 | 334 | ||
| 335 | /* Suspend-time cycles value for halted fast timekeeper. */ | ||
| 336 | static cycle_t cycles_at_suspend; | ||
| 337 | |||
| 338 | static cycle_t dummy_clock_read(struct clocksource *cs) | ||
| 339 | { | ||
| 340 | return cycles_at_suspend; | ||
| 341 | } | ||
| 342 | |||
| 343 | /** | ||
| 344 | * halt_fast_timekeeper - Prevent fast timekeeper from accessing clocksource. | ||
| 345 | * @tk: Timekeeper to snapshot. | ||
| 346 | * | ||
| 347 | * It generally is unsafe to access the clocksource after timekeeping has been | ||
| 348 | * suspended, so take a snapshot of the readout base of @tk and use it as the | ||
| 349 | * fast timekeeper's readout base while suspended. It will return the same | ||
| 350 | * number of cycles every time until timekeeping is resumed at which time the | ||
| 351 | * proper readout base for the fast timekeeper will be restored automatically. | ||
| 352 | */ | ||
| 353 | static void halt_fast_timekeeper(struct timekeeper *tk) | ||
| 354 | { | ||
| 355 | static struct tk_read_base tkr_dummy; | ||
| 356 | struct tk_read_base *tkr = &tk->tkr; | ||
| 357 | |||
| 358 | memcpy(&tkr_dummy, tkr, sizeof(tkr_dummy)); | ||
| 359 | cycles_at_suspend = tkr->read(tkr->clock); | ||
| 360 | tkr_dummy.read = dummy_clock_read; | ||
| 361 | update_fast_timekeeper(&tkr_dummy); | ||
| 362 | } | ||
| 363 | |||
| 337 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD | 364 | #ifdef CONFIG_GENERIC_TIME_VSYSCALL_OLD |
| 338 | 365 | ||
| 339 | static inline void update_vsyscall(struct timekeeper *tk) | 366 | static inline void update_vsyscall(struct timekeeper *tk) |
| @@ -462,7 +489,7 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action) | |||
| 462 | memcpy(&shadow_timekeeper, &tk_core.timekeeper, | 489 | memcpy(&shadow_timekeeper, &tk_core.timekeeper, |
| 463 | sizeof(tk_core.timekeeper)); | 490 | sizeof(tk_core.timekeeper)); |
| 464 | 491 | ||
| 465 | update_fast_timekeeper(tk); | 492 | update_fast_timekeeper(&tk->tkr); |
| 466 | } | 493 | } |
| 467 | 494 | ||
| 468 | /** | 495 | /** |
| @@ -1170,7 +1197,7 @@ void timekeeping_inject_sleeptime64(struct timespec64 *delta) | |||
| 1170 | * xtime/wall_to_monotonic/jiffies/etc are | 1197 | * xtime/wall_to_monotonic/jiffies/etc are |
| 1171 | * still managed by arch specific suspend/resume code. | 1198 | * still managed by arch specific suspend/resume code. |
| 1172 | */ | 1199 | */ |
| 1173 | static void timekeeping_resume(void) | 1200 | void timekeeping_resume(void) |
| 1174 | { | 1201 | { |
| 1175 | struct timekeeper *tk = &tk_core.timekeeper; | 1202 | struct timekeeper *tk = &tk_core.timekeeper; |
| 1176 | struct clocksource *clock = tk->tkr.clock; | 1203 | struct clocksource *clock = tk->tkr.clock; |
| @@ -1251,7 +1278,7 @@ static void timekeeping_resume(void) | |||
| 1251 | hrtimers_resume(); | 1278 | hrtimers_resume(); |
| 1252 | } | 1279 | } |
| 1253 | 1280 | ||
| 1254 | static int timekeeping_suspend(void) | 1281 | int timekeeping_suspend(void) |
| 1255 | { | 1282 | { |
| 1256 | struct timekeeper *tk = &tk_core.timekeeper; | 1283 | struct timekeeper *tk = &tk_core.timekeeper; |
| 1257 | unsigned long flags; | 1284 | unsigned long flags; |
| @@ -1296,6 +1323,7 @@ static int timekeeping_suspend(void) | |||
| 1296 | } | 1323 | } |
| 1297 | 1324 | ||
| 1298 | timekeeping_update(tk, TK_MIRROR); | 1325 | timekeeping_update(tk, TK_MIRROR); |
| 1326 | halt_fast_timekeeper(tk); | ||
| 1299 | write_seqcount_end(&tk_core.seq); | 1327 | write_seqcount_end(&tk_core.seq); |
| 1300 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); | 1328 | raw_spin_unlock_irqrestore(&timekeeper_lock, flags); |
| 1301 | 1329 | ||
diff --git a/kernel/time/timekeeping.h b/kernel/time/timekeeping.h index adc1fc98bde3..1d91416055d5 100644 --- a/kernel/time/timekeeping.h +++ b/kernel/time/timekeeping.h | |||
| @@ -16,5 +16,7 @@ extern int timekeeping_inject_offset(struct timespec *ts); | |||
| 16 | extern s32 timekeeping_get_tai_offset(void); | 16 | extern s32 timekeeping_get_tai_offset(void); |
| 17 | extern void timekeeping_set_tai_offset(s32 tai_offset); | 17 | extern void timekeeping_set_tai_offset(s32 tai_offset); |
| 18 | extern void timekeeping_clocktai(struct timespec *ts); | 18 | extern void timekeeping_clocktai(struct timespec *ts); |
| 19 | extern int timekeeping_suspend(void); | ||
| 20 | extern void timekeeping_resume(void); | ||
| 19 | 21 | ||
| 20 | #endif | 22 | #endif |
