author	Deepthi Dharwar <deepthi@linux.vnet.ibm.com>	2011-10-28 06:50:09 -0400
committer	Len Brown <len.brown@intel.com>	2011-11-06 21:13:30 -0500
commit	e978aa7d7d57d04eb5f88a7507c4fb98577def77 (patch)
tree	d6d6dfe1dba4d4749c7eafe348351aa499c3c5eb /drivers/acpi/processor_idle.c
parent	c3b92c8787367a8bb53d57d9789b558f1295cc96 (diff)
cpuidle: Move dev->last_residency update to driver enter routine; remove dev->last_state
Cpuidle governor only suggests the state to enter using the
governor->select() interface, but allows the low level driver to
override the recommended state. The actual entered state may be
different because of software or hardware demotion. Software demotion
is done by the back-end cpuidle driver and can be accounted correctly.
Current cpuidle code uses the last_state field to capture the actual
state entered and based on that updates the statistics for the state
entered.

Ideally the driver enter routine should update the counters, and it
should return the state actually entered rather than the time spent
there. The generic cpuidle code should simply handle where the counters
live in the sysfs namespace, not updating the counters.

Reference: https://lkml.org/lkml/2011/3/25/52

Signed-off-by: Deepthi Dharwar <deepthi@linux.vnet.ibm.com>
Signed-off-by: Trinabh Gupta <g.trinabh@gmail.com>
Tested-by: Jean Pihet <j-pihet@ti.com>
Reviewed-by: Kevin Hilman <khilman@ti.com>
Acked-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Kevin Hilman <khilman@ti.com>
Signed-off-by: Len Brown <len.brown@intel.com>
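In short, the new enter-routine contract is: the driver receives the
index of the suggested state rather than a pointer to it, records the
measured residency in dev->last_residency itself, and returns the index
of the state it actually entered (or a negative errno if no state was
entered). A minimal sketch of a conforming routine, with the
hypothetical hw_enter_idle() standing in for the real hardware entry:

static int example_idle_enter(struct cpuidle_device *dev, int index)
{
	struct cpuidle_state *state = &dev->states[index];
	ktime_t kt1, kt2;
	s64 idle_time;

	dev->last_residency = 0;	/* stays 0 if we bail out early */

	kt1 = ktime_get_real();
	hw_enter_idle(state);		/* hypothetical low-power entry */
	kt2 = ktime_get_real();

	idle_time = ktime_to_us(ktime_sub(kt2, kt1));

	/* The driver, not the generic code, updates the residency... */
	dev->last_residency = (int)idle_time;

	/* ...and reports which state it actually entered. */
	return index;
}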
Diffstat (limited to 'drivers/acpi/processor_idle.c')
-rw-r--r--	drivers/acpi/processor_idle.c	75
1 file changed, 49 insertions(+), 26 deletions(-)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 431ab11c8c1b..9cd08cecb347 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -741,22 +741,24 @@ static inline void acpi_idle_do_entry(struct acpi_processor_cx *cx)
 /**
  * acpi_idle_enter_c1 - enters an ACPI C1 state-type
  * @dev: the target CPU
- * @state: the state data
+ * @index: index of target state
  *
  * This is equivalent to the HALT instruction.
  */
 static int acpi_idle_enter_c1(struct cpuidle_device *dev,
-		struct cpuidle_state *state)
+		int index)
 {
 	ktime_t kt1, kt2;
 	s64 idle_time;
 	struct acpi_processor *pr;
+	struct cpuidle_state *state = &dev->states[index];
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
+		return -EINVAL;
 
 	local_irq_disable();
 
@@ -764,7 +766,7 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	if (acpi_idle_suspend) {
 		local_irq_enable();
 		cpu_relax();
-		return 0;
+		return -EINVAL;
 	}
 
 	lapic_timer_state_broadcast(pr, cx, 1);
@@ -773,37 +775,46 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
 	kt2 = ktime_get_real();
 	idle_time = ktime_to_us(ktime_sub(kt2, kt1));
 
+	/* Update device last_residency */
+	dev->last_residency = (int)idle_time;
+
 	local_irq_enable();
 	cx->usage++;
 	lapic_timer_state_broadcast(pr, cx, 0);
 
-	return idle_time;
+	return index;
 }
 
 /**
  * acpi_idle_enter_simple - enters an ACPI state without BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @index: the index of suggested state
  */
 static int acpi_idle_enter_simple(struct cpuidle_device *dev,
-		struct cpuidle_state *state)
+		int index)
 {
 	struct acpi_processor *pr;
+	struct cpuidle_state *state = &dev->states[index];
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 	ktime_t kt1, kt2;
 	s64 idle_time_ns;
 	s64 idle_time;
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
-
-	if (acpi_idle_suspend)
-		return(acpi_idle_enter_c1(dev, state));
+		return -EINVAL;
 
 	local_irq_disable();
 
+	if (acpi_idle_suspend) {
+		local_irq_enable();
+		cpu_relax();
+		return -EINVAL;
+	}
+
+
 	if (cx->entry_method != ACPI_CSTATE_FFH) {
 		current_thread_info()->status &= ~TS_POLLING;
 		/*
@@ -815,7 +826,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -837,6 +848,9 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
+	/* Update device last_residency */
+	dev->last_residency = (int)idle_time;
+
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -848,7 +862,7 @@ static int acpi_idle_enter_simple(struct cpuidle_device *dev,
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += idle_time;
-	return idle_time;
+	return index;
 }
 
 static int c3_cpu_count;
@@ -857,14 +871,15 @@ static DEFINE_SPINLOCK(c3_lock);
 /**
  * acpi_idle_enter_bm - enters C3 with proper BM handling
  * @dev: the target CPU
- * @state: the state data
+ * @index: the index of suggested state
  *
  * If BM is detected, the deepest non-C3 idle state is entered instead.
  */
 static int acpi_idle_enter_bm(struct cpuidle_device *dev,
-		struct cpuidle_state *state)
+		int index)
 {
 	struct acpi_processor *pr;
+	struct cpuidle_state *state = &dev->states[index];
 	struct acpi_processor_cx *cx = cpuidle_get_statedata(state);
 	ktime_t kt1, kt2;
 	s64 idle_time_ns;
@@ -872,22 +887,26 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 
 	pr = __this_cpu_read(processors);
+	dev->last_residency = 0;
 
 	if (unlikely(!pr))
-		return 0;
+		return -EINVAL;
 
-	if (acpi_idle_suspend)
-		return(acpi_idle_enter_c1(dev, state));
+
+	if (acpi_idle_suspend) {
+		cpu_relax();
+		return -EINVAL;
+	}
 
 	if (!cx->bm_sts_skip && acpi_idle_bm_check()) {
-		if (dev->safe_state) {
-			dev->last_state = dev->safe_state;
-			return dev->safe_state->enter(dev, dev->safe_state);
+		if (dev->safe_state_index >= 0) {
+			return dev->states[dev->safe_state_index].enter(dev,
+				dev->safe_state_index);
 		} else {
 			local_irq_disable();
 			acpi_safe_halt();
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -904,7 +923,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 		if (unlikely(need_resched())) {
 			current_thread_info()->status |= TS_POLLING;
 			local_irq_enable();
-			return 0;
+			return -EINVAL;
 		}
 	}
 
@@ -954,6 +973,9 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 	idle_time = idle_time_ns;
 	do_div(idle_time, NSEC_PER_USEC);
 
+	/* Update device last_residency */
+	dev->last_residency = (int)idle_time;
+
 	/* Tell the scheduler how much we idled: */
 	sched_clock_idle_wakeup_event(idle_time_ns);
 
@@ -965,7 +987,7 @@ static int acpi_idle_enter_bm(struct cpuidle_device *dev,
 
 	lapic_timer_state_broadcast(pr, cx, 0);
 	cx->time += idle_time;
-	return idle_time;
+	return index;
}
 
 struct cpuidle_driver acpi_idle_driver = {
@@ -992,6 +1014,7 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 	}
 
 	dev->cpu = pr->id;
+	dev->safe_state_index = -1;
 	for (i = 0; i < CPUIDLE_STATE_MAX; i++) {
 		dev->states[i].name[0] = '\0';
 		dev->states[i].desc[0] = '\0';
@@ -1027,13 +1050,13 @@ static int acpi_processor_setup_cpuidle(struct acpi_processor *pr)
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 
 			state->enter = acpi_idle_enter_c1;
-			dev->safe_state = state;
+			dev->safe_state_index = count;
 			break;
 
 		case ACPI_STATE_C2:
 			state->flags |= CPUIDLE_FLAG_TIME_VALID;
 			state->enter = acpi_idle_enter_simple;
-			dev->safe_state = state;
+			dev->safe_state_index = count;
 			break;
 
 		case ACPI_STATE_C3:
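The counterpart of this driver-side change lives in the generic cpuidle
core: with dev->last_state gone, the core accounts statistics against
whatever index the enter routine returns. A hedged sketch of that
consuming loop, illustrative of the new contract rather than the
verbatim upstream cpuidle_idle_call():

	/* The governor only suggests; the driver may demote and
	 * reports the state it actually entered via the return value. */
	next_state = cpuidle_curr_governor->select(dev);
	entered_state = dev->states[next_state].enter(dev, next_state);

	if (entered_state >= 0) {
		/* The driver has already filled in dev->last_residency. */
		dev->states[entered_state].time += dev->last_residency;
		dev->states[entered_state].usage++;
	}
	/* On a negative return the state was never entered and
	 * nothing is accounted. */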