author     Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>   2006-01-11 16:44:18 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>                 2006-01-11 22:04:54 -0500
commit     5a07a30c3cc4dc438494d6416ffa74008a2194b3 (patch)
tree       eafda8ee2f58f392f5d78af7748ce58ddb6bf9a2 /arch
parent     01b8faaef5d239aeabb1e712c5d9619f29e808dd (diff)
[PATCH] i386/x86-64: Remove sub jiffy profile timer support
Remove the finer control of the local APIC timer. We cannot provide sub-jiffy control like this when we use broadcast from an external timer in place of the local APIC. Instead of removing this only on systems that may end up using broadcast from an external timer (due to C3), I am going the "I'm feeling lucky" way and removing it entirely.

Basically, I am not sure about the usefulness of this code today. A few other architectures also don't seem to support it. If you are using profiling with fine-grained control and don't like this going away in the normal case, yell at me right now.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
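For context, the knob being disabled here is driven from user space by writing a multiplier into /proc/profile, as the comments removed below note. A rough sketch of such a caller follows; it is an illustration only, not part of the patch, and it assumes the 2.6-era kernel/profile.c behaviour where an int-sized write is forwarded to the architecture's setup_profiling_timer(), which after this change always returns -EINVAL:

/* Illustration only -- not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned int multiplier = 4;	/* example value; any multiplier is now rejected */
	int fd = open("/proc/profile", O_WRONLY);

	if (fd < 0) {
		perror("open /proc/profile");
		return 1;
	}

	/*
	 * An int-sized write is treated as a new profiling multiplier and
	 * passed to setup_profiling_timer().  With this patch applied the
	 * write fails with EINVAL instead of reprogramming the local APIC
	 * timer to a sub-jiffy period.
	 */
	if (write(fd, &multiplier, sizeof(multiplier)) < 0)
		perror("write multiplier");

	close(fd);
	return 0;
}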
Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/apic.c    61
-rw-r--r--  arch/x86_64/kernel/apic.c  53
2 files changed, 8 insertions, 106 deletions
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index d8f94e78de8a..2d8c6ce1ecda 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -92,10 +92,6 @@ void __init apic_intr_init(void)
 /* Using APIC to generate smp_local_timer_interrupt? */
 int using_apic_timer = 0;
 
-static DEFINE_PER_CPU(int, prof_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_counter) = 1;
-
 static int enabled_via_apicbase;
 
 void enable_NMI_through_LVT0 (void * dummy)
@@ -1092,34 +1088,6 @@ void enable_APIC_timer(void)
 	}
 }
 
-/*
- * the frequency of the profiling timer can be changed
- * by writing a multiplier value into /proc/profile.
- */
-int setup_profiling_timer(unsigned int multiplier)
-{
-	int i;
-
-	/*
-	 * Sanity check. [at least 500 APIC cycles should be
-	 * between APIC interrupts as a rule of thumb, to avoid
-	 * irqs flooding us]
-	 */
-	if ( (!multiplier) || (calibration_result/multiplier < 500))
-		return -EINVAL;
-
-	/*
-	 * Set the new multiplier for each CPU. CPUs don't start using the
-	 * new values until the next timer interrupt in which they do process
-	 * accounting. At that time they also adjust their APIC timers
-	 * accordingly.
-	 */
-	for (i = 0; i < NR_CPUS; ++i)
-		per_cpu(prof_multiplier, i) = multiplier;
-
-	return 0;
-}
-
 #undef APIC_DIVISOR
 
 /*
@@ -1134,32 +1102,10 @@ int setup_profiling_timer(unsigned int multiplier)
 
 inline void smp_local_timer_interrupt(struct pt_regs * regs)
 {
-	int cpu = smp_processor_id();
-
 	profile_tick(CPU_PROFILING, regs);
-	if (--per_cpu(prof_counter, cpu) <= 0) {
-		/*
-		 * The multiplier may have changed since the last time we got
-		 * to this point as a result of the user writing to
-		 * /proc/profile. In this case we need to adjust the APIC
-		 * timer accordingly.
-		 *
-		 * Interrupts are already masked off at this point.
-		 */
-		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
-		if (per_cpu(prof_counter, cpu) !=
-					per_cpu(prof_old_multiplier, cpu)) {
-			__setup_APIC_LVTT(
-					calibration_result/
-					per_cpu(prof_counter, cpu));
-			per_cpu(prof_old_multiplier, cpu) =
-						per_cpu(prof_counter, cpu);
-		}
-
 #ifdef CONFIG_SMP
 	update_process_times(user_mode_vm(regs));
 #endif
-	}
 
 	/*
 	 * We take the 'long' return path, and there every subsystem
@@ -1206,6 +1152,11 @@ fastcall void smp_apic_timer_interrupt(struct pt_regs *regs)
 	irq_exit();
 }
 
+int setup_profiling_timer(unsigned int multiplier)
+{
+	return -EINVAL;
+}
+
 /*
  * This interrupt should _never_ happen with our APIC/SMP architecture
  */
diff --git a/arch/x86_64/kernel/apic.c b/arch/x86_64/kernel/apic.c
index 18691ce4c759..fa47ae731576 100644
--- a/arch/x86_64/kernel/apic.c
+++ b/arch/x86_64/kernel/apic.c
@@ -41,10 +41,6 @@ int disable_apic_timer __initdata;
 /* Using APIC to generate smp_local_timer_interrupt? */
 int using_apic_timer = 0;
 
-static DEFINE_PER_CPU(int, prof_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_old_multiplier) = 1;
-static DEFINE_PER_CPU(int, prof_counter) = 1;
-
 static void apic_pm_activate(void);
 
 void enable_NMI_through_LVT0 (void * dummy)
@@ -805,32 +801,9 @@ void enable_APIC_timer(void)
 	}
 }
 
-/*
- * the frequency of the profiling timer can be changed
- * by writing a multiplier value into /proc/profile.
- */
 int setup_profiling_timer(unsigned int multiplier)
 {
-	int i;
-
-	/*
-	 * Sanity check. [at least 500 APIC cycles should be
-	 * between APIC interrupts as a rule of thumb, to avoid
-	 * irqs flooding us]
-	 */
-	if ( (!multiplier) || (calibration_result/multiplier < 500))
-		return -EINVAL;
-
-	/*
-	 * Set the new multiplier for each CPU. CPUs don't start using the
-	 * new values until the next timer interrupt in which they do process
-	 * accounting. At that time they also adjust their APIC timers
-	 * accordingly.
-	 */
-	for (i = 0; i < NR_CPUS; ++i)
-		per_cpu(prof_multiplier, i) = multiplier;
-
-	return 0;
+	return -EINVAL;
 }
 
 #ifdef CONFIG_X86_MCE_AMD
@@ -857,32 +830,10 @@ void setup_threshold_lvt(unsigned long lvt_off)
 
 void smp_local_timer_interrupt(struct pt_regs *regs)
 {
-	int cpu = smp_processor_id();
-
 	profile_tick(CPU_PROFILING, regs);
-	if (--per_cpu(prof_counter, cpu) <= 0) {
-		/*
-		 * The multiplier may have changed since the last time we got
-		 * to this point as a result of the user writing to
-		 * /proc/profile. In this case we need to adjust the APIC
-		 * timer accordingly.
-		 *
-		 * Interrupts are already masked off at this point.
-		 */
-		per_cpu(prof_counter, cpu) = per_cpu(prof_multiplier, cpu);
-		if (per_cpu(prof_counter, cpu) !=
-					per_cpu(prof_old_multiplier, cpu)) {
-			__setup_APIC_LVTT(calibration_result/
-					per_cpu(prof_counter, cpu));
-			per_cpu(prof_old_multiplier, cpu) =
-						per_cpu(prof_counter, cpu);
-		}
-
 #ifdef CONFIG_SMP
 	update_process_times(user_mode(regs));
 #endif
-	}
-
 	/*
 	 * We take the 'long' return path, and there every subsystem
 	 * grabs the appropriate locks (kernel lock/ irq lock).