author      Venkatesh Pallipadi <venki@google.com>    2010-10-04 20:03:20 -0400
committer   Ingo Molnar <mingo@elte.hu>               2010-10-18 14:52:25 -0400
commit      e82b8e4ea4f3dffe6e7939f90e78da675fcc450e (patch)
tree        116e54691f87d2cb0f2f5df36d1e114624c225d0 /arch/x86
parent      b52bfee445d315549d41eacf2fa7c156e7d153d5 (diff)
x86: Add IRQ_TIME_ACCOUNTING
This patch adds the IRQ_TIME_ACCOUNTING option on x86 and enables it at runtime when TSC is enabled. This change only enables fine-grained irq time accounting; it is not used yet. Following patches use it for different purposes.

Signed-off-by: Venkatesh Pallipadi <venki@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1286237003-12406-6-git-send-email-venki@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
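The enable_sched_clock_irqtime()/disable_sched_clock_irqtime() calls used in the tsc.c hunks below are provided by the scheduler side (see the parent commit). As a rough sketch only, assuming the parent patch defines them as simple toggles of a global flag that the irq-time accounting path checks before reading timestamps:

/*
 * Sketch only: these helpers live in the scheduler (added by the parent
 * commit); the exact definitions here are assumptions, but the intent is
 * a plain global flag gating fine-grained irq time accounting.
 */
static int sched_clock_irqtime;

void enable_sched_clock_irqtime(void)
{
	/* TSC is usable, so per-transition timestamps are cheap enough */
	sched_clock_irqtime = 1;
}

void disable_sched_clock_irqtime(void)
{
	/* TSC missing or unstable: fall back to coarse, tick-based accounting */
	sched_clock_irqtime = 0;
}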
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig        11
-rw-r--r--  arch/x86/kernel/tsc.c    8
2 files changed, 19 insertions(+), 0 deletions(-)
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index cea0cd9a316f..f4c70c246ffe 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -795,6 +795,17 @@ config SCHED_MC
 	  making when dealing with multi-core CPU chips at a cost of slightly
 	  increased overhead in some places. If unsure say N here.
 
+config IRQ_TIME_ACCOUNTING
+	bool "Fine granularity task level IRQ time accounting"
+	default n
+	---help---
+	  Select this option to enable fine granularity task irq time
+	  accounting. This is done by reading a timestamp on each
+	  transitions between softirq and hardirq state, so there can be a
+	  small performance impact.
+
+	  If in doubt, say N here.
+
 source "kernel/Kconfig.preempt"
 
 config X86_UP_APIC
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 26a863a9c2a8..a1c2cd768538 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -104,10 +104,14 @@ int __init notsc_setup(char *str)
 
 __setup("notsc", notsc_setup);
 
+static int no_sched_irq_time;
+
 static int __init tsc_setup(char *str)
 {
 	if (!strcmp(str, "reliable"))
 		tsc_clocksource_reliable = 1;
+	if (!strncmp(str, "noirqtime", 9))
+		no_sched_irq_time = 1;
 	return 1;
 }
 
@@ -801,6 +805,7 @@ void mark_tsc_unstable(char *reason)
 	if (!tsc_unstable) {
 		tsc_unstable = 1;
 		sched_clock_stable = 0;
+		disable_sched_clock_irqtime();
 		printk(KERN_INFO "Marking TSC unstable due to %s\n", reason);
 		/* Change only the rating, when not registered */
 		if (clocksource_tsc.mult)
@@ -987,6 +992,9 @@ void __init tsc_init(void)
 	/* now allow native_sched_clock() to use rdtsc */
 	tsc_disabled = 0;
 
+	if (!no_sched_irq_time)
+		enable_sched_clock_irqtime();
+
 	lpj = ((u64)tsc_khz * 1000);
 	do_div(lpj, HZ);
 	lpj_fine = lpj;
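The Kconfig help text above describes reading a timestamp on each softirq/hardirq transition. As a hedged illustration of how the sched_clock_irqtime flag is assumed to gate that accounting on the scheduler side (names and details approximate the parent commit, not this patch):

/*
 * Illustrative sketch, not part of this patch: when sched_clock_irqtime is
 * set, a timestamp is read at each irq entry/exit transition and the delta
 * is charged to per-cpu hardirq or softirq time.
 */
void account_system_vtime(struct task_struct *curr)
{
	unsigned long flags;
	u64 now, delta;
	int cpu;

	if (!sched_clock_irqtime)
		return;		/* off: notsc, noirqtime, or TSC marked unstable */

	local_irq_save(flags);
	cpu = smp_processor_id();
	now = sched_clock_cpu(cpu);		/* TSC-backed timestamp */
	delta = now - per_cpu(irq_start_time, cpu);
	per_cpu(irq_start_time, cpu) = now;

	if (hardirq_count())
		per_cpu(cpu_hardirq_time, cpu) += delta;	/* charge hardirq time */
	else if (softirq_count())
		per_cpu(cpu_softirq_time, cpu) += delta;	/* charge softirq time */

	local_irq_restore(flags);
}

On the boot command line, the new "noirqtime" token keeps the accounting off even when TSC is usable; assuming tsc_setup() is registered through the existing "tsc=" handler (as with tsc=reliable), this would be passed as tsc=noirqtime.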