aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSeiji Aguchi <seiji.aguchi@hds.com>2013-06-20 11:45:17 -0400
committerH. Peter Anvin <hpa@linux.intel.com>2013-06-21 01:25:01 -0400
commiteddc0e922a3530e0f22cef170229bcae3a7d5e31 (patch)
tree473c0c70cddcc8f8d23713c0bb65fcbf4dc9cd26
parentf5abaa1bfc3dbf26d19d3513f39279ca369f8d65 (diff)
x86, trace: Introduce entering/exiting_irq()
When implementing tracepoints in interrupt handlers, if the tracepoints are simply added in the performance sensitive path of interrupt handlers, it may cause a potential performance problem due to the time penalty. To solve the problem, an idea is to prepare non-trace/trace irq handlers and switch their IDTs at the enabling/disabling time. So, let's introduce entering_irq()/exiting_irq() for pre/post- processing of each irq handler. A way to use them is as follows. Non-trace irq handler: smp_irq_handler() { entering_irq(); /* pre-processing of this handler */ __smp_irq_handler(); /* * common logic between non-trace and trace handlers * in a vector. */ exiting_irq(); /* post-processing of this handler */ } Trace irq_handler: smp_trace_irq_handler() { entering_irq(); /* pre-processing of this handler */ trace_irq_entry(); /* tracepoint for irq entry */ __smp_irq_handler(); /* * common logic between non-trace and trace handlers * in a vector. */ trace_irq_exit(); /* tracepoint for irq exit */ exiting_irq(); /* post-processing of this handler */ } If tracepoints could be placed outside entering_irq()/exiting_irq() as follows, it would look cleaner. smp_trace_irq_handler() { trace_irq_entry(); smp_irq_handler(); trace_irq_exit(); } But it doesn't work. The problem is with irq_enter/exit() being called. They must be called before trace_irq_entry/exit(), because rcu_irq_enter() must be called before any tracepoints are used, as tracepoints use rcu to synchronize. As a possible alternative, we may be able to call irq_enter() first as follows if irq_enter() can nest. smp_trace_irq_handler() { irq_entry(); trace_irq_entry(); smp_irq_handler(); trace_irq_exit(); irq_exit(); } But it doesn't work, either. If irq_enter() is nested, it may have a time penalty because it has to check if it was already called or not. The time penalty is not desired in performance sensitive paths even if it is tiny. 
Signed-off-by: Seiji Aguchi <seiji.aguchi@hds.com> Link: http://lkml.kernel.org/r/51C3238D.9040706@hds.com Signed-off-by: H. Peter Anvin <hpa@linux.intel.com> Cc: Steven Rostedt <rostedt@goodmis.org>
-rw-r--r--arch/x86/include/asm/apic.h27
-rw-r--r--arch/x86/kernel/apic/apic.c33
-rw-r--r--arch/x86/kernel/cpu/mcheck/therm_throt.c14
-rw-r--r--arch/x86/kernel/cpu/mcheck/threshold.c14
-rw-r--r--arch/x86/kernel/irq.c18
-rw-r--r--arch/x86/kernel/irq_work.c14
-rw-r--r--arch/x86/kernel/smp.c35
7 files changed, 109 insertions, 46 deletions
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 338803422239..f8119b582c3c 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -12,6 +12,7 @@
12#include <asm/fixmap.h> 12#include <asm/fixmap.h>
13#include <asm/mpspec.h> 13#include <asm/mpspec.h>
14#include <asm/msr.h> 14#include <asm/msr.h>
15#include <asm/idle.h>
15 16
16#define ARCH_APICTIMER_STOPS_ON_C3 1 17#define ARCH_APICTIMER_STOPS_ON_C3 1
17 18
@@ -687,5 +688,31 @@ extern int default_check_phys_apicid_present(int phys_apicid);
687#endif 688#endif
688 689
689#endif /* CONFIG_X86_LOCAL_APIC */ 690#endif /* CONFIG_X86_LOCAL_APIC */
691extern void irq_enter(void);
692extern void irq_exit(void);
693
694static inline void entering_irq(void)
695{
696 irq_enter();
697 exit_idle();
698}
699
700static inline void entering_ack_irq(void)
701{
702 ack_APIC_irq();
703 entering_irq();
704}
705
706static inline void exiting_irq(void)
707{
708 irq_exit();
709}
710
711static inline void exiting_ack_irq(void)
712{
713 irq_exit();
714 /* Ack only at the end to avoid potential reentry */
715 ack_APIC_irq();
716}
690 717
691#endif /* _ASM_X86_APIC_H */ 718#endif /* _ASM_X86_APIC_H */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 904611bf0e5a..59ee76fe1c53 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -919,17 +919,14 @@ void __irq_entry smp_apic_timer_interrupt(struct pt_regs *regs)
919 /* 919 /*
920 * NOTE! We'd better ACK the irq immediately, 920 * NOTE! We'd better ACK the irq immediately,
921 * because timer handling can be slow. 921 * because timer handling can be slow.
922 */ 922 *
923 ack_APIC_irq();
924 /*
925 * update_process_times() expects us to have done irq_enter(). 923 * update_process_times() expects us to have done irq_enter().
926 * Besides, if we don't timer interrupts ignore the global 924 * Besides, if we don't timer interrupts ignore the global
927 * interrupt lock, which is the WrongThing (tm) to do. 925 * interrupt lock, which is the WrongThing (tm) to do.
928 */ 926 */
929 irq_enter(); 927 entering_ack_irq();
930 exit_idle();
931 local_apic_timer_interrupt(); 928 local_apic_timer_interrupt();
932 irq_exit(); 929 exiting_irq();
933 930
934 set_irq_regs(old_regs); 931 set_irq_regs(old_regs);
935} 932}
@@ -1907,12 +1904,10 @@ int __init APIC_init_uniprocessor(void)
1907/* 1904/*
1908 * This interrupt should _never_ happen with our APIC/SMP architecture 1905 * This interrupt should _never_ happen with our APIC/SMP architecture
1909 */ 1906 */
1910void smp_spurious_interrupt(struct pt_regs *regs) 1907static inline void __smp_spurious_interrupt(void)
1911{ 1908{
1912 u32 v; 1909 u32 v;
1913 1910
1914 irq_enter();
1915 exit_idle();
1916 /* 1911 /*
1917 * Check if this really is a spurious interrupt and ACK it 1912 * Check if this really is a spurious interrupt and ACK it
1918 * if it is a vectored one. Just in case... 1913 * if it is a vectored one. Just in case...
@@ -1927,13 +1922,19 @@ void smp_spurious_interrupt(struct pt_regs *regs)
1927 /* see sw-dev-man vol 3, chapter 7.4.13.5 */ 1922 /* see sw-dev-man vol 3, chapter 7.4.13.5 */
1928 pr_info("spurious APIC interrupt on CPU#%d, " 1923 pr_info("spurious APIC interrupt on CPU#%d, "
1929 "should never happen.\n", smp_processor_id()); 1924 "should never happen.\n", smp_processor_id());
1930 irq_exit(); 1925}
1926
1927void smp_spurious_interrupt(struct pt_regs *regs)
1928{
1929 entering_irq();
1930 __smp_spurious_interrupt();
1931 exiting_irq();
1931} 1932}
1932 1933
1933/* 1934/*
1934 * This interrupt should never happen with our APIC/SMP architecture 1935 * This interrupt should never happen with our APIC/SMP architecture
1935 */ 1936 */
1936void smp_error_interrupt(struct pt_regs *regs) 1937static inline void __smp_error_interrupt(struct pt_regs *regs)
1937{ 1938{
1938 u32 v0, v1; 1939 u32 v0, v1;
1939 u32 i = 0; 1940 u32 i = 0;
@@ -1948,8 +1949,6 @@ void smp_error_interrupt(struct pt_regs *regs)
1948 "Illegal register address", /* APIC Error Bit 7 */ 1949 "Illegal register address", /* APIC Error Bit 7 */
1949 }; 1950 };
1950 1951
1951 irq_enter();
1952 exit_idle();
1953 /* First tickle the hardware, only then report what went on. -- REW */ 1952 /* First tickle the hardware, only then report what went on. -- REW */
1954 v0 = apic_read(APIC_ESR); 1953 v0 = apic_read(APIC_ESR);
1955 apic_write(APIC_ESR, 0); 1954 apic_write(APIC_ESR, 0);
@@ -1970,7 +1969,13 @@ void smp_error_interrupt(struct pt_regs *regs)
1970 1969
1971 apic_printk(APIC_DEBUG, KERN_CONT "\n"); 1970 apic_printk(APIC_DEBUG, KERN_CONT "\n");
1972 1971
1973 irq_exit(); 1972}
1973
1974void smp_error_interrupt(struct pt_regs *regs)
1975{
1976 entering_irq();
1977 __smp_error_interrupt(regs);
1978 exiting_irq();
1974} 1979}
1975 1980
1976/** 1981/**
diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
index 47a1870279aa..f6b35f2a6a37 100644
--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
@@ -378,15 +378,17 @@ static void unexpected_thermal_interrupt(void)
378 378
379static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt; 379static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
380 380
381asmlinkage void smp_thermal_interrupt(struct pt_regs *regs) 381static inline void __smp_thermal_interrupt(void)
382{ 382{
383 irq_enter();
384 exit_idle();
385 inc_irq_stat(irq_thermal_count); 383 inc_irq_stat(irq_thermal_count);
386 smp_thermal_vector(); 384 smp_thermal_vector();
387 irq_exit(); 385}
388 /* Ack only at the end to avoid potential reentry */ 386
389 ack_APIC_irq(); 387asmlinkage void smp_thermal_interrupt(struct pt_regs *regs)
388{
389 entering_irq();
390 __smp_thermal_interrupt();
391 exiting_ack_irq();
390} 392}
391 393
392/* Thermal monitoring depends on APIC, ACPI and clock modulation */ 394/* Thermal monitoring depends on APIC, ACPI and clock modulation */
diff --git a/arch/x86/kernel/cpu/mcheck/threshold.c b/arch/x86/kernel/cpu/mcheck/threshold.c
index aa578cadb940..610cd98d6ef9 100644
--- a/arch/x86/kernel/cpu/mcheck/threshold.c
+++ b/arch/x86/kernel/cpu/mcheck/threshold.c
@@ -17,13 +17,15 @@ static void default_threshold_interrupt(void)
17 17
18void (*mce_threshold_vector)(void) = default_threshold_interrupt; 18void (*mce_threshold_vector)(void) = default_threshold_interrupt;
19 19
20asmlinkage void smp_threshold_interrupt(void) 20static inline void __smp_threshold_interrupt(void)
21{ 21{
22 irq_enter();
23 exit_idle();
24 inc_irq_stat(irq_threshold_count); 22 inc_irq_stat(irq_threshold_count);
25 mce_threshold_vector(); 23 mce_threshold_vector();
26 irq_exit(); 24}
27 /* Ack only at the end to avoid potential reentry */ 25
28 ack_APIC_irq(); 26asmlinkage void smp_threshold_interrupt(void)
27{
28 entering_irq();
29 __smp_threshold_interrupt();
30 exiting_ack_irq();
29} 31}
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index ac0631d8996f..e3b8df1754cc 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -204,23 +204,21 @@ unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
204/* 204/*
205 * Handler for X86_PLATFORM_IPI_VECTOR. 205 * Handler for X86_PLATFORM_IPI_VECTOR.
206 */ 206 */
207void smp_x86_platform_ipi(struct pt_regs *regs) 207void __smp_x86_platform_ipi(void)
208{ 208{
209 struct pt_regs *old_regs = set_irq_regs(regs);
210
211 ack_APIC_irq();
212
213 irq_enter();
214
215 exit_idle();
216
217 inc_irq_stat(x86_platform_ipis); 209 inc_irq_stat(x86_platform_ipis);
218 210
219 if (x86_platform_ipi_callback) 211 if (x86_platform_ipi_callback)
220 x86_platform_ipi_callback(); 212 x86_platform_ipi_callback();
213}
221 214
222 irq_exit(); 215void smp_x86_platform_ipi(struct pt_regs *regs)
216{
217 struct pt_regs *old_regs = set_irq_regs(regs);
223 218
219 entering_ack_irq();
220 __smp_x86_platform_ipi();
221 exiting_irq();
224 set_irq_regs(old_regs); 222 set_irq_regs(old_regs);
225} 223}
226 224
diff --git a/arch/x86/kernel/irq_work.c b/arch/x86/kernel/irq_work.c
index ca8f703a1e70..074d46fdbd1f 100644
--- a/arch/x86/kernel/irq_work.c
+++ b/arch/x86/kernel/irq_work.c
@@ -9,13 +9,23 @@
9#include <linux/hardirq.h> 9#include <linux/hardirq.h>
10#include <asm/apic.h> 10#include <asm/apic.h>
11 11
12void smp_irq_work_interrupt(struct pt_regs *regs) 12static inline void irq_work_entering_irq(void)
13{ 13{
14 irq_enter(); 14 irq_enter();
15 ack_APIC_irq(); 15 ack_APIC_irq();
16}
17
18static inline void __smp_irq_work_interrupt(void)
19{
16 inc_irq_stat(apic_irq_work_irqs); 20 inc_irq_stat(apic_irq_work_irqs);
17 irq_work_run(); 21 irq_work_run();
18 irq_exit(); 22}
23
24void smp_irq_work_interrupt(struct pt_regs *regs)
25{
26 irq_work_entering_irq();
27 __smp_irq_work_interrupt();
28 exiting_irq();
19} 29}
20 30
21void arch_irq_work_raise(void) 31void arch_irq_work_raise(void)
diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
index 48d2b7ded422..d85837574a79 100644
--- a/arch/x86/kernel/smp.c
+++ b/arch/x86/kernel/smp.c
@@ -249,32 +249,51 @@ finish:
249/* 249/*
250 * Reschedule call back. 250 * Reschedule call back.
251 */ 251 */
252void smp_reschedule_interrupt(struct pt_regs *regs) 252static inline void __smp_reschedule_interrupt(void)
253{ 253{
254 ack_APIC_irq();
255 inc_irq_stat(irq_resched_count); 254 inc_irq_stat(irq_resched_count);
256 scheduler_ipi(); 255 scheduler_ipi();
256}
257
258void smp_reschedule_interrupt(struct pt_regs *regs)
259{
260 ack_APIC_irq();
261 __smp_reschedule_interrupt();
257 /* 262 /*
258 * KVM uses this interrupt to force a cpu out of guest mode 263 * KVM uses this interrupt to force a cpu out of guest mode
259 */ 264 */
260} 265}
261 266
262void smp_call_function_interrupt(struct pt_regs *regs) 267static inline void call_function_entering_irq(void)
263{ 268{
264 ack_APIC_irq(); 269 ack_APIC_irq();
265 irq_enter(); 270 irq_enter();
271}
272
273static inline void __smp_call_function_interrupt(void)
274{
266 generic_smp_call_function_interrupt(); 275 generic_smp_call_function_interrupt();
267 inc_irq_stat(irq_call_count); 276 inc_irq_stat(irq_call_count);
268 irq_exit();
269} 277}
270 278
271void smp_call_function_single_interrupt(struct pt_regs *regs) 279void smp_call_function_interrupt(struct pt_regs *regs)
280{
281 call_function_entering_irq();
282 __smp_call_function_interrupt();
283 exiting_irq();
284}
285
286static inline void __smp_call_function_single_interrupt(void)
272{ 287{
273 ack_APIC_irq();
274 irq_enter();
275 generic_smp_call_function_single_interrupt(); 288 generic_smp_call_function_single_interrupt();
276 inc_irq_stat(irq_call_count); 289 inc_irq_stat(irq_call_count);
277 irq_exit(); 290}
291
292void smp_call_function_single_interrupt(struct pt_regs *regs)
293{
294 call_function_entering_irq();
295 __smp_call_function_single_interrupt();
296 exiting_irq();
278} 297}
279 298
280static int __init nonmi_ipi_setup(char *str) 299static int __init nonmi_ipi_setup(char *str)