about summary refs log tree commit diff stats
path: root/include/linux
diff options
context:
space:
mode:
author: Peter Zijlstra <a.p.zijlstra@chello.nl>  2010-10-14 02:01:34 -0400
committer: Ingo Molnar <mingo@elte.hu>  2010-10-18 13:58:50 -0400
commit: e360adbe29241a0194e10e20595360dd7b98a2b3 (patch)
tree: ef5fa5f50a895096bfb25bc11b25949603158238 /include/linux
parent: 8e5fc1a7320baf6076391607515dceb61319b36a (diff)
irq_work: Add generic hardirq context callbacks
Provide a mechanism that allows running code in IRQ context. It is
most useful for NMI code that needs to interact with the rest of the
system -- like wakeup a task to drain buffers.

Perf currently has such a mechanism, so extract that and provide it as
a generic feature, independent of perf so that others may also
benefit.

The IRQ context callback is generated through self-IPIs where
possible, or on architectures like powerpc the decrementer (the
built-in timer facility) is set to generate an interrupt immediately.

Architectures that don't have anything like this get to do with a
callback from the timer tick. These architectures can call
irq_work_run() at the tail of any IRQ handlers that might enqueue such
work (like the perf IRQ handler) to avoid undue latencies in
processing the work.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
[ various fixes ]
Signed-off-by: Huang Ying <ying.huang@intel.com>
LKML-Reference: <1287036094.7768.291.camel@yhuang-dev>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/irq_work.h    20
-rw-r--r--  include/linux/perf_event.h  11
2 files changed, 22 insertions, 9 deletions
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
new file mode 100644
index 000000000000..4fa09d4d0b71
--- /dev/null
+++ b/include/linux/irq_work.h
@@ -0,0 +1,20 @@
1#ifndef _LINUX_IRQ_WORK_H
2#define _LINUX_IRQ_WORK_H
3
4struct irq_work {
5 struct irq_work *next;
6 void (*func)(struct irq_work *);
7};
8
9static inline
10void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *))
11{
12 entry->next = NULL;
13 entry->func = func;
14}
15
16bool irq_work_queue(struct irq_work *entry);
17void irq_work_run(void);
18void irq_work_sync(struct irq_work *entry);
19
20#endif /* _LINUX_IRQ_WORK_H */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index a9227e985207..2ebfc9ae4755 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -486,6 +486,7 @@ struct perf_guest_info_callbacks {
486#include <linux/workqueue.h> 486#include <linux/workqueue.h>
487#include <linux/ftrace.h> 487#include <linux/ftrace.h>
488#include <linux/cpu.h> 488#include <linux/cpu.h>
489#include <linux/irq_work.h>
489#include <asm/atomic.h> 490#include <asm/atomic.h>
490#include <asm/local.h> 491#include <asm/local.h>
491 492
@@ -672,11 +673,6 @@ struct perf_buffer {
672 void *data_pages[0]; 673 void *data_pages[0];
673}; 674};
674 675
675struct perf_pending_entry {
676 struct perf_pending_entry *next;
677 void (*func)(struct perf_pending_entry *);
678};
679
680struct perf_sample_data; 676struct perf_sample_data;
681 677
682typedef void (*perf_overflow_handler_t)(struct perf_event *, int, 678typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
@@ -784,7 +780,7 @@ struct perf_event {
784 int pending_wakeup; 780 int pending_wakeup;
785 int pending_kill; 781 int pending_kill;
786 int pending_disable; 782 int pending_disable;
787 struct perf_pending_entry pending; 783 struct irq_work pending;
788 784
789 atomic_t event_limit; 785 atomic_t event_limit;
790 786
@@ -898,8 +894,6 @@ extern int perf_event_init_task(struct task_struct *child);
898extern void perf_event_exit_task(struct task_struct *child); 894extern void perf_event_exit_task(struct task_struct *child);
899extern void perf_event_free_task(struct task_struct *task); 895extern void perf_event_free_task(struct task_struct *task);
900extern void perf_event_delayed_put(struct task_struct *task); 896extern void perf_event_delayed_put(struct task_struct *task);
901extern void set_perf_event_pending(void);
902extern void perf_event_do_pending(void);
903extern void perf_event_print_debug(void); 897extern void perf_event_print_debug(void);
904extern void perf_pmu_disable(struct pmu *pmu); 898extern void perf_pmu_disable(struct pmu *pmu);
905extern void perf_pmu_enable(struct pmu *pmu); 899extern void perf_pmu_enable(struct pmu *pmu);
@@ -1078,7 +1072,6 @@ static inline int perf_event_init_task(struct task_struct *child) { return 0; }
1078static inline void perf_event_exit_task(struct task_struct *child) { } 1072static inline void perf_event_exit_task(struct task_struct *child) { }
1079static inline void perf_event_free_task(struct task_struct *task) { } 1073static inline void perf_event_free_task(struct task_struct *task) { }
1080static inline void perf_event_delayed_put(struct task_struct *task) { } 1074static inline void perf_event_delayed_put(struct task_struct *task) { }
1081static inline void perf_event_do_pending(void) { }
1082static inline void perf_event_print_debug(void) { } 1075static inline void perf_event_print_debug(void) { }
1083static inline int perf_event_task_disable(void) { return -EINVAL; } 1076static inline int perf_event_task_disable(void) { return -EINVAL; }
1084static inline int perf_event_task_enable(void) { return -EINVAL; } 1077static inline int perf_event_task_enable(void) { return -EINVAL; }