Diffstat (limited to 'include/linux/interrupt.h')
 include/linux/interrupt.h | 80 ++++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 56 insertions(+), 24 deletions(-)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 75f3f00ac1e5..9f6580a184c9 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -52,16 +52,20 @@
  * IRQF_ONESHOT - Interrupt is not reenabled after the hardirq handler finished.
  *                Used by threaded interrupts which need to keep the
  *                irq line disabled until the threaded handler has been run.
+ * IRQF_NODELAY - Interrupt is not force threaded on -rt
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SAMPLE_RANDOM	0x00000040
 #define IRQF_SHARED		0x00000080
 #define IRQF_PROBE_SHARED	0x00000100
-#define IRQF_TIMER		0x00000200
+#define __IRQF_TIMER		0x00000200
 #define IRQF_PERCPU		0x00000400
 #define IRQF_NOBALANCING	0x00000800
 #define IRQF_IRQPOLL		0x00001000
 #define IRQF_ONESHOT		0x00002000
+#define IRQF_NODELAY		0x00004000
+
+#define IRQF_TIMER		(__IRQF_TIMER | IRQF_NODELAY)
 
 /*
  * Bits used by threaded handlers:
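
On -rt, hardirq handlers are normally forced into handler threads; IRQF_NODELAY opts a handler out of that, and IRQF_TIMER now implies it via __IRQF_TIMER | IRQF_NODELAY. A minimal driver-side sketch of the intended use (the handler, device cookie, and names below are illustrative, not part of this patch):

static irqreturn_t my_handler(int irq, void *dev_id)
{
	/* runs in hard interrupt context even on -rt: keep it short,
	 * never sleep, never take sleeping locks */
	return IRQ_HANDLED;
}

static int my_setup(unsigned int irq, void *my_dev)
{
	/* IRQF_NODELAY: do not force-thread this handler on -rt */
	return request_irq(irq, my_handler, IRQF_NODELAY, "my-dev", my_dev);
}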
@@ -91,6 +95,7 @@ typedef irqreturn_t (*irq_handler_t)(int, void *);
  * @thread_fn:	interupt handler function for threaded interrupts
  * @thread:	thread pointer for threaded interrupts
  * @thread_flags:	flags related to @thread
+ * @thread_mask:	bit mask to account for forced threads
  */
 struct irqaction {
 	irq_handler_t handler;
@@ -103,6 +108,7 @@ struct irqaction {
 	irq_handler_t thread_fn;
 	struct task_struct *thread;
 	unsigned long thread_flags;
+	unsigned long thread_mask;
 };
 
 extern irqreturn_t no_action(int cpl, void *dev_id);
@@ -179,7 +185,7 @@ extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
 #ifdef CONFIG_LOCKDEP
 # define local_irq_enable_in_hardirq()	do { } while (0)
 #else
-# define local_irq_enable_in_hardirq()	local_irq_enable()
+# define local_irq_enable_in_hardirq()	local_irq_enable_nort()
 #endif
 
 extern void disable_irq_nosync(unsigned int irq);
@@ -319,6 +325,7 @@ static inline int disable_irq_wake(unsigned int irq)
 
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
 #define set_softirq_pending(x) (local_softirq_pending() = (x))
+// FIXME: PREEMPT_RT: set_bit()?
 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
 #endif
 
@@ -350,7 +357,6 @@ enum
 	SCHED_SOFTIRQ,
 	HRTIMER_SOFTIRQ,
 	RCU_SOFTIRQ,	/* Preferable RCU should always be the last softirq */
-
 	NR_SOFTIRQS
 };
 
@@ -368,14 +374,23 @@ struct softirq_action
 	void	(*action)(struct softirq_action *);
 };
 
+#ifdef CONFIG_PREEMPT_HARDIRQS
+# define __raise_softirq_irqoff(nr)	raise_softirq_irqoff(nr)
+# define __do_raise_softirq_irqoff(nr) \
+	do { or_softirq_pending(1UL << (nr)); } while (0)
+#else
+# define __raise_softirq_irqoff(nr) \
+	do { or_softirq_pending(1UL << (nr)); } while (0)
+# define __do_raise_softirq_irqoff(nr) __raise_softirq_irqoff(nr)
+#endif
+
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
-#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
-extern void wakeup_softirqd(void);
+extern void softirq_check_pending_idle(void);
 
 /* This is the worklist that queues up per-cpu softirq work.
  *
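
With the split above, __raise_softirq_irqoff() either just sets the pending bit (!PREEMPT_HARDIRQS) or routes through raise_softirq_irqoff(), which can wake softirqd. Callers are unchanged; a hedged sketch of the API this block declares (MY_SOFTIRQ and the action function are illustrative — real slots live in the softirq enum above):

static void my_softirq_action(struct softirq_action *a)
{
	/* runs in softirq context with interrupts enabled */
}

static void my_subsys_init(void)
{
	open_softirq(MY_SOFTIRQ, my_softirq_action);
}

static void my_irq_path(void)	/* called with interrupts already off */
{
	__raise_softirq_irqoff(MY_SOFTIRQ);
}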
@@ -410,8 +425,9 @@ extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
      to be executed on some cpu at least once after this.
    * If the tasklet is already scheduled, but its excecution is still not
      started, it will be executed only once.
-   * If this tasklet is already running on another CPU (or schedule is called
-     from tasklet itself), it is rescheduled for later.
+   * If this tasklet is already running on another CPU, it is rescheduled
+     for later.
+   * Schedule must not be called from the tasklet itself (a lockup occurs)
    * Tasklet is strictly serialized wrt itself, but not
      wrt another tasklets. If client needs some intertask synchronization,
      he makes it with spinlocks.
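
The reworded rule is worth spelling out: a tasklet may no longer reschedule itself from inside its own handler. The usual pattern that remains valid, sketched with made-up names:

static void my_tasklet_fn(unsigned long data)
{
	/* deferred work; must NOT call tasklet_schedule() on itself */
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

static irqreturn_t my_irq(int irq, void *dev_id)
{
	tasklet_schedule(&my_tasklet);	/* coalesced if already pending */
	return IRQ_HANDLED;
}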
@@ -436,27 +452,36 @@ struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
 enum
 {
 	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
-	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
+	TASKLET_STATE_RUN,	/* Tasklet is running (SMP only) */
+	TASKLET_STATE_PENDING	/* Tasklet is pending */
 };
 
-#ifdef CONFIG_SMP
+#define TASKLET_STATEF_SCHED	(1 << TASKLET_STATE_SCHED)
+#define TASKLET_STATEF_RUN	(1 << TASKLET_STATE_RUN)
+#define TASKLET_STATEF_PENDING	(1 << TASKLET_STATE_PENDING)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
+static inline int tasklet_tryunlock(struct tasklet_struct *t)
+{
+	return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
+}
+
 static inline void tasklet_unlock(struct tasklet_struct *t)
 {
 	smp_mb__before_clear_bit();
 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
-	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
+extern void tasklet_unlock_wait(struct tasklet_struct *t);
+
 #else
 #define tasklet_trylock(t) 1
+#define tasklet_tryunlock(t)	1
 #define tasklet_unlock_wait(t) do { } while (0)
 #define tasklet_unlock(t) do { } while (0)
 #endif
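
tasklet_tryunlock() lets the tasklet core drop the RUN bit atomically, but only when no SCHED/PENDING bit appeared while the function ran. Roughly the retry loop it enables — a sketch of the pattern under those assumptions, not the exact softirq.c code:

static void my_run_tasklet(struct tasklet_struct *t)
{
	if (!tasklet_trylock(t))
		return;				/* already running elsewhere */
again:
	clear_bit(TASKLET_STATE_SCHED, &t->state);
	t->func(t->data);
	/* cmpxchg fails if SCHED/PENDING got set meanwhile: run again */
	if (!tasklet_tryunlock(t))
		goto again;
}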
@@ -505,22 +530,14 @@ static inline void tasklet_disable(struct tasklet_struct *t)
 	smp_mb();
 }
 
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
-	smp_mb__before_atomic_dec();
-	atomic_dec(&t->count);
-}
-
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
-	smp_mb__before_atomic_dec();
-	atomic_dec(&t->count);
-}
+extern void tasklet_enable(struct tasklet_struct *t);
+extern void tasklet_hi_enable(struct tasklet_struct *t);
 
 extern void tasklet_kill(struct tasklet_struct *t);
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
 extern void tasklet_init(struct tasklet_struct *t,
 			 void (*func)(unsigned long), unsigned long data);
+extern void takeover_tasklets(unsigned int cpu);
 
 struct tasklet_hrtimer {
 	struct hrtimer timer;
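
Moving tasklet_enable()/tasklet_hi_enable() out of line lets them do more than an atomic_dec() — for instance, kick a tasklet that went PENDING while it was disabled. Caller-side usage stays the same; a sketch with made-up names:

struct my_card {
	struct tasklet_struct rx_tasklet;
};

static void my_reconfigure(struct my_card *card)
{
	tasklet_disable(&card->rx_tasklet);	/* waits until not running */
	/* ... touch state shared with the tasklet ... */
	tasklet_enable(&card->rx_tasklet);
}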
@@ -613,4 +630,19 @@ extern int arch_probe_nr_irqs(void);
 extern int arch_early_irq_init(void);
 extern int arch_init_chip_data(struct irq_desc *desc, int node);
 
+/*
+ * local_irq* variants depending on RT/!RT
+ */
+#ifdef CONFIG_PREEMPT_RT
+# define local_irq_disable_nort()	do { } while (0)
+# define local_irq_enable_nort()	do { } while (0)
+# define local_irq_save_nort(flags)	do { local_save_flags(flags); } while (0)
+# define local_irq_restore_nort(flags)	do { (void)(flags); } while (0)
+#else
+# define local_irq_disable_nort()	local_irq_disable()
+# define local_irq_enable_nort()	local_irq_enable()
+# define local_irq_save_nort(flags)	local_irq_save(flags)
+# define local_irq_restore_nort(flags)	local_irq_restore(flags)
+#endif
+
 #endif
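
The _nort variants are for sections that only need interrupts off on !RT; on CONFIG_PREEMPT_RT they compile down to (at most) a flags save, so the section stays preemptible. Intended use, sketched with an illustrative function:

static void my_hw_poke(void)
{
	unsigned long flags;

	local_irq_save_nort(flags);	/* irqs off on !RT; flags-only on RT */
	/* ... short, non-sleeping hardware access ... */
	local_irq_restore_nort(flags);
}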