Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/manage.c        2
-rw-r--r--  kernel/irq/spurious.c      7
-rw-r--r--  kernel/irq_work.c        150
-rw-r--r--  kernel/printk.c           36
-rw-r--r--  kernel/time/tick-sched.c   7
-rw-r--r--  kernel/timer.c             1
6 files changed, 133 insertions(+), 70 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index e49a288fa479..88e7bed62711 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1524,6 +1524,7 @@ void enable_percpu_irq(unsigned int irq, unsigned int type)
 out:
 	irq_put_desc_unlock(desc, flags);
 }
+EXPORT_SYMBOL_GPL(enable_percpu_irq);
 
 void disable_percpu_irq(unsigned int irq)
 {
@@ -1537,6 +1538,7 @@ void disable_percpu_irq(unsigned int irq)
 	irq_percpu_disable(desc, cpu);
 	irq_put_desc_unlock(desc, flags);
 }
+EXPORT_SYMBOL_GPL(disable_percpu_irq);
 
 /*
  * Internal function to unregister a percpu irqaction.
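
Note: the two EXPORT_SYMBOL_GPL() additions above make the per-CPU IRQ control API callable from modules. A minimal sketch of a module-side user follows; the IRQ number, handler, and "demo" names are hypothetical. Since enable_percpu_irq() only affects the CPU it runs on, it has to be called on every CPU that should receive the interrupt, e.g. via on_each_cpu():

    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/percpu.h>
    #include <linux/smp.h>

    static DEFINE_PER_CPU(int, demo_dev_id);

    /* Hypothetical handler; runs on whichever CPU took the interrupt. */
    static irqreturn_t demo_handler(int irq, void *dev_id)
    {
            return IRQ_HANDLED;
    }

    static void demo_enable(void *info)
    {
            enable_percpu_irq(*(int *)info, IRQ_TYPE_NONE);
    }

    static int demo_init(int irq)
    {
            int err;

            err = request_percpu_irq(irq, demo_handler, "demo",
                                     &demo_dev_id);
            if (err)
                    return err;
            on_each_cpu(demo_enable, &irq, 1);  /* enable on all CPUs */
            return 0;
    }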
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 611cd6003c45..7b5f012bde9d 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -80,13 +80,11 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 
 	/*
 	 * All handlers must agree on IRQF_SHARED, so we test just the
-	 * first. Check for action->next as well.
+	 * first.
 	 */
 	action = desc->action;
 	if (!action || !(action->flags & IRQF_SHARED) ||
-	    (action->flags & __IRQF_TIMER) ||
-	    (action->handler(irq, action->dev_id) == IRQ_HANDLED) ||
-	    !action->next)
+	    (action->flags & __IRQF_TIMER))
 		goto out;
 
 	/* Already running on another processor */
@@ -104,6 +102,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 	do {
 		if (handle_irq_event(desc) == IRQ_HANDLED)
 			ret = IRQ_HANDLED;
+		/* Make sure that there is still a valid action */
 		action = desc->action;
 	} while ((desc->istate & IRQS_PENDING) && action);
 	desc->istate &= ~IRQS_POLL_INPROGRESS;
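
Note: after this change the handlers are only ever invoked through handle_irq_event(), and desc->action is re-read on every loop iteration because a handler may have freed its irqaction in the meantime. Paraphrasing the resulting try_one_irq() logic (a summary sketch, not part of the patch):

    /* Poll only shared, non-timer interrupts... */
    action = desc->action;
    if (!action || !(action->flags & IRQF_SHARED) ||
        (action->flags & __IRQF_TIMER))
            goto out;

    /* ...and keep polling while the IRQ is pending and an action exists. */
    do {
            if (handle_irq_event(desc) == IRQ_HANDLED)
                    ret = IRQ_HANDLED;
            /* The handler may have torn down the action: re-check. */
            action = desc->action;
    } while ((desc->istate & IRQS_PENDING) && action);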
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 1588e3b2871b..55fcce6065cf 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -12,37 +12,36 @@
 #include <linux/percpu.h>
 #include <linux/hardirq.h>
 #include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/cpu.h>
+#include <linux/notifier.h>
 #include <asm/processor.h>
 
-/*
- * An entry can be in one of four states:
- *
- * free	     NULL, 0 -> {claimed}       : free to be used
- * claimed   NULL, 3 -> {pending}       : claimed to be enqueued
- * pending   next, 3 -> {busy}          : queued, pending callback
- * busy      NULL, 2 -> {free, claimed} : callback in progress, can be claimed
- */
-
-#define IRQ_WORK_PENDING	1UL
-#define IRQ_WORK_BUSY		2UL
-#define IRQ_WORK_FLAGS		3UL
 
 static DEFINE_PER_CPU(struct llist_head, irq_work_list);
+static DEFINE_PER_CPU(int, irq_work_raised);
 
 /*
  * Claim the entry so that no one else will poke at it.
  */
 static bool irq_work_claim(struct irq_work *work)
 {
-	unsigned long flags, nflags;
+	unsigned long flags, oflags, nflags;
 
+	/*
+	 * Start with our best wish as a premise but only trust any
+	 * flag value after cmpxchg() result.
+	 */
+	flags = work->flags & ~IRQ_WORK_PENDING;
 	for (;;) {
-		flags = work->flags;
-		if (flags & IRQ_WORK_PENDING)
-			return false;
 		nflags = flags | IRQ_WORK_FLAGS;
-		if (cmpxchg(&work->flags, flags, nflags) == flags)
+		oflags = cmpxchg(&work->flags, flags, nflags);
+		if (oflags == flags)
 			break;
+		if (oflags & IRQ_WORK_PENDING)
+			return false;
+		flags = oflags;
 		cpu_relax();
 	}
 
@@ -57,57 +56,69 @@ void __weak arch_irq_work_raise(void)
 }
 
 /*
- * Queue the entry and raise the IPI if needed.
+ * Enqueue the irq_work @entry unless it's already pending
+ * somewhere.
+ *
+ * Can be re-enqueued while the callback is still in progress.
  */
-static void __irq_work_queue(struct irq_work *work)
+void irq_work_queue(struct irq_work *work)
 {
-	bool empty;
+	/* Only queue if not already pending */
+	if (!irq_work_claim(work))
+		return;
 
+	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-	empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
-	/* The list was empty, raise self-interrupt to start processing. */
-	if (empty)
-		arch_irq_work_raise();
+	llist_add(&work->llnode, &__get_cpu_var(irq_work_list));
+
+	/*
+	 * If the work is not "lazy" or the tick is stopped, raise the irq
+	 * work interrupt (if supported by the arch), otherwise, just wait
+	 * for the next tick.
+	 */
+	if (!(work->flags & IRQ_WORK_LAZY) || tick_nohz_tick_stopped()) {
+		if (!this_cpu_cmpxchg(irq_work_raised, 0, 1))
+			arch_irq_work_raise();
+	}
 
 	preempt_enable();
 }
+EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/*
- * Enqueue the irq_work @entry, returns true on success, failure when the
- * @entry was already enqueued by someone else.
- *
- * Can be re-enqueued while the callback is still in progress.
- */
-bool irq_work_queue(struct irq_work *work)
+bool irq_work_needs_cpu(void)
 {
-	if (!irq_work_claim(work)) {
-		/*
-		 * Already enqueued, can't do!
-		 */
+	struct llist_head *this_list;
+
+	this_list = &__get_cpu_var(irq_work_list);
+	if (llist_empty(this_list))
 		return false;
-	}
 
-	__irq_work_queue(work);
+	/* All work should have been flushed before going offline */
+	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
+
 	return true;
 }
-EXPORT_SYMBOL_GPL(irq_work_queue);
 
-/*
- * Run the irq_work entries on this cpu. Requires to be ran from hardirq
- * context with local IRQs disabled.
- */
-void irq_work_run(void)
+static void __irq_work_run(void)
 {
+	unsigned long flags;
 	struct irq_work *work;
 	struct llist_head *this_list;
 	struct llist_node *llnode;
 
+
+	/*
+	 * Reset the "raised" state right before we check the list because
+	 * an NMI may enqueue after we find the list empty from the runner.
+	 */
+	__this_cpu_write(irq_work_raised, 0);
+	barrier();
+
 	this_list = &__get_cpu_var(irq_work_list);
 	if (llist_empty(this_list))
 		return;
 
-	BUG_ON(!in_irq());
 	BUG_ON(!irqs_disabled());
 
 	llnode = llist_del_all(this_list);
@@ -119,16 +130,31 @@ void irq_work_run(void)
 		/*
 		 * Clear the PENDING bit, after this point the @work
 		 * can be re-used.
+		 * Make it immediately visible so that other CPUs trying
+		 * to claim that work don't rely on us to handle their data
+		 * while we are in the middle of the func.
 		 */
-		work->flags = IRQ_WORK_BUSY;
+		flags = work->flags & ~IRQ_WORK_PENDING;
+		xchg(&work->flags, flags);
+
 		work->func(work);
 		/*
 		 * Clear the BUSY bit and return to the free state if
 		 * no-one else claimed it meanwhile.
 		 */
-		(void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0);
+		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
 	}
 }
+
+/*
+ * Run the irq_work entries on this cpu. Requires to be ran from hardirq
+ * context with local IRQs disabled.
+ */
+void irq_work_run(void)
+{
+	BUG_ON(!in_irq());
+	__irq_work_run();
+}
 EXPORT_SYMBOL_GPL(irq_work_run);
 
 /*
@@ -143,3 +169,35 @@ void irq_work_sync(struct irq_work *work)
 		cpu_relax();
 }
 EXPORT_SYMBOL_GPL(irq_work_sync);
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int irq_work_cpu_notify(struct notifier_block *self,
+			       unsigned long action, void *hcpu)
+{
+	long cpu = (long)hcpu;
+
+	switch (action) {
+	case CPU_DYING:
+		/* Called from stop_machine */
+		if (WARN_ON_ONCE(cpu != smp_processor_id()))
+			break;
+		__irq_work_run();
+		break;
+	default:
+		break;
+	}
+	return NOTIFY_OK;
+}
+
+static struct notifier_block cpu_notify;
+
+static __init int irq_work_init_cpu_notifier(void)
+{
+	cpu_notify.notifier_call = irq_work_cpu_notify;
+	cpu_notify.priority = 0;
+	register_cpu_notifier(&cpu_notify);
+	return 0;
+}
+device_initcall(irq_work_init_cpu_notifier);
+
+#endif /* CONFIG_HOTPLUG_CPU */
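
Note: the IRQ_WORK_LAZY flag used by this series lets a work item wait for the next timer tick instead of forcing a self-IPI, unless the tick is stopped; irq_work_queue() also no longer returns a value, since a duplicate queue attempt is simply a no-op (irq_work_claim() fails while the work is still pending). A minimal usage sketch, with hypothetical "demo" names, following the same per-CPU pattern the printk conversion below uses:

    #include <linux/irq_work.h>
    #include <linux/percpu.h>

    /*
     * Runs in hard-IRQ context: from the irq_work IPI, or, for lazy
     * work, from irq_work_run() on the next timer tick.
     */
    static void demo_work_func(struct irq_work *work)
    {
    }

    static DEFINE_PER_CPU(struct irq_work, demo_work) = {
            .func   = demo_work_func,
            .flags  = IRQ_WORK_LAZY,
    };

    /* Safe to call from NMI/IRQ context: claiming is a lock-free
     * cmpxchg() loop and queueing uses a lock-less list. */
    static void demo_poke(void)
    {
            irq_work_queue(&__get_cpu_var(demo_work));
    }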
diff --git a/kernel/printk.c b/kernel/printk.c
index 267ce780abe8..f24633afa46a 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -42,6 +42,7 @@
 #include <linux/notifier.h>
 #include <linux/rculist.h>
 #include <linux/poll.h>
+#include <linux/irq_work.h>
 
 #include <asm/uaccess.h>
 
@@ -1959,30 +1960,32 @@ int is_console_locked(void)
 static DEFINE_PER_CPU(int, printk_pending);
 static DEFINE_PER_CPU(char [PRINTK_BUF_SIZE], printk_sched_buf);
 
-void printk_tick(void)
+static void wake_up_klogd_work_func(struct irq_work *irq_work)
 {
-	if (__this_cpu_read(printk_pending)) {
-		int pending = __this_cpu_xchg(printk_pending, 0);
-		if (pending & PRINTK_PENDING_SCHED) {
-			char *buf = __get_cpu_var(printk_sched_buf);
-			printk(KERN_WARNING "[sched_delayed] %s", buf);
-		}
-		if (pending & PRINTK_PENDING_WAKEUP)
-			wake_up_interruptible(&log_wait);
-	}
-}
+	int pending = __this_cpu_xchg(printk_pending, 0);
 
-int printk_needs_cpu(int cpu)
-{
-	if (cpu_is_offline(cpu))
-		printk_tick();
-	return __this_cpu_read(printk_pending);
+	if (pending & PRINTK_PENDING_SCHED) {
+		char *buf = __get_cpu_var(printk_sched_buf);
+		printk(KERN_WARNING "[sched_delayed] %s", buf);
+	}
+
+	if (pending & PRINTK_PENDING_WAKEUP)
+		wake_up_interruptible(&log_wait);
 }
 
+static DEFINE_PER_CPU(struct irq_work, wake_up_klogd_work) = {
+	.func = wake_up_klogd_work_func,
+	.flags = IRQ_WORK_LAZY,
+};
+
 void wake_up_klogd(void)
 {
-	if (waitqueue_active(&log_wait))
+	preempt_disable();
+	if (waitqueue_active(&log_wait)) {
 		this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
+		irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
+	}
+	preempt_enable();
 }
 
 static void console_cont_flush(char *text, size_t size)
@@ -2462,6 +2465,7 @@ int printk_sched(const char *fmt, ...)
 	va_end(args);
 
 	__this_cpu_or(printk_pending, PRINTK_PENDING_SCHED);
+	irq_work_queue(&__get_cpu_var(wake_up_klogd_work));
 	local_irq_restore(flags);
 
 	return r;
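
Note: printk becomes the first user of the lazy irq_work here. Producers only set a per-CPU pending bit and queue the per-CPU work; the actual wakeup runs later from hard-IRQ context (usually the next tick), which keeps wake_up_klogd() and printk_sched() safe in atomic contexts. Condensed from the hunks above, the two halves of the flow are:

    /* Producer side (wake_up_klogd()/printk_sched()), atomic-safe: */
    this_cpu_or(printk_pending, PRINTK_PENDING_WAKEUP);
    irq_work_queue(&__get_cpu_var(wake_up_klogd_work));

    /* Consumer side (wake_up_klogd_work_func()), hard-IRQ context: */
    int pending = __this_cpu_xchg(printk_pending, 0);
    if (pending & PRINTK_PENDING_WAKEUP)
            wake_up_interruptible(&log_wait);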
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index d58e552d9fd1..fb8e5e469d1c 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
 #include <linux/profile.h>
 #include <linux/sched.h>
 #include <linux/module.h>
+#include <linux/irq_work.h>
 
 #include <asm/irq_regs.h>
 
@@ -28,7 +29,7 @@
 /*
  * Per cpu nohz control structure
  */
-static DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
+DEFINE_PER_CPU(struct tick_sched, tick_cpu_sched);
 
 /*
  * The time, when the last jiffy update happened. Protected by jiffies_lock.
@@ -331,8 +332,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
 		time_delta = timekeeping_max_deferment();
 	} while (read_seqretry(&jiffies_lock, seq));
 
-	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
-	    arch_needs_cpu(cpu)) {
+	if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
+	    arch_needs_cpu(cpu) || irq_work_needs_cpu()) {
 		next_jiffies = last_jiffies + 1;
 		delta_jiffies = 1;
 	} else {
diff --git a/kernel/timer.c b/kernel/timer.c
index 367d00858482..ff3b5165737b 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1351,7 +1351,6 @@ void update_process_times(int user_tick)
 	account_process_tick(p, user_tick);
 	run_local_timers();
 	rcu_check_callbacks(cpu, user_tick);
-	printk_tick();
 #ifdef CONFIG_IRQ_WORK
 	if (in_irq())
 		irq_work_run();