author    Linus Torvalds <torvalds@linux-foundation.org>  2014-04-01 22:19:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-04-01 22:19:15 -0400
commit    7a48837732f87a574ee3e1855927dc250117f565 (patch)
tree      f2e975a347d6d489e9f1932f9864fc978910def0 /kernel
parent    1a0b6abaea78f73d9bc0a2f6df2d9e4c917cade1 (diff)
parent    27fbf4e87c16bb3e40730890169a643a494b7c64 (diff)
Merge branch 'for-3.15/core' of git://git.kernel.dk/linux-block
Pull core block layer updates from Jens Axboe:
 "This is the pull request for the core block IO bits for the 3.15 kernel.
  It's a smaller round this time, it contains:

   - Various little blk-mq fixes and additions from Christoph and myself.

   - Cleanup of the IPI usage from the block layer, and associated helper
     code.  From Frederic Weisbecker and Jan Kara.

   - Duplicate code cleanup in bio-integrity from Gu Zheng.  This will
     give you a merge conflict, but that should be easy to resolve.

   - blk-mq notify spinlock fix for RT from Mike Galbraith.

   - A blktrace partial accounting bug fix from Roman Pen.

   - Missing REQ_SYNC detection fix for blk-mq from Shaohua Li"

* 'for-3.15/core' of git://git.kernel.dk/linux-block: (25 commits)
  blk-mq: add REQ_SYNC early
  rt,blk,mq: Make blk_mq_cpu_notify_lock a raw spinlock
  blk-mq: support partial I/O completions
  blk-mq: merge blk_mq_insert_request and blk_mq_run_request
  blk-mq: remove blk_mq_alloc_rq
  blk-mq: don't dump CPU -> hw queue map on driver load
  blk-mq: fix wrong usage of hctx->state vs hctx->flags
  blk-mq: allow blk_mq_init_commands() to return failure
  block: remove old blk_iopoll_enabled variable
  blktrace: fix accounting of partially completed requests
  smp: Rename __smp_call_function_single() to smp_call_function_single_async()
  smp: Remove wait argument from __smp_call_function_single()
  watchdog: Simplify a little the IPI call
  smp: Move __smp_call_function_single() below its safe version
  smp: Consolidate the various smp_call_function_single() declensions
  smp: Teach __smp_call_function_single() to check for offline cpus
  smp: Remove unused list_head from csd
  smp: Iterate functions through llist_for_each_entry_safe()
  block: Stop abusing rq->csd.list in blk-softirq
  block: Remove useless IPI struct initialization
  ...
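In short, the IPI helper rename pulled in here changes the calling convention: __smp_call_function_single(cpu, csd, wait) is gone, asynchronous callers switch to smp_call_function_single_async(cpu, csd), and callers that waited switch to the plain synchronous helper. A minimal before/after sketch based on the kernel/sched/core.c and kernel/watchdog.c hunks below (the obj, my_csd and my_func names are illustrative, not from this commit):

	/* before: fire-and-forget IPI, wait == 0 */
	__smp_call_function_single(cpu, &obj->my_csd, 0);

	/* after: the async variant never waits, so the argument is gone */
	smp_call_function_single_async(cpu, &obj->my_csd);

	/* callers that used wait == 1 now use the synchronous helper instead */
	smp_call_function_single(cpu, my_func, NULL, 1);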
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/sched/core.c      |   2
-rw-r--r--  kernel/smp.c             | 139
-rw-r--r--  kernel/sysctl.c          |  12
-rw-r--r--  kernel/trace/blktrace.c  |  20
-rw-r--r--  kernel/up.c              |   6
-rw-r--r--  kernel/watchdog.c        |   3
6 files changed, 81 insertions, 101 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3c4d096544ce..9cae286824bb 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -432,7 +432,7 @@ void hrtick_start(struct rq *rq, u64 delay)
 	if (rq == this_rq()) {
 		__hrtick_restart(rq);
 	} else if (!rq->hrtick_csd_pending) {
-		__smp_call_function_single(cpu_of(rq), &rq->hrtick_csd, 0);
+		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
 		rq->hrtick_csd_pending = 1;
 	}
 }
diff --git a/kernel/smp.c b/kernel/smp.c
index ffee35bef179..06d574e42c72 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -117,13 +117,43 @@ static void csd_unlock(struct call_single_data *csd)
 	csd->flags &= ~CSD_FLAG_LOCK;
 }
 
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
+
 /*
  * Insert a previously allocated call_single_data element
  * for execution on the given CPU. data must already have
  * ->func, ->info, and ->flags set.
  */
-static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
+static int generic_exec_single(int cpu, struct call_single_data *csd,
+			       smp_call_func_t func, void *info, int wait)
 {
+	struct call_single_data csd_stack = { .flags = 0 };
+	unsigned long flags;
+
+
+	if (cpu == smp_processor_id()) {
+		local_irq_save(flags);
+		func(info);
+		local_irq_restore(flags);
+		return 0;
+	}
+
+
+	if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu))
+		return -ENXIO;
+
+
+	if (!csd) {
+		csd = &csd_stack;
+		if (!wait)
+			csd = &__get_cpu_var(csd_data);
+	}
+
+	csd_lock(csd);
+
+	csd->func = func;
+	csd->info = info;
+
 	if (wait)
 		csd->flags |= CSD_FLAG_WAIT;
 
@@ -143,6 +173,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
 
 	if (wait)
 		csd_lock_wait(csd);
+
+	return 0;
 }
 
 /*
@@ -151,7 +183,8 @@ static void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
  */
 void generic_smp_call_function_single_interrupt(void)
 {
-	struct llist_node *entry, *next;
+	struct llist_node *entry;
+	struct call_single_data *csd, *csd_next;
 
 	/*
 	 * Shouldn't receive this interrupt on a cpu that is not yet online.
@@ -161,21 +194,12 @@ void generic_smp_call_function_single_interrupt(void)
 	entry = llist_del_all(&__get_cpu_var(call_single_queue));
 	entry = llist_reverse_order(entry);
 
-	while (entry) {
-		struct call_single_data *csd;
-
-		next = entry->next;
-
-		csd = llist_entry(entry, struct call_single_data, llist);
+	llist_for_each_entry_safe(csd, csd_next, entry, llist) {
 		csd->func(csd->info);
 		csd_unlock(csd);
-
-		entry = next;
 	}
 }
 
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
-
 /*
  * smp_call_function_single - Run a function on a specific CPU
  * @func: The function to run. This must be fast and non-blocking.
@@ -187,12 +211,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
 int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 			     int wait)
 {
-	struct call_single_data d = {
-		.flags = 0,
-	};
-	unsigned long flags;
 	int this_cpu;
-	int err = 0;
+	int err;
 
 	/*
 	 * prevent preemption and reschedule on another processor,
@@ -209,32 +229,41 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
 	WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
 		     && !oops_in_progress);
 
-	if (cpu == this_cpu) {
-		local_irq_save(flags);
-		func(info);
-		local_irq_restore(flags);
-	} else {
-		if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
-			struct call_single_data *csd = &d;
+	err = generic_exec_single(cpu, NULL, func, info, wait);
 
-			if (!wait)
-				csd = &__get_cpu_var(csd_data);
+	put_cpu();
 
-			csd_lock(csd);
+	return err;
+}
+EXPORT_SYMBOL(smp_call_function_single);
 
-			csd->func = func;
-			csd->info = info;
-			generic_exec_single(cpu, csd, wait);
-		} else {
-			err = -ENXIO;	/* CPU not online */
-		}
-	}
+/**
+ * smp_call_function_single_async(): Run an asynchronous function on a
+ *			specific CPU.
+ * @cpu: The CPU to run on.
+ * @csd: Pre-allocated and setup data structure
+ *
+ * Like smp_call_function_single(), but the call is asynchonous and
+ * can thus be done from contexts with disabled interrupts.
+ *
+ * The caller passes his own pre-allocated data structure
+ * (ie: embedded in an object) and is responsible for synchronizing it
+ * such that the IPIs performed on the @csd are strictly serialized.
+ *
+ * NOTE: Be careful, there is unfortunately no current debugging facility to
+ * validate the correctness of this serialization.
+ */
+int smp_call_function_single_async(int cpu, struct call_single_data *csd)
+{
+	int err = 0;
 
-	put_cpu();
+	preempt_disable();
+	err = generic_exec_single(cpu, csd, csd->func, csd->info, 0);
+	preempt_enable();
 
 	return err;
 }
-EXPORT_SYMBOL(smp_call_function_single);
+EXPORT_SYMBOL_GPL(smp_call_function_single_async);
 
 /*
  * smp_call_function_any - Run a function on any of the given cpus
@@ -280,44 +309,6 @@ call:
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
 /**
- * __smp_call_function_single(): Run a function on a specific CPU
- * @cpu: The CPU to run on.
- * @data: Pre-allocated and setup data structure
- * @wait: If true, wait until function has completed on specified CPU.
- *
- * Like smp_call_function_single(), but allow caller to pass in a
- * pre-allocated data structure. Useful for embedding @data inside
- * other structures, for instance.
- */
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
-				int wait)
-{
-	unsigned int this_cpu;
-	unsigned long flags;
-
-	this_cpu = get_cpu();
-	/*
-	 * Can deadlock when called with interrupts disabled.
-	 * We allow cpu's that are not yet online though, as no one else can
-	 * send smp call function interrupt to this cpu and as such deadlocks
-	 * can't happen.
-	 */
-	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
-		     && !oops_in_progress);
-
-	if (cpu == this_cpu) {
-		local_irq_save(flags);
-		csd->func(csd->info);
-		local_irq_restore(flags);
-	} else {
-		csd_lock(csd);
-		generic_exec_single(cpu, csd, wait);
-	}
-	put_cpu();
-}
-EXPORT_SYMBOL_GPL(__smp_call_function_single);
-
-/**
  * smp_call_function_many(): Run a function on a set of other CPUs.
  * @mask: The set of cpus to run on (only runs on online subset).
  * @func: The function to run. This must be fast and non-blocking.
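
The kernel-doc added to kernel/smp.c above stresses that the caller owns the csd and must serialize IPIs on it. A hedged usage sketch of the new API, modeled on the rq->hrtick_csd_pending pattern from the kernel/sched/core.c hunk; struct my_dev, my_dev_ipi() and my_dev_kick() are hypothetical names, not kernel code:

	struct my_dev {
		struct call_single_data csd;	/* embedded, caller-owned */
		int csd_pending;		/* caller-side flag serializing reuse of the csd */
		unsigned long events;
	};

	/* Runs on the target CPU in interrupt context; must be fast and non-blocking. */
	static void my_dev_ipi(void *info)
	{
		struct my_dev *dev = info;

		dev->events++;
		dev->csd_pending = 0;	/* the csd may now be reused for the next IPI */
	}

	static void my_dev_kick(struct my_dev *dev, int cpu)
	{
		if (dev->csd_pending)
			return;		/* previous IPI still in flight: keep the csd serialized */

		dev->csd_pending = 1;
		dev->csd.func = my_dev_ipi;
		dev->csd.info = dev;
		/* asynchronous, so per the kernel-doc this is fine with interrupts disabled */
		smp_call_function_single_async(cpu, &dev->csd);
	}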
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7754ff16f334..09d2e2413605 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -112,9 +112,6 @@ extern int sysctl_nr_open_min, sysctl_nr_open_max;
 #ifndef CONFIG_MMU
 extern int sysctl_nr_trim_pages;
 #endif
-#ifdef CONFIG_BLOCK
-extern int blk_iopoll_enabled;
-#endif
 
 /* Constants used for minimum and maximum */
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -1087,15 +1084,6 @@ static struct ctl_table kern_table[] = {
 		.proc_handler	= proc_dointvec,
 	},
 #endif
-#ifdef CONFIG_BLOCK
-	{
-		.procname	= "blk_iopoll",
-		.data		= &blk_iopoll_enabled,
-		.maxlen		= sizeof(int),
-		.mode		= 0644,
-		.proc_handler	= proc_dointvec,
-	},
-#endif
 	{ }
 };
 
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index b418cb0d7242..4f3a3c03eadb 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -702,6 +702,7 @@ void blk_trace_shutdown(struct request_queue *q)
  * blk_add_trace_rq - Add a trace for a request oriented action
  * @q: queue the io is for
  * @rq: the source request
+ * @nr_bytes: number of completed bytes
  * @what: the action
  *
  * Description:
@@ -709,7 +710,7 @@ void blk_trace_shutdown(struct request_queue *q)
709 * 710 *
710 **/ 711 **/
711static void blk_add_trace_rq(struct request_queue *q, struct request *rq, 712static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
712 u32 what) 713 unsigned int nr_bytes, u32 what)
713{ 714{
714 struct blk_trace *bt = q->blk_trace; 715 struct blk_trace *bt = q->blk_trace;
715 716
@@ -718,11 +719,11 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 
 	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
 		what |= BLK_TC_ACT(BLK_TC_PC);
-		__blk_add_trace(bt, 0, blk_rq_bytes(rq), rq->cmd_flags,
+		__blk_add_trace(bt, 0, nr_bytes, rq->cmd_flags,
 				what, rq->errors, rq->cmd_len, rq->cmd);
 	} else {
 		what |= BLK_TC_ACT(BLK_TC_FS);
-		__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
+		__blk_add_trace(bt, blk_rq_pos(rq), nr_bytes,
 				rq->cmd_flags, what, rq->errors, 0, NULL);
 	}
 }
@@ -730,33 +731,34 @@ static void blk_add_trace_rq(struct request_queue *q, struct request *rq,
 static void blk_add_trace_rq_abort(void *ignore,
 				   struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_ABORT);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ABORT);
 }
 
 static void blk_add_trace_rq_insert(void *ignore,
 				    struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_INSERT);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_INSERT);
 }
 
 static void blk_add_trace_rq_issue(void *ignore,
 				   struct request_queue *q, struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_ISSUE);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_ISSUE);
 }
 
 static void blk_add_trace_rq_requeue(void *ignore,
 				     struct request_queue *q,
 				     struct request *rq)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_REQUEUE);
+	blk_add_trace_rq(q, rq, blk_rq_bytes(rq), BLK_TA_REQUEUE);
 }
 
 static void blk_add_trace_rq_complete(void *ignore,
 				      struct request_queue *q,
-				      struct request *rq)
+				      struct request *rq,
+				      unsigned int nr_bytes)
 {
-	blk_add_trace_rq(q, rq, BLK_TA_COMPLETE);
+	blk_add_trace_rq(q, rq, nr_bytes, BLK_TA_COMPLETE);
 }
 
 /**
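
The blktrace hunks above thread nr_bytes through blk_add_trace_rq() so the completion event records the bytes finished by this call rather than blk_rq_bytes(rq), which for a request completed in pieces can count bytes more than once. A toy user-space model of that accounting difference (not kernel code, illustrative sizes only):

	#include <stdio.h>

	int main(void)
	{
		unsigned int rq_bytes = 32 * 1024;			/* bytes still pending on the request */
		unsigned int chunks[2] = { 16 * 1024, 16 * 1024 };	/* two partial completions */
		unsigned int old_traced = 0, new_traced = 0;

		for (int i = 0; i < 2; i++) {
			old_traced += rq_bytes;		/* old behaviour: log blk_rq_bytes(rq) each time */
			new_traced += chunks[i];	/* new behaviour: log only the bytes completed now */
			rq_bytes -= chunks[i];		/* the request shrinks after each partial completion */
		}

		/* prints "old: 48 KiB, new: 32 KiB" for a 32 KiB request */
		printf("old: %u KiB, new: %u KiB\n", old_traced / 1024, new_traced / 1024);
		return 0;
	}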
diff --git a/kernel/up.c b/kernel/up.c
index 509403e3fbc6..1760bf3d1463 100644
--- a/kernel/up.c
+++ b/kernel/up.c
@@ -22,16 +22,16 @@ int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
 }
 EXPORT_SYMBOL(smp_call_function_single);
 
-void __smp_call_function_single(int cpu, struct call_single_data *csd,
-				int wait)
+int smp_call_function_single_async(int cpu, struct call_single_data *csd)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
 	csd->func(csd->info);
 	local_irq_restore(flags);
+	return 0;
 }
-EXPORT_SYMBOL(__smp_call_function_single);
+EXPORT_SYMBOL(smp_call_function_single_async);
 
 int on_each_cpu(smp_call_func_t func, void *info, int wait)
 {
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 4431610f049a..01c6f979486f 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -505,7 +505,6 @@ static void restart_watchdog_hrtimer(void *info)
 
 static void update_timers(int cpu)
 {
-	struct call_single_data data = {.func = restart_watchdog_hrtimer};
 	/*
 	 * Make sure that perf event counter will adopt to a new
 	 * sampling period. Updating the sampling period directly would
@@ -515,7 +514,7 @@ static void update_timers(int cpu)
 	 * might be late already so we have to restart the timer as well.
 	 */
 	watchdog_nmi_disable(cpu);
-	__smp_call_function_single(cpu, &data, 1);
+	smp_call_function_single(cpu, restart_watchdog_hrtimer, NULL, 1);
 	watchdog_nmi_enable(cpu);
 }
 