-rw-r--r--	block/blk-mq-cpu.c	31
-rw-r--r--	block/blk-mq.c	68
-rw-r--r--	block/blk-mq.h	1
3 files changed, 11 insertions, 89 deletions
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 0045ace9bdf0..20576e3476e9 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -28,32 +28,6 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static void blk_mq_cpu_notify(void *data, unsigned long action,
-			      unsigned int cpu)
-{
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
-		/*
-		 * If the CPU goes away, ensure that we run any pending
-		 * completions.
-		 */
-		struct llist_node *node;
-		struct request *rq;
-
-		local_irq_disable();
-
-		node = llist_del_all(&per_cpu(ipi_lists, cpu));
-		while (node) {
-			struct llist_node *next = node->next;
-
-			rq = llist_entry(node, struct request, ll_list);
-			__blk_mq_end_io(rq, rq->errors);
-			node = next;
-		}
-
-		local_irq_enable();
-	}
-}
-
 static struct notifier_block __cpuinitdata blk_mq_main_cpu_notifier = {
 	.notifier_call	= blk_mq_main_cpu_notify,
 };
@@ -82,12 +56,7 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
 	notifier->data = data;
 }
 
-static struct blk_mq_cpu_notifier __cpuinitdata cpu_notifier = {
-	.notify = blk_mq_cpu_notify,
-};
-
 void __init blk_mq_cpu_init(void)
 {
 	register_hotcpu_notifier(&blk_mq_main_cpu_notifier);
-	blk_mq_register_cpu_notifier(&cpu_notifier);
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 473ce4039060..68734f87f1da 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -27,8 +27,6 @@ static LIST_HEAD(all_q_list);
 
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
 
-DEFINE_PER_CPU(struct llist_head, ipi_lists);
-
 static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
 					   unsigned int cpu)
 {
@@ -339,55 +337,12 @@ void __blk_mq_end_io(struct request *rq, int error)
 	blk_mq_complete_request(rq, error);
 }
 
-#if defined(CONFIG_SMP)
-
-/*
- * Called with interrupts disabled.
- */
-static void ipi_end_io(void *data)
+static void blk_mq_end_io_remote(void *data)
 {
-	struct llist_head *list = &per_cpu(ipi_lists, smp_processor_id());
-	struct llist_node *entry, *next;
-	struct request *rq;
-
-	entry = llist_del_all(list);
-
-	while (entry) {
-		next = entry->next;
-		rq = llist_entry(entry, struct request, ll_list);
-		__blk_mq_end_io(rq, rq->errors);
-		entry = next;
-	}
-}
-
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-			  struct request *rq, const int error)
-{
-	struct call_single_data *data = &rq->csd;
-
-	rq->errors = error;
-	rq->ll_list.next = NULL;
-
-	/*
-	 * If the list is non-empty, an existing IPI must already
-	 * be "in flight". If that is the case, we need not schedule
-	 * a new one.
-	 */
-	if (llist_add(&rq->ll_list, &per_cpu(ipi_lists, ctx->cpu))) {
-		data->func = ipi_end_io;
-		data->flags = 0;
-		__smp_call_function_single(ctx->cpu, data, 0);
-	}
+	struct request *rq = data;
 
-	return true;
+	__blk_mq_end_io(rq, rq->errors);
 }
-#else /* CONFIG_SMP */
-static int ipi_remote_cpu(struct blk_mq_ctx *ctx, const int cpu,
-			  struct request *rq, const int error)
-{
-	return false;
-}
-#endif
 
 /*
  * End IO on this request on a multiqueue enabled driver. We'll either do
@@ -403,11 +358,15 @@ void blk_mq_end_io(struct request *rq, int error)
 		return __blk_mq_end_io(rq, error);
 
 	cpu = get_cpu();
-
-	if (cpu == ctx->cpu || !cpu_online(ctx->cpu) ||
-	    !ipi_remote_cpu(ctx, cpu, rq, error))
+	if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
+		rq->errors = error;
+		rq->csd.func = blk_mq_end_io_remote;
+		rq->csd.info = rq;
+		rq->csd.flags = 0;
+		__smp_call_function_single(ctx->cpu, &rq->csd, 0);
+	} else {
 		__blk_mq_end_io(rq, error);
-
+	}
 	put_cpu();
 }
 EXPORT_SYMBOL(blk_mq_end_io);
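
The two hunks above replace blk-mq's remote-completion path. Previously a completed request was pushed onto a per-CPU ipi_lists llist, and only the llist_add() that found the list empty raised an IPI, with ipi_end_io() draining the whole list on the target CPU; now each request's embedded call_single_data is handed straight to __smp_call_function_single(), which runs blk_mq_end_io_remote() on the CPU the request was queued on. The following is a minimal sketch of the batching invariant that gets removed. It is a plain userspace model, not kernel code, and the names fake_request, pending_add and drain_pending are made up for illustration.

/*
 * Userspace model of the removed per-CPU llist batching: the add that finds
 * the list empty is the only one that needs to "kick" the remote CPU, since
 * a later drain picks up every request queued in the meantime.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct fake_request {
	struct fake_request *next;
	int tag;
	int errors;
};

static struct fake_request *pending;	/* stands in for per_cpu(ipi_lists, cpu) */
static int kick_count;			/* how many "IPIs" were sent */

/* Push onto the list; report whether it was empty beforehand (like llist_add). */
static bool pending_add(struct fake_request *rq)
{
	bool was_empty = (pending == NULL);

	rq->next = pending;
	pending = rq;
	return was_empty;
}

/* The "IPI handler": take the whole list and complete each request on it. */
static void drain_pending(void)
{
	struct fake_request *rq = pending;

	pending = NULL;
	while (rq) {
		struct fake_request *next = rq->next;

		printf("completed tag %d (error %d)\n", rq->tag, rq->errors);
		rq = next;
	}
}

int main(void)
{
	struct fake_request rqs[3] = { { .tag = 1 }, { .tag = 2 }, { .tag = 3 } };

	for (int i = 0; i < 3; i++)
		if (pending_add(&rqs[i]))	/* only the first add "sends an IPI" */
			kick_count++;

	drain_pending();
	printf("3 requests completed, %d kick(s) needed\n", kick_count);
	return 0;
}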
@@ -1506,11 +1465,6 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
 
 static int __init blk_mq_init(void)
 {
-	unsigned int i;
-
-	for_each_possible_cpu(i)
-		init_llist_head(&per_cpu(ipi_lists, i));
-
 	blk_mq_cpu_init();
 
 	/* Must be called after percpu_counter_hotcpu_callback() */
diff --git a/block/blk-mq.h b/block/blk-mq.h
index e151a2f4f171..5c3917984b00 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -40,7 +40,6 @@ void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
 void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier);
 void blk_mq_cpu_init(void);
-DECLARE_PER_CPU(struct llist_head, ipi_lists);
 
 /*
  * CPU -> queue mappings
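
For contrast, here is an equally hedged sketch of the shape of the new path: a per-request descriptor carrying a function pointer plus argument, dispatched directly. struct fake_csd and run_on_remote_cpu() are userspace stand-ins for struct call_single_data and __smp_call_function_single(); they are illustrative only, not the kernel API.

/*
 * Userspace model of the per-request callback pattern introduced above:
 * the submitter fills in func/info on the request's own descriptor and
 * hands it to the "remote call" machinery, one callback per request.
 */
#include <stdio.h>

struct fake_csd {
	void (*func)(void *info);
	void *info;
};

struct fake_request {
	struct fake_csd csd;
	int tag;
	int errors;
};

/* In the kernel this would raise an IPI to 'cpu'; here we simply invoke it. */
static void run_on_remote_cpu(int cpu, struct fake_csd *csd)
{
	printf("running callback on cpu %d\n", cpu);
	csd->func(csd->info);
}

/* Mirrors blk_mq_end_io_remote(): recover the request and complete it. */
static void end_io_remote(void *info)
{
	struct fake_request *rq = info;

	printf("completed tag %d (error %d)\n", rq->tag, rq->errors);
}

int main(void)
{
	struct fake_request rq = { .tag = 7, .errors = 0 };

	/* Same shape as the added blk_mq_end_io() branch in the patch. */
	rq.csd.func = end_io_remote;
	rq.csd.info = &rq;
	run_on_remote_cpu(2, &rq.csd);
	return 0;
}

Compared with the removed batching, this issues one callback per request rather than coalescing them, but as the diff shows it drops the per-CPU ipi_lists state, the CONFIG_SMP special casing, and the CPU-hotplug drain in blk-mq-cpu.c.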