Diffstat (limited to 'drivers/block')
 -rw-r--r--   drivers/block/null_blk.c | 102
 -rw-r--r--   drivers/block/skd_main.c |   4
 2 files changed, 78 insertions(+), 28 deletions(-)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index f370fc13aea5..a2e69d26266d 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
 #include <linux/module.h>
+
 #include <linux/moduleparam.h>
 #include <linux/sched.h>
 #include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
         NULL_Q_MQ       = 2,
 };
 
-static int submit_queues = 1;
+static int submit_queues;
 module_param(submit_queues, int, S_IRUGO);
 MODULE_PARM_DESC(submit_queues, "Number of submission queues");
 
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
 module_param(hw_queue_depth, int, S_IRUGO);
 MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
 
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
 module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
 
 static void put_tag(struct nullb_queue *nq, unsigned int tag)
 {
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
 
 static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
 {
-        return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
-                                hctx_index);
+        int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+        int tip = (reg->nr_hw_queues % nr_online_nodes);
+        int node = 0, i, n;
+
+        /*
+         * Split submit queues evenly wrt to the number of nodes. If uneven,
+         * fill the first buckets with one extra, until the rest is filled with
+         * no extra.
+         */
+        for (i = 0, n = 1; i < hctx_index; i++, n++) {
+                if (n % b_size == 0) {
+                        n = 0;
+                        node++;
+
+                        tip--;
+                        if (!tip)
+                                b_size = reg->nr_hw_queues / nr_online_nodes;
+                }
+        }
+
+        /*
+         * A node might not be online, therefore map the relative node id to the
+         * real node id.
+         */
+        for_each_online_node(n) {
+                if (!node)
+                        break;
+                node--;
+        }
+
+        return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
 }
 
 static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
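
As a reading aid, here is a minimal userspace sketch of the bucket-split loop in the hunk above. The helper name hctx_index_to_node() is hypothetical, DIV_ROUND_UP() is replaced with plain arithmetic, and the for_each_online_node() step is left out, so this only models the mapping from a hardware-context index to a relative node bucket (the first nr_hw_queues % nr_nodes buckets get one extra queue):

/* Userspace illustration only -- not part of the patch. */
#include <stdio.h>

/* Hypothetical helper mirroring null_alloc_hctx()'s first loop: returns the
 * relative node bucket for a given hardware-context index. */
static int hctx_index_to_node(int nr_hw_queues, int nr_nodes, int hctx_index)
{
        int b_size = (nr_hw_queues + nr_nodes - 1) / nr_nodes; /* DIV_ROUND_UP */
        int tip = nr_hw_queues % nr_nodes;
        int node = 0, i, n;

        /* Advance to the next bucket each time the current one fills up;
         * once the "extra" buckets are used up, shrink the bucket size. */
        for (i = 0, n = 1; i < hctx_index; i++, n++) {
                if (n % b_size == 0) {
                        n = 0;
                        node++;
                        tip--;
                        if (!tip)
                                b_size = nr_hw_queues / nr_nodes;
                }
        }
        return node;
}

int main(void)
{
        int i;

        /* 6 hardware queues over 4 nodes: indices map to relative buckets
         * 0, 0, 1, 1, 2, 3, i.e. bucket sizes of 2, 2, 1, 1. */
        for (i = 0; i < 6; i++)
                printf("hctx %d -> bucket %d\n", i, hctx_index_to_node(6, 4, i));
        return 0;
}

The second loop in the actual patch then walks for_each_online_node() to turn this relative bucket into a real online node id before calling kzalloc_node().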
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
         kfree(hctx);
 }
 
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+        BUG_ON(!nullb);
+        BUG_ON(!nq);
+
+        init_waitqueue_head(&nq->wait);
+        nq->queue_depth = nullb->queue_depth;
+}
+
 static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
                           unsigned int index)
 {
         struct nullb *nullb = data;
         struct nullb_queue *nq = &nullb->queues[index];
 
-        init_waitqueue_head(&nq->wait);
-        nq->queue_depth = nullb->queue_depth;
-        nullb->nr_queues++;
         hctx->driver_data = nq;
+        null_init_queue(nullb, nq);
+        nullb->nr_queues++;
 
         return 0;
 }
@@ -417,13 +455,13 @@ static int setup_commands(struct nullb_queue *nq)
 
         nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
         if (!nq->cmds)
-                return 1;
+                return -ENOMEM;
 
         tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
         nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
         if (!nq->tag_map) {
                 kfree(nq->cmds);
-                return 1;
+                return -ENOMEM;
         }
 
         for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +492,37 @@ static void cleanup_queues(struct nullb *nullb)
 
 static int setup_queues(struct nullb *nullb)
 {
-        struct nullb_queue *nq;
-        int i;
-
-        nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+        nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+                                                                GFP_KERNEL);
         if (!nullb->queues)
-                return 1;
+                return -ENOMEM;
 
         nullb->nr_queues = 0;
         nullb->queue_depth = hw_queue_depth;
 
-        if (queue_mode == NULL_Q_MQ)
-                return 0;
+        return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+        struct nullb_queue *nq;
+        int i, ret = 0;
 
         for (i = 0; i < submit_queues; i++) {
                 nq = &nullb->queues[i];
-                init_waitqueue_head(&nq->wait);
-                nq->queue_depth = hw_queue_depth;
-                if (setup_commands(nq))
-                        break;
+
+                null_init_queue(nullb, nq);
+
+                ret = setup_commands(nq);
+                if (ret)
+                        goto err_queue;
                 nullb->nr_queues++;
         }
 
-        if (i == submit_queues)
-                return 0;
-
+        return 0;
+err_queue:
         cleanup_queues(nullb);
-        return 1;
+        return ret;
 }
 
 static int null_add_dev(void)
@@ -518,11 +560,13 @@ static int null_add_dev(void)
         } else if (queue_mode == NULL_Q_BIO) {
                 nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
                 blk_queue_make_request(nullb->q, null_queue_bio);
+                init_driver_queues(nullb);
         } else {
                 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
                 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
                 if (nullb->q)
                         blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+                init_driver_queues(nullb);
         }
 
         if (!nullb->q)
@@ -579,7 +623,13 @@ static int __init null_init(void)
         }
 #endif
 
-        if (submit_queues > nr_cpu_ids)
+        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+                if (submit_queues < nr_online_nodes) {
+                        pr_warn("null_blk: submit_queues param is set to %u.",
+                                                        nr_online_nodes);
+                        submit_queues = nr_online_nodes;
+                }
+        } else if (submit_queues > nr_cpu_ids)
                 submit_queues = nr_cpu_ids;
         else if (!submit_queues)
                 submit_queues = 1;
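
For clarity, a small userspace sketch of the submit_queues adjustment introduced in the null_init() hunk above; the function name clamp_submit_queues() and the plain parameters standing in for nr_online_nodes/nr_cpu_ids are assumptions for illustration, and the pr_warn() is omitted:

/* Userspace illustration only -- not part of the patch. */
#include <stdio.h>
#include <stdbool.h>

enum { NULL_Q_BIO = 0, NULL_Q_RQ = 1, NULL_Q_MQ = 2 }; /* as in null_blk.c */

/* Mirrors the new parameter check: per-node MQ mode needs at least one
 * queue per online node; otherwise the old cap/minimum rules still apply. */
static int clamp_submit_queues(int submit_queues, int queue_mode,
                               bool use_per_node_hctx,
                               int nr_online_nodes, int nr_cpu_ids)
{
        if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
                if (submit_queues < nr_online_nodes)
                        submit_queues = nr_online_nodes;
        } else if (submit_queues > nr_cpu_ids)
                submit_queues = nr_cpu_ids;
        else if (!submit_queues)
                submit_queues = 1;

        return submit_queues;
}

int main(void)
{
        /* Assume 2 online nodes and 8 CPUs. */
        printf("%d\n", clamp_submit_queues(1, NULL_Q_MQ, true, 2, 8));   /* -> 2 */
        printf("%d\n", clamp_submit_queues(16, NULL_Q_MQ, false, 2, 8)); /* -> 8 */
        printf("%d\n", clamp_submit_queues(0, NULL_Q_BIO, false, 2, 8)); /* -> 1 */
        return 0;
}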
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93be926..eb6e1e0e8db2 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
         }
 }
 
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
 {
         switch (state) {
         case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
         }
 }
 
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
 {
         switch (state) {
         case SKD_REQ_STATE_IDLE: