diff options
| author | Sam Bradshaw <sbradshaw@micron.com> | 2014-06-06 15:28:48 -0400 |
|---|---|---|
| committer | Jens Axboe <axboe@fb.com> | 2014-06-06 15:28:48 -0400 |
| commit | f45c40a92d2c6915a0e88ff8a947095be2ba1c8e (patch) | |
| tree | 20ca1854aefee878ca5606b98cfdfe920f511b47 | |
| parent | f6be4fb4bcb396fc3b1c134b7863351972de081f (diff) | |
mtip32xx: minor performance enhancements
This patch adds the following:
1) Compiler hinting in the fast path.
2) A prefetch of port->flags to eliminate moderate cpu stalling later
in mtip_hw_submit_io().
3) Eliminate a redundant rq_data_dir().
4) Reorder members of driver_data to eliminate false cacheline sharing
between irq_workers_active and unal_qdepth.
With some workload and topology configurations, I'm seeing ~1.5%
throughput improvement in small block random read benchmarks as well
as improved latency standard deviation.
Signed-off-by: Sam Bradshaw <sbradshaw@micron.com>
Add include of <linux/prefetch.h>
Signed-off-by: Jens Axboe <axboe@fb.com>
| -rw-r--r-- | drivers/block/mtip32xx/mtip32xx.c | 15 | ||||
| -rw-r--r-- | drivers/block/mtip32xx/mtip32xx.h | 8 |
2 files changed, 13 insertions, 10 deletions
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 74abd49fabdc..295f3afbbef5 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c | |||
| @@ -39,6 +39,7 @@ | |||
| 39 | #include <../drivers/ata/ahci.h> | 39 | #include <../drivers/ata/ahci.h> |
| 40 | #include <linux/export.h> | 40 | #include <linux/export.h> |
| 41 | #include <linux/debugfs.h> | 41 | #include <linux/debugfs.h> |
| 42 | #include <linux/prefetch.h> | ||
| 42 | #include "mtip32xx.h" | 43 | #include "mtip32xx.h" |
| 43 | 44 | ||
| 44 | #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) | 45 | #define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32) |
| @@ -2380,6 +2381,8 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, | |||
| 2380 | /* Map the scatter list for DMA access */ | 2381 | /* Map the scatter list for DMA access */ |
| 2381 | nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); | 2382 | nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir); |
| 2382 | 2383 | ||
| 2384 | prefetch(&port->flags); | ||
| 2385 | |||
| 2383 | command->scatter_ents = nents; | 2386 | command->scatter_ents = nents; |
| 2384 | 2387 | ||
| 2385 | /* | 2388 | /* |
| @@ -2392,7 +2395,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, | |||
| 2392 | fis = command->command; | 2395 | fis = command->command; |
| 2393 | fis->type = 0x27; | 2396 | fis->type = 0x27; |
| 2394 | fis->opts = 1 << 7; | 2397 | fis->opts = 1 << 7; |
| 2395 | if (rq_data_dir(rq) == READ) | 2398 | if (dma_dir == DMA_FROM_DEVICE) |
| 2396 | fis->command = ATA_CMD_FPDMA_READ; | 2399 | fis->command = ATA_CMD_FPDMA_READ; |
| 2397 | else | 2400 | else |
| 2398 | fis->command = ATA_CMD_FPDMA_WRITE; | 2401 | fis->command = ATA_CMD_FPDMA_WRITE; |
| @@ -2412,7 +2415,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, | |||
| 2412 | fis->res3 = 0; | 2415 | fis->res3 = 0; |
| 2413 | fill_command_sg(dd, command, nents); | 2416 | fill_command_sg(dd, command, nents); |
| 2414 | 2417 | ||
| 2415 | if (command->unaligned) | 2418 | if (unlikely(command->unaligned)) |
| 2416 | fis->device |= 1 << 7; | 2419 | fis->device |= 1 << 7; |
| 2417 | 2420 | ||
| 2418 | /* Populate the command header */ | 2421 | /* Populate the command header */ |
| @@ -2433,7 +2436,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, struct request *rq, | |||
| 2433 | * To prevent this command from being issued | 2436 | * To prevent this command from being issued |
| 2434 | * if an internal command is in progress or error handling is active. | 2437 | * if an internal command is in progress or error handling is active. |
| 2435 | */ | 2438 | */ |
| 2436 | if (port->flags & MTIP_PF_PAUSE_IO) { | 2439 | if (unlikely(port->flags & MTIP_PF_PAUSE_IO)) { |
| 2437 | set_bit(rq->tag, port->cmds_to_issue); | 2440 | set_bit(rq->tag, port->cmds_to_issue); |
| 2438 | set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); | 2441 | set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags); |
| 2439 | return; | 2442 | return; |
| @@ -3754,7 +3757,7 @@ static bool mtip_check_unal_depth(struct blk_mq_hw_ctx *hctx, | |||
| 3754 | struct driver_data *dd = hctx->queue->queuedata; | 3757 | struct driver_data *dd = hctx->queue->queuedata; |
| 3755 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); | 3758 | struct mtip_cmd *cmd = blk_mq_rq_to_pdu(rq); |
| 3756 | 3759 | ||
| 3757 | if (!dd->unal_qdepth || rq_data_dir(rq) == READ) | 3760 | if (rq_data_dir(rq) == READ || !dd->unal_qdepth) |
| 3758 | return false; | 3761 | return false; |
| 3759 | 3762 | ||
| 3760 | /* | 3763 | /* |
| @@ -3776,11 +3779,11 @@ static int mtip_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq) | |||
| 3776 | { | 3779 | { |
| 3777 | int ret; | 3780 | int ret; |
| 3778 | 3781 | ||
| 3779 | if (mtip_check_unal_depth(hctx, rq)) | 3782 | if (unlikely(mtip_check_unal_depth(hctx, rq))) |
| 3780 | return BLK_MQ_RQ_QUEUE_BUSY; | 3783 | return BLK_MQ_RQ_QUEUE_BUSY; |
| 3781 | 3784 | ||
| 3782 | ret = mtip_submit_request(hctx, rq); | 3785 | ret = mtip_submit_request(hctx, rq); |
| 3783 | if (!ret) | 3786 | if (likely(!ret)) |
| 3784 | return BLK_MQ_RQ_QUEUE_OK; | 3787 | return BLK_MQ_RQ_QUEUE_OK; |
| 3785 | 3788 | ||
| 3786 | rq->errors = ret; | 3789 | rq->errors = ret; |
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index 4b9b554234bc..ba1b31ee22ec 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h | |||
| @@ -493,19 +493,19 @@ struct driver_data { | |||
| 493 | 493 | ||
| 494 | struct workqueue_struct *isr_workq; | 494 | struct workqueue_struct *isr_workq; |
| 495 | 495 | ||
| 496 | struct mtip_work work[MTIP_MAX_SLOT_GROUPS]; | ||
| 497 | |||
| 498 | atomic_t irq_workers_active; | 496 | atomic_t irq_workers_active; |
| 499 | 497 | ||
| 498 | struct mtip_work work[MTIP_MAX_SLOT_GROUPS]; | ||
| 499 | |||
| 500 | int isr_binding; | 500 | int isr_binding; |
| 501 | 501 | ||
| 502 | struct block_device *bdev; | 502 | struct block_device *bdev; |
| 503 | 503 | ||
| 504 | int unal_qdepth; /* qdepth of unaligned IO queue */ | ||
| 505 | |||
| 506 | struct list_head online_list; /* linkage for online list */ | 504 | struct list_head online_list; /* linkage for online list */ |
| 507 | 505 | ||
| 508 | struct list_head remove_list; /* linkage for removing list */ | 506 | struct list_head remove_list; /* linkage for removing list */ |
| 507 | |||
| 508 | int unal_qdepth; /* qdepth of unaligned IO queue */ | ||
| 509 | }; | 509 | }; |
| 510 | 510 | ||
| 511 | #endif | 511 | #endif |
