author	Philip J Kelleher <pjk1939@linux.vnet.ibm.com>	2013-09-04 14:59:35 -0400
committer	Jens Axboe <axboe@kernel.dk>	2013-11-08 11:10:28 -0500
commit	1b21f5b2ad6047995b19b15024353a9fa64810f1 (patch)
tree	e0ecbc01dfe685322485b801540bd7a289283378 /drivers/block/rsxx
parent	e5feab229f199dadee91073fbef5b507046086fd (diff)
rsxx: Moving pci_map_page to prevent overflow.
The pci_map_page call has been moved into the driver's DMA issue workqueue to keep us from running out of mappable addresses on non-HWWD PCIe x8 slots. The maximum amount that can possibly be mapped at one time is now 255 DMAs x 4 DMA channels x 4096 bytes.

Signed-off-by: Philip J Kelleher <pjk1939@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
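For scale, the bound described above is small in practice: 255 outstanding commands per channel x 4 channels x one 4 KiB page each comes to 4,177,920 bytes, roughly 4 MiB, far below the ~2 GB mapping window of a non-HWWD x8 slot. A minimal sketch of that arithmetic follows; the constant names are illustrative, taken from the commit message rather than from the driver:

#include <stdio.h>

/* Illustrative constants; the values come from the commit message, the
 * names do not appear in the driver. */
#define CMDS_PER_CHANNEL 255	/* hardware takes at most 255 outstanding commands per channel */
#define DMA_CHANNELS	 4
#define BYTES_PER_CMD	 4096	/* each command maps at most one 4 KiB page */

int main(void)
{
	unsigned long worst_case = (unsigned long)CMDS_PER_CHANNEL *
				   DMA_CHANNELS * BYTES_PER_CMD;

	/* 255 * 4 * 4096 = 4177920 bytes */
	printf("worst-case mapped DMA memory: %lu bytes (%.2f MiB)\n",
	       worst_case, worst_case / (1024.0 * 1024.0));
	return 0;
}

The bound only holds because, per the diff below, the mapping moves from rsxx_queue_dma() to rsxx_issue_dmas(): only DMAs actually outstanding in hardware hold a mapping, rather than every DMA sitting in the software queue.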
Diffstat (limited to 'drivers/block/rsxx')
-rw-r--r--	drivers/block/rsxx/core.c	5
-rw-r--r--	drivers/block/rsxx/dma.c	70
2 files changed, 28 insertions(+), 47 deletions(-)
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index e740a650d546..a8de2eec6ff3 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -749,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
 
 	card->eeh_state = 0;
 
-	st = rsxx_eeh_remap_dmas(card);
-	if (st)
-		goto failed_remap_dmas;
-
 	spin_lock_irqsave(&card->irq_lock, flags);
 	if (card->n_targets & RSXX_MAX_TARGETS)
 		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -779,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
 	return PCI_ERS_RESULT_RECOVERED;
 
 failed_hw_buffers_init:
-failed_remap_dmas:
 	for (i = 0; i < card->n_targets; i++) {
 		if (card->ctrl[i].status.buf)
 			pci_free_consistent(card->dev,
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 71d1ca2a1444..34fd1018c8e5 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -397,6 +397,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 	int tag;
 	int cmds_pending = 0;
 	struct hw_cmd *hw_cmd_buf;
+	int dir;
 
 	hw_cmd_buf = ctrl->cmd.buf;
 
@@ -433,6 +434,28 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 			continue;
 		}
 
+		if (dma->cmd == HW_CMD_BLK_WRITE)
+			dir = PCI_DMA_TODEVICE;
+		else
+			dir = PCI_DMA_FROMDEVICE;
+
+		/*
+		 * pci_map_page is called here because, by design, we can
+		 * only issue up to 255 commands to the hardware at one
+		 * time per DMA channel. So the maximum amount of mapped
+		 * memory is 255 * 4 channels * 4096 bytes, which is less
+		 * than 2GB, the limit of an x8 non-HWWD PCIe slot. This
+		 * way pci_map_page should never fail for lack of
+		 * mappable memory.
+		 */
+		dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+					dma->pg_off, dma->sub_page.cnt << 9, dir);
+		if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+			push_tracker(ctrl->trackers, tag);
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+			continue;
+		}
+
 		set_tracker_dma(ctrl->trackers, tag, dma);
 		hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
 		hw_cmd_buf[ctrl->cmd.idx].tag = tag;
@@ -629,14 +652,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 	if (!dma)
 		return -ENOMEM;
 
-	dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
-				     dir ? PCI_DMA_TODEVICE :
-				     PCI_DMA_FROMDEVICE);
-	if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
-		kmem_cache_free(rsxx_dma_pool, dma);
-		return -ENOMEM;
-	}
-
 	dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
 	dma->laddr = laddr;
 	dma->sub_page.off = (dma_off >> 9);
@@ -1039,6 +1054,11 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 		else
 			card->ctrl[i].stats.reads_issued--;
 
+		pci_unmap_page(card->dev, dma->dma_addr,
+			       get_dma_size(dma),
+			       dma->cmd == HW_CMD_BLK_WRITE ?
+			       PCI_DMA_TODEVICE :
+			       PCI_DMA_FROMDEVICE);
 		list_add_tail(&dma->list, &issued_dmas[i]);
 		push_tracker(card->ctrl[i].trackers, j);
 		cnt++;
@@ -1050,15 +1070,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
 		card->ctrl[i].stats.sw_q_depth += cnt;
 		card->ctrl[i].e_cnt = 0;
-
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			if (!pci_dma_mapping_error(card->dev, dma->dma_addr))
-				pci_unmap_page(card->dev, dma->dma_addr,
-					       get_dma_size(dma),
-					       dma->cmd == HW_CMD_BLK_WRITE ?
-					       PCI_DMA_TODEVICE :
-					       PCI_DMA_FROMDEVICE);
-		}
 		spin_unlock_bh(&card->ctrl[i].queue_lock);
 	}
 
@@ -1067,31 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 	return 0;
 }
 
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
-	struct rsxx_dma *dma;
-	int i;
-
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock_bh(&card->ctrl[i].queue_lock);
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			dma->dma_addr = pci_map_page(card->dev, dma->page,
-					dma->pg_off, get_dma_size(dma),
-					dma->cmd == HW_CMD_BLK_WRITE ?
-					PCI_DMA_TODEVICE :
-					PCI_DMA_FROMDEVICE);
-			if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
-				spin_unlock_bh(&card->ctrl[i].queue_lock);
-				kmem_cache_free(rsxx_dma_pool, dma);
-				return -ENOMEM;
-			}
-		}
-		spin_unlock_bh(&card->ctrl[i].queue_lock);
-	}
-
-	return 0;
-}
-
 int rsxx_dma_init(void)
 {
 	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);