Diffstat (limited to 'drivers/block/rsxx/dma.c')
-rw-r--r--   drivers/block/rsxx/dma.c | 119
1 file changed, 60 insertions(+), 59 deletions(-)
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index bed32f16b084..fc88ba3e1bd2 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -221,6 +221,21 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
+{
+	if (dma->cmd != HW_CMD_BLK_DISCARD) {
+		if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+			pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+				       get_dma_size(dma),
+				       dma->cmd == HW_CMD_BLK_WRITE ?
+				       PCI_DMA_TODEVICE :
+				       PCI_DMA_FROMDEVICE);
+		}
+	}
+
+	kmem_cache_free(rsxx_dma_pool, dma);
+}
+
 static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 			      struct rsxx_dma *dma,
 			      unsigned int status)
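The new rsxx_free_dma() helper gives the driver a single place that unmaps and frees a DMA descriptor, and it detects a descriptor that was never mapped with pci_dma_mapping_error() rather than by comparing dma_addr against 0, since 0 can be a valid bus address on some platforms. A minimal sketch of that idiom against the legacy PCI DMA API used here (example_map() and its parameters are illustrative, not part of the driver):

#include <linux/pci.h>

static int example_map(struct pci_dev *pdev, struct page *page,
		       unsigned long off, size_t len)
{
	dma_addr_t addr;

	addr = pci_map_page(pdev, page, off, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, addr))
		return -ENOMEM;	/* nothing was mapped, so nothing to unmap */

	/* ... hand addr to the hardware ... */

	pci_unmap_page(pdev, addr, len, PCI_DMA_TODEVICE);
	return 0;
}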
@@ -232,21 +247,14 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 	if (status & DMA_CANCELLED)
 		ctrl->stats.dma_cancelled++;
 
-	if (dma->dma_addr)
-		pci_unmap_page(ctrl->card->dev, dma->dma_addr,
-			       get_dma_size(dma),
-			       dma->cmd == HW_CMD_BLK_WRITE ?
-			       PCI_DMA_TODEVICE :
-			       PCI_DMA_FROMDEVICE);
-
 	if (dma->cb)
 		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
-	kmem_cache_free(rsxx_dma_pool, dma);
+	rsxx_free_dma(ctrl, dma);
 }
 
 int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
-			   struct list_head *q)
+			   struct list_head *q, unsigned int done)
 {
 	struct rsxx_dma *dma;
 	struct rsxx_dma *tmp;
@@ -254,7 +262,10 @@ int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
 
 	list_for_each_entry_safe(dma, tmp, q, list) {
 		list_del(&dma->list);
-		rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		if (done & COMPLETE_DMA)
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		else
+			rsxx_free_dma(ctrl, dma);
 		cnt++;
 	}
 
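rsxx_cleanup_dma_queue() now takes a `done` mode so each caller can choose between completing cancelled DMAs (which runs their callbacks) and merely freeing them. The code above only tests whether the COMPLETE_DMA bit is set in `done`, so FREE_DMA just has to leave that bit clear. A sketch of the two flags (the real definitions are part of this commit's header changes, which this single-file view does not show; the values here are an assumption):

#define COMPLETE_DMA	0x1	/* complete as cancelled: run the callback */
#define FREE_DMA	0x2	/* no callback: unmap and free only */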
@@ -370,7 +381,7 @@ static void dma_engine_stalled(unsigned long data)
 
 	/* Clean up the DMA queue */
 	spin_lock(&ctrl->queue_lock);
-	cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+	cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
 	spin_unlock(&ctrl->queue_lock);
 
 	cnt += rsxx_dma_cancel(ctrl);
@@ -388,6 +399,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 	int tag;
 	int cmds_pending = 0;
 	struct hw_cmd *hw_cmd_buf;
+	int dir;
 
 	hw_cmd_buf = ctrl->cmd.buf;
 
@@ -424,6 +436,31 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 			continue;
 		}
 
+		if (dma->cmd != HW_CMD_BLK_DISCARD) {
+			if (dma->cmd == HW_CMD_BLK_WRITE)
+				dir = PCI_DMA_TODEVICE;
+			else
+				dir = PCI_DMA_FROMDEVICE;
+
+			/*
+			 * The function pci_map_page is placed here because we
+			 * can only, by design, issue up to 255 commands to the
+			 * hardware at one time per DMA channel. So the maximum
+			 * amount of mapped memory would be 255 * 4 channels *
+			 * 4096 Bytes which is less than 2GB, the limit of a x8
+			 * Non-HWWD PCIe slot. This way the pci_map_page
+			 * function should never fail because of a lack of
+			 * mappable memory.
+			 */
+			dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+					dma->pg_off, dma->sub_page.cnt << 9, dir);
+			if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+				push_tracker(ctrl->trackers, tag);
+				rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+				continue;
+			}
+		}
+
 		set_tracker_dma(ctrl->trackers, tag, dma);
 		hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
 		hw_cmd_buf[ctrl->cmd.idx].tag = tag;
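The sizing claim in the new comment, worked out (the figures come straight from that comment; the enum names are illustrative only):

enum {
	MAX_CMDS_PER_CHANNEL	= 255,	/* hardware tag limit per DMA channel */
	DMA_CHANNELS_PER_CARD	= 4,
	BYTES_MAPPED_PER_CMD	= 4096,	/* one 4 KiB page per command */
};

/* Worst case outstanding: 255 * 4 * 4096 = 4,177,920 bytes, roughly
 * 4 MiB, far below the ~2 GB mappable-memory limit the comment cites
 * for an x8 non-HWWD PCIe slot. Mapping at issue time therefore keeps
 * the total bounded, where mapping at queue time (removed in the next
 * hunk) did not. */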
@@ -620,14 +657,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 	if (!dma)
 		return -ENOMEM;
 
-	dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
-				     dir ? PCI_DMA_TODEVICE :
-				     PCI_DMA_FROMDEVICE);
-	if (!dma->dma_addr) {
-		kmem_cache_free(rsxx_dma_pool, dma);
-		return -ENOMEM;
-	}
-
 	dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
 	dma->laddr = laddr;
 	dma->sub_page.off = (dma_off >> 9);
@@ -736,11 +765,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	return 0;
 
 bvec_err:
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock_bh(&card->ctrl[i].queue_lock);
-		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
-		spin_unlock_bh(&card->ctrl[i].queue_lock);
-	}
+	for (i = 0; i < card->n_targets; i++)
+		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
+				       FREE_DMA);
 
 	return st;
 }
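The error path above also stops taking queue_lock: at bvec_err the per-target dma_list is still local to rsxx_dma_queue_bio() and has not been spliced onto the shared ctrl queue, and these DMAs were never issued (hence never mapped), so FREE_DMA is enough. The two calling conventions after this change, as seen across the callers in this diff (sketch):

	/* shared software queue: hold the lock, complete as cancelled */
	spin_lock_bh(&ctrl->queue_lock);
	cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
	spin_unlock_bh(&ctrl->queue_lock);

	/* private, never-issued list: no lock, just free */
	rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i], FREE_DMA);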
@@ -990,7 +1017,7 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 
 	/* Clean up the DMA queue */
 	spin_lock_bh(&ctrl->queue_lock);
-	rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+	rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
 	spin_unlock_bh(&ctrl->queue_lock);
 
 	rsxx_dma_cancel(ctrl);
@@ -1032,6 +1059,14 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 			else
 				card->ctrl[i].stats.reads_issued--;
 
+			if (dma->cmd != HW_CMD_BLK_DISCARD) {
+				pci_unmap_page(card->dev, dma->dma_addr,
+					       get_dma_size(dma),
+					       dma->cmd == HW_CMD_BLK_WRITE ?
+					       PCI_DMA_TODEVICE :
+					       PCI_DMA_FROMDEVICE);
+			}
+
 			list_add_tail(&dma->list, &issued_dmas[i]);
 			push_tracker(card->ctrl[i].trackers, j);
 			cnt++;
@@ -1043,15 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
 		card->ctrl[i].stats.sw_q_depth += cnt;
 		card->ctrl[i].e_cnt = 0;
-
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			if (dma->dma_addr)
-				pci_unmap_page(card->dev, dma->dma_addr,
-					       get_dma_size(dma),
-					       dma->cmd == HW_CMD_BLK_WRITE ?
-					       PCI_DMA_TODEVICE :
-					       PCI_DMA_FROMDEVICE);
-		}
 		spin_unlock_bh(&card->ctrl[i].queue_lock);
 	}
 
@@ -1060,31 +1086,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 	return 0;
 }
 
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
-	struct rsxx_dma *dma;
-	int i;
-
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock_bh(&card->ctrl[i].queue_lock);
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			dma->dma_addr = pci_map_page(card->dev, dma->page,
-					dma->pg_off, get_dma_size(dma),
-					dma->cmd == HW_CMD_BLK_WRITE ?
-					PCI_DMA_TODEVICE :
-					PCI_DMA_FROMDEVICE);
-			if (!dma->dma_addr) {
-				spin_unlock_bh(&card->ctrl[i].queue_lock);
-				kmem_cache_free(rsxx_dma_pool, dma);
-				return -ENOMEM;
-			}
-		}
-		spin_unlock_bh(&card->ctrl[i].queue_lock);
-	}
-
-	return 0;
-}
-
 int rsxx_dma_init(void)
 {
 	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
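Taken together, the EEH hunks invert the old recovery scheme: rsxx_eeh_save_issued_dmas() now unmaps each issued, non-discard DMA as it is pulled back off the hardware, and rsxx_issue_dmas() remaps pages at (re)issue time, so the separate rsxx_eeh_remap_dmas() pass becomes dead code and is deleted. A sketch of the resulting flow (descriptive comment, not code from this diff):

/*
 * EEH recovery after this change:
 *   1. rsxx_eeh_save_issued_dmas(): unmap each issued, non-discard DMA
 *      and move it back to the software queue.
 *   2. Reissue: rsxx_issue_dmas() calls pci_map_page() again, and a
 *      mapping failure is handled at that single point with
 *      pci_dma_mapping_error().
 */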