Diffstat (limited to 'drivers/block/rsxx/dma.c')
-rw-r--r--	drivers/block/rsxx/dma.c	216
1 file changed, 158 insertions(+), 58 deletions(-)
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index efd75b55a670..60d344d002ec 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -81,9 +81,6 @@ enum rsxx_hw_status {
 	HW_STATUS_FAULT		= 0x08,
 };

-#define STATUS_BUFFER_SIZE8	4096
-#define COMMAND_BUFFER_SIZE8	4096
-
 static struct kmem_cache *rsxx_dma_pool;

 struct dma_tracker {
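The two buffer-size defines are removed here, yet the new rsxx_hw_buffers_init() below still uses them, so they must now come from a shared header visible to the EEH recovery code outside dma.c. A sketch of the relocated defines, assuming they move to rsxx_priv.h (the header change is not part of this file's diff):

	/* assumed relocation into rsxx_priv.h */
	#define STATUS_BUFFER_SIZE8	4096
	#define COMMAND_BUFFER_SIZE8	4096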
@@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
 	return tgt;
 }

-static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
+void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
 {
 	/* Reset all DMA Command/Status Queues */
 	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
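Dropping static here (and on rsxx_dma_configure() further down) lets the EEH recovery path in another file drive a queue reset and reconfiguration directly. Matching declarations would have to land in a shared header; a sketch, assuming rsxx_priv.h, covering every symbol this diff exports:

	void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
	int rsxx_dma_configure(struct rsxx_cardinfo *card);
	int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
	void rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
	void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
	int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);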
@@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 	u32 q_depth = 0;
 	u32 intr_coal;

-	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
+	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
+	    unlikely(card->eeh_state))
 		return;

 	for (i = 0; i < card->n_targets; i++)
@@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }

 /*----------------- RSXX DMA Handling -------------------*/
-static void rsxx_complete_dma(struct rsxx_cardinfo *card,
+static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 			      struct rsxx_dma *dma,
 			      unsigned int status)
 {
 	if (status & DMA_SW_ERR)
-		printk_ratelimited(KERN_ERR
-				   "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_sw_err++;
 	if (status & DMA_HW_FAULT)
-		printk_ratelimited(KERN_ERR
-				   "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_hw_fault++;
 	if (status & DMA_CANCELLED)
-		printk_ratelimited(KERN_ERR
-				   "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_cancelled++;

 	if (dma->dma_addr)
-		pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
+		pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+			       get_dma_size(dma),
 			       dma->cmd == HW_CMD_BLK_WRITE ?
 			       PCI_DMA_TODEVICE :
 			       PCI_DMA_FROMDEVICE);

 	if (dma->cb)
-		dma->cb(card, dma->cb_data, status ? 1 : 0);
+		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);

 	kmem_cache_free(rsxx_dma_pool, dma);
 }
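rsxx_complete_dma() now takes the per-channel ctrl instead of the card, so error completions are accounted per DMA channel: the three ratelimited printks become cheap counters. The fields it increments imply additions to the controller's stats structure; a minimal sketch, assuming they sit beside the existing counters in rsxx_priv.h:

	/* assumed new members of the per-ctrl stats in rsxx_priv.h */
	u32	dma_sw_err;	/* DMA_SW_ERR completions */
	u32	dma_hw_fault;	/* DMA_HW_FAULT completions */
	u32	dma_cancelled;	/* DMA_CANCELLED completions */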
@@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
 	if (requeue_cmd)
 		rsxx_requeue_dma(ctrl, dma);
 	else
-		rsxx_complete_dma(ctrl->card, dma, status);
+		rsxx_complete_dma(ctrl, dma, status);
 }

 static void dma_engine_stalled(unsigned long data)
 {
 	struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;

-	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
+	    unlikely(ctrl->card->eeh_state))
 		return;

 	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
 	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
 	hw_cmd_buf = ctrl->cmd.buf;

-	if (unlikely(ctrl->card->halt))
+	if (unlikely(ctrl->card->halt) ||
+	    unlikely(ctrl->card->eeh_state))
 		return;

 	while (1) {
@@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
 		 */
		if (unlikely(ctrl->card->dma_fault)) {
 			push_tracker(ctrl->trackers, tag);
-			rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
 			continue;
 		}

@@ -435,6 +430,12 @@ static void rsxx_issue_dmas(struct work_struct *work)
 		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
 		mod_timer(&ctrl->activity_timer,
 			  jiffies + DMA_ACTIVITY_TIMEOUT);
+
+		if (unlikely(ctrl->card->eeh_state)) {
+			del_timer_sync(&ctrl->activity_timer);
+			return;
+		}
+
 		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
 	}
 }
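With halt, dma_fault, and now eeh_state all forcing early returns, the issue and completion workers go quiet while the slot is frozen, and the check placed after mod_timer() keeps the driver from ringing the SW_CMD_IDX doorbell into a frozen slot, where MMIO writes are silently dropped. Whatever thaws the slot must then re-kick the workers so the still-queued DMAs get issued. A hypothetical resume-side sketch (the helper name and its placement are assumptions, not part of this diff):

	static void rsxx_eeh_resume_dmas(struct rsxx_cardinfo *card)
	{
		int i;

		card->eeh_state = 0;	/* unfreeze the issue/done paths */
		for (i = 0; i < card->n_targets; i++)
			queue_work(card->ctrl[i].issue_wq,
				   &card->ctrl[i].issue_dma_work);
	}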
@@ -453,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
 	hw_st_buf = ctrl->status.buf;

 	if (unlikely(ctrl->card->halt) ||
-	    unlikely(ctrl->card->dma_fault))
+	    unlikely(ctrl->card->dma_fault) ||
+	    unlikely(ctrl->card->eeh_state))
 		return;

 	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@@ -498,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
 		if (status)
 			rsxx_handle_dma_error(ctrl, dma, status);
 		else
-			rsxx_complete_dma(ctrl->card, dma, 0);
+			rsxx_complete_dma(ctrl, dma, 0);

 		push_tracker(ctrl->trackers, tag);

@@ -717,20 +719,54 @@ bvec_err:


 /*----------------- DMA Engine Initialization & Setup -------------------*/
+int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
+{
+	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
+						&ctrl->status.dma_addr);
+	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
+					     &ctrl->cmd.dma_addr);
+	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
+		return -ENOMEM;
+
+	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
+	iowrite32(lower_32_bits(ctrl->status.dma_addr),
+		  ctrl->regmap + SB_ADD_LO);
+	iowrite32(upper_32_bits(ctrl->status.dma_addr),
+		  ctrl->regmap + SB_ADD_HI);
+
+	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
+	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
+	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
+
+	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
+	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
+			 ctrl->status.idx);
+		return -EINVAL;
+	}
+	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
+	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
+
+	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
+	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
+			 ctrl->status.idx);
+		return -EINVAL;
+	}
+	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
+	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+
+	return 0;
+}
+
 static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 				  struct rsxx_dma_ctrl *ctrl)
 {
 	int i;
+	int st;

 	memset(&ctrl->stats, 0, sizeof(ctrl->stats));

-	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
-						&ctrl->status.dma_addr);
-	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
-					     &ctrl->cmd.dma_addr);
-	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
-		return -ENOMEM;
-
 	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
 	if (!ctrl->trackers)
 		return -ENOMEM;
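Factoring the buffer allocation and register programming out into rsxx_hw_buffers_init() is the heart of this change: after a PCI slot reset wipes the adapter's registers, recovery code can re-program the status/command buffer addresses and re-sync the queue indices without tearing down the rest of the controller state. A hypothetical slot-reset handler showing the intended call sequence (the handler name and ordering are assumptions; only the exported helpers come from this diff):

	static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
	{
		struct rsxx_cardinfo *card = pci_get_drvdata(dev);
		int i;

		pci_set_master(dev);
		rsxx_dma_queue_reset(card);	/* quiesce the HW queues */

		for (i = 0; i < card->n_targets; i++)
			if (rsxx_hw_buffers_init(dev, &card->ctrl[i]))
				return PCI_ERS_RESULT_DISCONNECT;

		if (rsxx_dma_configure(card))	/* restore stripe/intr config */
			return PCI_ERS_RESULT_DISCONNECT;

		return PCI_ERS_RESULT_RECOVERED;
	}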
@@ -760,33 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 	INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
 	INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);

-	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
-	iowrite32(lower_32_bits(ctrl->status.dma_addr),
-		  ctrl->regmap + SB_ADD_LO);
-	iowrite32(upper_32_bits(ctrl->status.dma_addr),
-		  ctrl->regmap + SB_ADD_HI);
-
-	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
-	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
-	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
-
-	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
-	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
-			 ctrl->status.idx);
-		return -EINVAL;
-	}
-	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
-	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
-
-	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
-	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
-			 ctrl->status.idx);
-		return -EINVAL;
-	}
-	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
-	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+	st = rsxx_hw_buffers_init(dev, ctrl);
+	if (st)
+		return st;

 	return 0;
 }
@@ -822,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
 	return 0;
 }

-static int rsxx_dma_configure(struct rsxx_cardinfo *card)
+int rsxx_dma_configure(struct rsxx_cardinfo *card)
 {
 	u32 intr_coal;

@@ -968,6 +980,94 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 	}
 }

+void rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
+{
+	int i;
+	int j;
+	int cnt;
+	struct rsxx_dma *dma;
+	struct list_head issued_dmas[card->n_targets];
+
+	for (i = 0; i < card->n_targets; i++) {
+		INIT_LIST_HEAD(&issued_dmas[i]);
+		cnt = 0;
+		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
+			dma = get_tracker_dma(card->ctrl[i].trackers, j);
+			if (dma == NULL)
+				continue;
+
+			if (dma->cmd == HW_CMD_BLK_WRITE)
+				card->ctrl[i].stats.writes_issued--;
+			else if (dma->cmd == HW_CMD_BLK_DISCARD)
+				card->ctrl[i].stats.discards_issued--;
+			else
+				card->ctrl[i].stats.reads_issued--;
+
+			list_add_tail(&dma->list, &issued_dmas[i]);
+			push_tracker(card->ctrl[i].trackers, j);
+			cnt++;
+		}
+
+		spin_lock(&card->ctrl[i].queue_lock);
+		list_splice(&issued_dmas[i], &card->ctrl[i].queue);
+
+		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
+		card->ctrl[i].stats.sw_q_depth += cnt;
+		card->ctrl[i].e_cnt = 0;
+
+		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+			if (dma->dma_addr)
+				pci_unmap_page(card->dev, dma->dma_addr,
+					       get_dma_size(dma),
+					       dma->cmd == HW_CMD_BLK_WRITE ?
+					       PCI_DMA_TODEVICE :
+					       PCI_DMA_FROMDEVICE);
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+	}
+}
+
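rsxx_eeh_save_issued_dmas() walks every tracker slot, rewinds the issued counters, and splices the in-flight DMAs back onto the software queue so they can be replayed after the reset; the pci_unmap_page() pass then drops the now-stale mappings. Note the variable-length array of list heads on the stack: with only a few DMA channels this stays small, but a heap allocation is the more defensive pattern. A sketch of that alternative (an assumption, not what this revision does):

	struct list_head *issued_dmas;

	issued_dmas = kmalloc_array(card->n_targets, sizeof(*issued_dmas),
				    GFP_KERNEL);
	if (!issued_dmas)
		return;		/* or propagate -ENOMEM to the caller */

	/* ... per-target tracker scan and list_splice() as above ... */

	kfree(issued_dmas);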
+void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
+{
+	struct rsxx_dma *dma;
+	struct rsxx_dma *tmp;
+	int i;
+
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock(&card->ctrl[i].queue_lock);
+		list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
+			list_del(&dma->list);
+
+			rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+	}
+}
+
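rsxx_eeh_cancel_dmas() is the give-up path: everything sitting on the software queues, including DMAs just reclaimed from the hardware, completes with DMA_CANCELLED, which rsxx_complete_dma() turns into an error completion for the upper layer. A hypothetical error_detected handler tying the freeze-side helpers together (the handler and any names beyond this diff are assumptions):

	static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
						    enum pci_channel_state error)
	{
		struct rsxx_cardinfo *card = pci_get_drvdata(dev);

		card->eeh_state = 1;			/* freeze issue/done paths */
		rsxx_eeh_save_issued_dmas(card);	/* reclaim in-flight DMAs */

		if (error == pci_channel_io_perm_failure) {
			rsxx_eeh_cancel_dmas(card);	/* slot is dead: fail all I/O */
			return PCI_ERS_RESULT_DISCONNECT;
		}

		return PCI_ERS_RESULT_NEED_RESET;
	}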
+int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
+{
+	struct rsxx_dma *dma;
+	struct rsxx_dma *tmp;
+	int i;
+
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock(&card->ctrl[i].queue_lock);
+		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+			dma->dma_addr = pci_map_page(card->dev, dma->page,
+					dma->pg_off, get_dma_size(dma),
+					dma->cmd == HW_CMD_BLK_WRITE ?
+					PCI_DMA_TODEVICE :
+					PCI_DMA_FROMDEVICE);
+			if (!dma->dma_addr) {
+				kmem_cache_free(rsxx_dma_pool, dma);
+				return -ENOMEM;
+			}
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+	}
+
+	return 0;
+}

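rsxx_eeh_remap_dmas() re-creates the DMA mappings torn down in the save path, so the re-issued commands reference valid bus addresses. One caveat: pci_map_page() failures are conventionally detected with pci_dma_mapping_error() rather than a NULL test, since a valid dma_addr_t can legitimately be zero on some platforms. A sketch of that check (an assumption; this revision tests for zero):

	dma->dma_addr = pci_map_page(card->dev, dma->page, dma->pg_off,
				     get_dma_size(dma),
				     dma->cmd == HW_CMD_BLK_WRITE ?
				     PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(card->dev, dma->dma_addr)) {
		kmem_cache_free(rsxx_dma_pool, dma);
		return -ENOMEM;
	}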
 int rsxx_dma_init(void)
 {