Diffstat (limited to 'drivers/block/rsxx/dma.c')
-rw-r--r--	drivers/block/rsxx/dma.c	239
1 file changed, 168 insertions(+), 71 deletions(-)
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 63176e67662f..0607513cfb41 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -28,7 +28,7 @@
 struct rsxx_dma {
 	struct list_head	list;
 	u8			cmd;
-	unsigned int		laddr;     /* Logical address on the ramsan */
+	unsigned int		laddr;     /* Logical address */
 	struct {
 		u32		off;
 		u32		cnt;
@@ -81,9 +81,6 @@ enum rsxx_hw_status {
 	HW_STATUS_FAULT		= 0x08,
 };
 
-#define STATUS_BUFFER_SIZE8	4096
-#define COMMAND_BUFFER_SIZE8	4096
-
 static struct kmem_cache *rsxx_dma_pool;
 
 struct dma_tracker {
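For orientation: the get_tracker_dma()/push_tracker() calls in the EEH code further down walk this tracker table tag by tag. The bookkeeping in the unchanged part of the file has roughly this shape (a sketch reconstructed from the surrounding driver, not part of this change):

	struct dma_tracker {
		int			next_tag;	/* free-list link to the next tag */
		struct rsxx_dma		*dma;		/* in-flight DMA holding this tag */
	};

	struct dma_tracker_list {
		spinlock_t		lock;
		int			head;		/* first free tag, -1 when exhausted */
		struct dma_tracker	list[0];	/* RSXX_MAX_OUTSTANDING_CMDS entries */
	};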
@@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
 	return tgt;
 }
 
-static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
+void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
 {
 	/* Reset all DMA Command/Status Queues */
 	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
@@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 	u32 q_depth = 0;
 	u32 intr_coal;
 
-	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
+	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
+	    unlikely(card->eeh_state))
 		return;
 
 	for (i = 0; i < card->n_targets; i++)
@@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
-static void rsxx_complete_dma(struct rsxx_cardinfo *card,
+static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 			      struct rsxx_dma *dma,
 			      unsigned int status)
 {
 	if (status & DMA_SW_ERR)
-		printk_ratelimited(KERN_ERR
-				   "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_sw_err++;
 	if (status & DMA_HW_FAULT)
-		printk_ratelimited(KERN_ERR
-				   "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_hw_fault++;
 	if (status & DMA_CANCELLED)
-		printk_ratelimited(KERN_ERR
-				   "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_cancelled++;
 
 	if (dma->dma_addr)
-		pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
+		pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+			       get_dma_size(dma),
 			       dma->cmd == HW_CMD_BLK_WRITE ?
 			       PCI_DMA_TODEVICE :
 			       PCI_DMA_FROMDEVICE);
 
 	if (dma->cb)
-		dma->cb(card, dma->cb_data, status ? 1 : 0);
+		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
 	kmem_cache_free(rsxx_dma_pool, dma);
 }
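The ratelimited printks give way to per-controller counters, which presupposes matching fields on the controller's stats structure, added elsewhere in the patch (e.g. in rsxx_priv.h). The field names below are inferred from the increments above, so treat this as a sketch of the companion change:

	/* Presumed additions to the per-controller DMA stats: */
	struct rsxx_dma_stats {
		/* ... existing counters (writes_issued, reads_issued, ...) ... */
		u32	dma_sw_err;	/* completions carrying DMA_SW_ERR */
		u32	dma_hw_fault;	/* completions carrying DMA_HW_FAULT */
		u32	dma_cancelled;	/* completions carrying DMA_CANCELLED */
	};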
@@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
 	if (requeue_cmd)
 		rsxx_requeue_dma(ctrl, dma);
 	else
-		rsxx_complete_dma(ctrl->card, dma, status);
+		rsxx_complete_dma(ctrl, dma, status);
 }
 
 static void dma_engine_stalled(unsigned long data)
 {
 	struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
 
-	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
+	    unlikely(ctrl->card->eeh_state))
 		return;
 
 	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
 	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
 	hw_cmd_buf = ctrl->cmd.buf;
 
-	if (unlikely(ctrl->card->halt))
+	if (unlikely(ctrl->card->halt) ||
+	    unlikely(ctrl->card->eeh_state))
 		return;
 
 	while (1) {
@@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
 		 */
 		if (unlikely(ctrl->card->dma_fault)) {
 			push_tracker(ctrl->trackers, tag);
-			rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
 			continue;
 		}
 
@@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work)
 
 	/* Let HW know we've queued commands. */
 	if (cmds_pending) {
-		/*
-		 * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
-		 * (which is in PCI-consistent system-memory) from the loop
-		 * above make it into the coherency domain before the
-		 * following PIO "trigger" updating the cmd.idx.  A WMB is
-		 * sufficient. We need not explicitly CPU cache-flush since
-		 * the memory is a PCI-consistent (ie; coherent) mapping.
-		 */
-		wmb();
-
 		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
 		mod_timer(&ctrl->activity_timer,
 			  jiffies + DMA_ACTIVITY_TIMEOUT);
+
+		if (unlikely(ctrl->card->eeh_state)) {
+			del_timer_sync(&ctrl->activity_timer);
+			return;
+		}
+
 		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
 	}
 }
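Two things happen at the doorbell: the long wmb() comment disappears, and an eeh_state check lands between arming the activity timer and the MMIO write. Dropping the explicit barrier is safe only if iowrite32() itself orders the preceding coherent-memory stores ahead of the doorbell, which is the usual guarantee for the MMIO write accessors; the diff does not say so explicitly, so the annotated restatement below is an interpretation:

	if (cmds_pending) {
		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
		mod_timer(&ctrl->activity_timer,
			  jiffies + DMA_ACTIVITY_TIMEOUT);

		/* A frozen slot returns all 1s on reads and drops writes,
		 * so bail out before ringing the doorbell and make sure the
		 * stall timer cannot fire against dead hardware. */
		if (unlikely(ctrl->card->eeh_state)) {
			del_timer_sync(&ctrl->activity_timer);
			return;
		}

		/* iowrite32() is expected to order the command-buffer
		 * stores from the loop above before this trigger, which is
		 * what makes the old explicit wmb() redundant. */
		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
	}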
@@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
 	hw_st_buf = ctrl->status.buf;
 
 	if (unlikely(ctrl->card->halt) ||
-	    unlikely(ctrl->card->dma_fault))
+	    unlikely(ctrl->card->dma_fault) ||
+	    unlikely(ctrl->card->eeh_state))
 		return;
 
 	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
 		if (status)
 			rsxx_handle_dma_error(ctrl, dma, status);
 		else
-			rsxx_complete_dma(ctrl->card, dma, 0);
+			rsxx_complete_dma(ctrl, dma, 0);
 
 		push_tracker(ctrl->trackers, tag);
 
@@ -727,20 +719,54 @@ bvec_err:
 
 
 /*----------------- DMA Engine Initialization & Setup -------------------*/
+int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
+{
+	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
+						&ctrl->status.dma_addr);
+	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
+					     &ctrl->cmd.dma_addr);
+	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
+		return -ENOMEM;
+
+	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
+	iowrite32(lower_32_bits(ctrl->status.dma_addr),
+		  ctrl->regmap + SB_ADD_LO);
+	iowrite32(upper_32_bits(ctrl->status.dma_addr),
+		  ctrl->regmap + SB_ADD_HI);
+
+	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
+	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
+	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
+
+	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
+	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
+			 ctrl->status.idx);
+		return -EINVAL;
+	}
+	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
+	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
+
+	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
+	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
+			 ctrl->status.idx);
+		return -EINVAL;
+	}
+	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
+	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+
+	return 0;
+}
+
 static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 			      struct rsxx_dma_ctrl *ctrl)
 {
 	int i;
+	int st;
 
 	memset(&ctrl->stats, 0, sizeof(ctrl->stats));
 
-	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
-						&ctrl->status.dma_addr);
-	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
-					     &ctrl->cmd.dma_addr);
-	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
-		return -ENOMEM;
-
 	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
 	if (!ctrl->trackers)
 		return -ENOMEM;
@@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 	INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
 	INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
 
-	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
-	iowrite32(lower_32_bits(ctrl->status.dma_addr),
-		ctrl->regmap + SB_ADD_LO);
-	iowrite32(upper_32_bits(ctrl->status.dma_addr),
-		ctrl->regmap + SB_ADD_HI);
-
-	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
-	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
-	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
-
-	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
-	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
-			ctrl->status.idx);
-		return -EINVAL;
-	}
-	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
-	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
-
-	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
-	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
-			ctrl->status.idx);
-		return -EINVAL;
-	}
-	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
-	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
-
-	wmb();
+	st = rsxx_hw_buffers_init(dev, ctrl);
+	if (st)
+		return st;
 
 	return 0;
 }
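rsxx_dma_queue_reset(), rsxx_dma_configure() and the new rsxx_hw_buffers_init() all become externally visible in this patch, which points at a PCI error handler in the core driver re-running them after a slot reset. A hypothetical sketch of such a caller follows; the handler name and control flow are illustrative, only the exported helpers are from this diff:

	/* Hypothetical slot-reset path; not part of this file. */
	static pci_ers_result_t rsxx_slot_reset_sketch(struct pci_dev *dev)
	{
		struct rsxx_cardinfo *card = pci_get_drvdata(dev);
		int i;

		/* Re-point the hardware at freshly initialized status/command
		 * buffers and resynchronize the index registers. */
		for (i = 0; i < card->n_targets; i++)
			if (rsxx_hw_buffers_init(dev, &card->ctrl[i]))
				return PCI_ERS_RESULT_DISCONNECT;

		/* Restore interrupt coalescing and stripe configuration. */
		if (rsxx_dma_configure(card))
			return PCI_ERS_RESULT_DISCONNECT;

		return PCI_ERS_RESULT_RECOVERED;
	}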
@@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
 	return 0;
 }
 
-static int rsxx_dma_configure(struct rsxx_cardinfo *card)
+int rsxx_dma_configure(struct rsxx_cardinfo *card)
 {
 	u32 intr_coal;
 
@@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 	}
 }
 
+int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
+{
+	int i;
+	int j;
+	int cnt;
+	struct rsxx_dma *dma;
+	struct list_head *issued_dmas;
+
+	issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
+			      GFP_KERNEL);
+	if (!issued_dmas)
+		return -ENOMEM;
+
+	for (i = 0; i < card->n_targets; i++) {
+		INIT_LIST_HEAD(&issued_dmas[i]);
+		cnt = 0;
+		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
+			dma = get_tracker_dma(card->ctrl[i].trackers, j);
+			if (dma == NULL)
+				continue;
+
+			if (dma->cmd == HW_CMD_BLK_WRITE)
+				card->ctrl[i].stats.writes_issued--;
+			else if (dma->cmd == HW_CMD_BLK_DISCARD)
+				card->ctrl[i].stats.discards_issued--;
+			else
+				card->ctrl[i].stats.reads_issued--;
+
+			list_add_tail(&dma->list, &issued_dmas[i]);
+			push_tracker(card->ctrl[i].trackers, j);
+			cnt++;
+		}
+
+		spin_lock(&card->ctrl[i].queue_lock);
+		list_splice(&issued_dmas[i], &card->ctrl[i].queue);
+
+		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
+		card->ctrl[i].stats.sw_q_depth += cnt;
+		card->ctrl[i].e_cnt = 0;
+
+		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+			if (dma->dma_addr)
+				pci_unmap_page(card->dev, dma->dma_addr,
+					       get_dma_size(dma),
+					       dma->cmd == HW_CMD_BLK_WRITE ?
+					       PCI_DMA_TODEVICE :
+					       PCI_DMA_FROMDEVICE);
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+	}
+
+	kfree(issued_dmas);
+
+	return 0;
+}
+
+void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
+{
+	struct rsxx_dma *dma;
+	struct rsxx_dma *tmp;
+	int i;
+
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock(&card->ctrl[i].queue_lock);
+		list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
+			list_del(&dma->list);
+
+			rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+	}
+}
+
+int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
+{
+	struct rsxx_dma *dma;
+	int i;
+
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock(&card->ctrl[i].queue_lock);
+		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+			dma->dma_addr = pci_map_page(card->dev, dma->page,
+					dma->pg_off, get_dma_size(dma),
+					dma->cmd == HW_CMD_BLK_WRITE ?
+					PCI_DMA_TODEVICE :
+					PCI_DMA_FROMDEVICE);
+			if (!dma->dma_addr) {
+				spin_unlock(&card->ctrl[i].queue_lock);
+				kmem_cache_free(rsxx_dma_pool, dma);
+				return -ENOMEM;
+			}
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+	}
+
+	return 0;
+}
 
 int rsxx_dma_init(void)
 {
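Taken together, the three rsxx_eeh_* helpers split recovery into distinct phases: pull in-flight commands back off the hardware (save), then either rebuild their DMA mappings for re-issue (remap) or fail them all back to the block layer (cancel). One plausible composition, purely as a sketch since the actual callers live outside dma.c and are not shown in this diff:

	/* Sketch: likely sequencing of the new helpers during EEH recovery. */
	static void rsxx_eeh_recovery_sketch(struct rsxx_cardinfo *card,
					     bool reset_ok)
	{
		/* Freeze: reclaim every tag still owned by the hardware; this
		 * also tears down the PCI mappings, which die with the slot. */
		rsxx_eeh_save_issued_dmas(card);

		if (!reset_ok) {
			/* Unrecoverable: complete everything as DMA_CANCELLED. */
			rsxx_eeh_cancel_dmas(card);
			return;
		}

		/* Recovered: remap the queued DMAs so the issue workers can
		 * push them back out, cancelling if remapping fails. */
		if (rsxx_eeh_remap_dmas(card))
			rsxx_eeh_cancel_dmas(card);
	}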