Diffstat (limited to 'drivers/block/rsxx')
-rw-r--r--   drivers/block/rsxx/core.c        8
-rw-r--r--   drivers/block/rsxx/dev.c         8
-rw-r--r--   drivers/block/rsxx/dma.c       119
-rw-r--r--   drivers/block/rsxx/rsxx_priv.h  11
4 files changed, 76 insertions, 70 deletions
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 6e85e21445eb..a8de2eec6ff3 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -654,7 +654,8 @@ static void rsxx_eeh_failure(struct pci_dev *dev)
 	for (i = 0; i < card->n_targets; i++) {
 		spin_lock_bh(&card->ctrl[i].queue_lock);
 		cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
-					     &card->ctrl[i].queue);
+					     &card->ctrl[i].queue,
+					     COMPLETE_DMA);
 		spin_unlock_bh(&card->ctrl[i].queue_lock);
 
 		cnt += rsxx_dma_cancel(&card->ctrl[i]);
@@ -748,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
 
 	card->eeh_state = 0;
 
-	st = rsxx_eeh_remap_dmas(card);
-	if (st)
-		goto failed_remap_dmas;
-
 	spin_lock_irqsave(&card->irq_lock, flags);
 	if (card->n_targets & RSXX_MAX_TARGETS)
 		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -778,7 +775,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
 	return PCI_ERS_RESULT_RECOVERED;
 
 failed_hw_buffers_init:
-failed_remap_dmas:
 	for (i = 0; i < card->n_targets; i++) {
 		if (card->ctrl[i].status.buf)
 			pci_free_consistent(card->dev,
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index d7af441880be..2284f5d3a54a 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -295,13 +295,15 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
 		return -ENOMEM;
 	}
 
-	blk_size = card->config.data.block_size;
+	if (card->config_valid) {
+		blk_size = card->config.data.block_size;
+		blk_queue_dma_alignment(card->queue, blk_size - 1);
+		blk_queue_logical_block_size(card->queue, blk_size);
+	}
 
 	blk_queue_make_request(card->queue, rsxx_make_request);
 	blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
-	blk_queue_dma_alignment(card->queue, blk_size - 1);
 	blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
-	blk_queue_logical_block_size(card->queue, blk_size);
 	blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
 	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index bed32f16b084..fc88ba3e1bd2 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -221,6 +221,21 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
+{
+	if (dma->cmd != HW_CMD_BLK_DISCARD) {
+		if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+			pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+				       get_dma_size(dma),
+				       dma->cmd == HW_CMD_BLK_WRITE ?
+				       PCI_DMA_TODEVICE :
+				       PCI_DMA_FROMDEVICE);
+		}
+	}
+
+	kmem_cache_free(rsxx_dma_pool, dma);
+}
+
 static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 			      struct rsxx_dma *dma,
 			      unsigned int status)
@@ -232,21 +247,14 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 	if (status & DMA_CANCELLED)
 		ctrl->stats.dma_cancelled++;
 
-	if (dma->dma_addr)
-		pci_unmap_page(ctrl->card->dev, dma->dma_addr,
-			       get_dma_size(dma),
-			       dma->cmd == HW_CMD_BLK_WRITE ?
-			       PCI_DMA_TODEVICE :
-			       PCI_DMA_FROMDEVICE);
-
 	if (dma->cb)
 		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
-	kmem_cache_free(rsxx_dma_pool, dma);
+	rsxx_free_dma(ctrl, dma);
 }
 
 int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
-			   struct list_head *q)
+			   struct list_head *q, unsigned int done)
 {
 	struct rsxx_dma *dma;
 	struct rsxx_dma *tmp;
@@ -254,7 +262,10 @@ int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
 
 	list_for_each_entry_safe(dma, tmp, q, list) {
 		list_del(&dma->list);
-		rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		if (done & COMPLETE_DMA)
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+		else
+			rsxx_free_dma(ctrl, dma);
 		cnt++;
 	}
 
@@ -370,7 +381,7 @@ static void dma_engine_stalled(unsigned long data)
 
 	/* Clean up the DMA queue */
 	spin_lock(&ctrl->queue_lock);
-	cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+	cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
 	spin_unlock(&ctrl->queue_lock);
 
 	cnt += rsxx_dma_cancel(ctrl);
@@ -388,6 +399,7 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 	int tag;
 	int cmds_pending = 0;
 	struct hw_cmd *hw_cmd_buf;
+	int dir;
 
 	hw_cmd_buf = ctrl->cmd.buf;
 
@@ -424,6 +436,31 @@ static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 			continue;
 		}
 
+		if (dma->cmd != HW_CMD_BLK_DISCARD) {
+			if (dma->cmd == HW_CMD_BLK_WRITE)
+				dir = PCI_DMA_TODEVICE;
+			else
+				dir = PCI_DMA_FROMDEVICE;
+
+			/*
+			 * The function pci_map_page is placed here because we
+			 * can only, by design, issue up to 255 commands to the
+			 * hardware at one time per DMA channel. So the maximum
+			 * amount of mapped memory would be 255 * 4 channels *
+			 * 4096 Bytes which is less than 2GB, the limit of a x8
+			 * Non-HWWD PCIe slot. This way the pci_map_page
+			 * function should never fail because of a lack of
+			 * mappable memory.
+			 */
+			dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+					dma->pg_off, dma->sub_page.cnt << 9, dir);
+			if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+				push_tracker(ctrl->trackers, tag);
+				rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+				continue;
+			}
+		}
+
 		set_tracker_dma(ctrl->trackers, tag, dma);
 		hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
 		hw_cmd_buf[ctrl->cmd.idx].tag = tag;
@@ -620,14 +657,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
 	if (!dma)
 		return -ENOMEM;
 
-	dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
-				     dir ? PCI_DMA_TODEVICE :
-				     PCI_DMA_FROMDEVICE);
-	if (!dma->dma_addr) {
-		kmem_cache_free(rsxx_dma_pool, dma);
-		return -ENOMEM;
-	}
-
 	dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
 	dma->laddr = laddr;
 	dma->sub_page.off = (dma_off >> 9);
@@ -736,11 +765,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	return 0;
 
 bvec_err:
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock_bh(&card->ctrl[i].queue_lock);
-		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i]);
-		spin_unlock_bh(&card->ctrl[i].queue_lock);
-	}
+	for (i = 0; i < card->n_targets; i++)
+		rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
+				       FREE_DMA);
 
 	return st;
 }
@@ -990,7 +1017,7 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 
 		/* Clean up the DMA queue */
 		spin_lock_bh(&ctrl->queue_lock);
-		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue);
+		rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
 		spin_unlock_bh(&ctrl->queue_lock);
 
 		rsxx_dma_cancel(ctrl);
@@ -1032,6 +1059,14 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 			else
 				card->ctrl[i].stats.reads_issued--;
 
+			if (dma->cmd != HW_CMD_BLK_DISCARD) {
+				pci_unmap_page(card->dev, dma->dma_addr,
+					       get_dma_size(dma),
+					       dma->cmd == HW_CMD_BLK_WRITE ?
+					       PCI_DMA_TODEVICE :
+					       PCI_DMA_FROMDEVICE);
+			}
+
 			list_add_tail(&dma->list, &issued_dmas[i]);
 			push_tracker(card->ctrl[i].trackers, j);
 			cnt++;
@@ -1043,15 +1078,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
 		card->ctrl[i].stats.sw_q_depth += cnt;
 		card->ctrl[i].e_cnt = 0;
-
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			if (dma->dma_addr)
-				pci_unmap_page(card->dev, dma->dma_addr,
-					       get_dma_size(dma),
-					       dma->cmd == HW_CMD_BLK_WRITE ?
-					       PCI_DMA_TODEVICE :
-					       PCI_DMA_FROMDEVICE);
-		}
 		spin_unlock_bh(&card->ctrl[i].queue_lock);
 	}
 
@@ -1060,31 +1086,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
 	return 0;
 }
 
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
-	struct rsxx_dma *dma;
-	int i;
-
-	for (i = 0; i < card->n_targets; i++) {
-		spin_lock_bh(&card->ctrl[i].queue_lock);
-		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-			dma->dma_addr = pci_map_page(card->dev, dma->page,
-					dma->pg_off, get_dma_size(dma),
-					dma->cmd == HW_CMD_BLK_WRITE ?
-					PCI_DMA_TODEVICE :
-					PCI_DMA_FROMDEVICE);
-			if (!dma->dma_addr) {
-				spin_unlock_bh(&card->ctrl[i].queue_lock);
-				kmem_cache_free(rsxx_dma_pool, dma);
-				return -ENOMEM;
-			}
-		}
-		spin_unlock_bh(&card->ctrl[i].queue_lock);
-	}
-
-	return 0;
-}
-
 int rsxx_dma_init(void)
 {
 	rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 5ad5055a4104..6bbc64d0f690 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -52,7 +52,7 @@ struct proc_cmd;
 #define RS70_PCI_REV_SUPPORTED	4
 
 #define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "4.0"
+#define DRIVER_VERSION "4.0.3.2516"
 
 /* Block size is 4096 */
 #define RSXX_HW_BLK_SHIFT		12
@@ -345,6 +345,11 @@ enum rsxx_creg_stat {
 	CREG_STAT_TAG_MASK	= 0x0000ff00,
 };
 
+enum rsxx_dma_finish {
+	FREE_DMA	= 0x0,
+	COMPLETE_DMA	= 0x1,
+};
+
 static inline unsigned int CREG_DATA(int N)
 {
 	return CREG_DATA0 + (N << 2);
@@ -379,7 +384,9 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
 int rsxx_dma_setup(struct rsxx_cardinfo *card);
 void rsxx_dma_destroy(struct rsxx_cardinfo *card);
 int rsxx_dma_init(void);
-int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl, struct list_head *q);
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+			   struct list_head *q,
+			   unsigned int done);
 int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
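
For reference, a minimal sketch (not part of the patch) of how a caller might pick between the two cleanup modes the patch introduces: COMPLETE_DMA runs each DMA's completion callback via rsxx_complete_dma(), while FREE_DMA only returns descriptors to the slab pool, as rsxx_dma_queue_bio() now does for DMAs never issued to hardware. drain_ctrl_queue() is a hypothetical helper; the types and functions are the in-tree rsxx definitions shown above.

/* Illustrative sketch only, assuming the rsxx definitions from this patch. */
static int drain_ctrl_queue(struct rsxx_dma_ctrl *ctrl, int notify_callers)
{
	int cnt;

	spin_lock_bh(&ctrl->queue_lock);
	/* Complete (unmap + callback) or just free each queued DMA. */
	cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue,
				     notify_callers ? COMPLETE_DMA : FREE_DMA);
	spin_unlock_bh(&ctrl->queue_lock);

	return cnt;
}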