Diffstat (limited to 'drivers/block/rsxx')
-rw-r--r--  drivers/block/rsxx/Makefile       2
-rw-r--r--  drivers/block/rsxx/config.c       8
-rw-r--r--  drivers/block/rsxx/core.c       237
-rw-r--r--  drivers/block/rsxx/cregs.c      112
-rw-r--r--  drivers/block/rsxx/dma.c        239
-rw-r--r--  drivers/block/rsxx/rsxx.h         6
-rw-r--r--  drivers/block/rsxx/rsxx_cfg.h     2
-rw-r--r--  drivers/block/rsxx/rsxx_priv.h   34
8 files changed, 500 insertions, 140 deletions
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
index f35cd0b71f7b..b1c53c0aa450 100644
--- a/drivers/block/rsxx/Makefile
+++ b/drivers/block/rsxx/Makefile
@@ -1,2 +1,2 @@
 obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
-rsxx-y := config.o core.o cregs.o dev.o dma.o
+rsxx-objs := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
index a295e7e9ee41..10cd530d3e10 100644
--- a/drivers/block/rsxx/config.c
+++ b/drivers/block/rsxx/config.c
@@ -29,15 +29,13 @@
 #include "rsxx_priv.h"
 #include "rsxx_cfg.h"
 
-static void initialize_config(void *config)
+static void initialize_config(struct rsxx_card_cfg *cfg)
 {
-	struct rsxx_card_cfg *cfg = config;
-
 	cfg->hdr.version = RSXX_CFG_VERSION;
 
 	cfg->data.block_size = RSXX_HW_BLK_SIZE;
 	cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
-	cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM;
+	cfg->data.vendor_id = RSXX_VENDOR_ID_IBM;
 	cfg->data.cache_order = (-1);
 	cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
 	cfg->data.intr_coal.count = 0;
@@ -181,7 +179,7 @@ int rsxx_load_config(struct rsxx_cardinfo *card)
 	} else {
 		dev_info(CARD_TO_DEV(card),
 			"Initializing card configuration.\n");
-		initialize_config(card);
+		initialize_config(&card->config);
 		st = rsxx_save_config(card);
 		if (st)
 			return st;
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index e5162487686a..5af21f2db29c 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -30,6 +30,7 @@
 #include <linux/reboot.h>
 #include <linux/slab.h>
 #include <linux/bitops.h>
+#include <linux/delay.h>
 
 #include <linux/genhd.h>
 #include <linux/idr.h>
@@ -39,8 +40,8 @@
 
 #define NO_LEGACY 0
 
-MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
-MODULE_AUTHOR("IBM <support@ramsan.com>");
+MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
+MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
@@ -52,6 +53,13 @@ static DEFINE_IDA(rsxx_disk_ida);
 static DEFINE_SPINLOCK(rsxx_ida_lock);
 
 /*----------------- Interrupt Control & Handling -------------------*/
+
+static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
+{
+	card->isr_mask = 0;
+	card->ier_mask = 0;
+}
+
 static void __enable_intr(unsigned int *mask, unsigned int intr)
 {
 	*mask |= intr;
@@ -71,7 +79,8 @@ static void __disable_intr(unsigned int *mask, unsigned int intr)
  */
 void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 {
-	if (unlikely(card->halt))
+	if (unlikely(card->halt) ||
+	    unlikely(card->eeh_state))
 		return;
 
 	__enable_intr(&card->ier_mask, intr);
@@ -80,6 +89,9 @@ void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 
 void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 {
+	if (unlikely(card->eeh_state))
+		return;
+
 	__disable_intr(&card->ier_mask, intr);
 	iowrite32(card->ier_mask, card->regmap + IER);
 }
@@ -87,7 +99,8 @@ void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
 void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
 			     unsigned int intr)
 {
-	if (unlikely(card->halt))
+	if (unlikely(card->halt) ||
+	    unlikely(card->eeh_state))
 		return;
 
 	__enable_intr(&card->isr_mask, intr);
@@ -97,6 +110,9 @@ void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
 void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
 			      unsigned int intr)
 {
+	if (unlikely(card->eeh_state))
+		return;
+
 	__disable_intr(&card->isr_mask, intr);
 	__disable_intr(&card->ier_mask, intr);
 	iowrite32(card->ier_mask, card->regmap + IER);
@@ -115,6 +131,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
 	do {
 		reread_isr = 0;
 
+		if (unlikely(card->eeh_state))
+			break;
+
 		isr = ioread32(card->regmap + ISR);
 		if (isr == 0xffffffff) {
 			/*
@@ -161,9 +180,9 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
 }
 
 /*----------------- Card Event Handler -------------------*/
-static char *rsxx_card_state_to_str(unsigned int state)
+static const char * const rsxx_card_state_to_str(unsigned int state)
 {
-	static char *state_strings[] = {
+	static const char * const state_strings[] = {
 		"Unknown", "Shutdown", "Starting", "Formatting",
 		"Uninitialized", "Good", "Shutting Down",
 		"Fault", "Read Only Fault", "dStroying"
@@ -304,6 +323,192 @@ static int card_shutdown(struct rsxx_cardinfo *card)
 	return 0;
 }
 
+static int rsxx_eeh_frozen(struct pci_dev *dev)
+{
+	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+	int i;
+	int st;
+
+	dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
+
+	card->eeh_state = 1;
+	rsxx_mask_interrupts(card);
+
+	/*
+	 * We need to guarantee that the write for eeh_state and masking
+	 * interrupts does not become reordered. This will prevent a possible
+	 * race condition with the EEH code.
+	 */
+	wmb();
+
+	pci_disable_device(dev);
+
+	st = rsxx_eeh_save_issued_dmas(card);
+	if (st)
+		return st;
+
+	rsxx_eeh_save_issued_creg(card);
+
+	for (i = 0; i < card->n_targets; i++) {
+		if (card->ctrl[i].status.buf)
+			pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
+					    card->ctrl[i].status.buf,
+					    card->ctrl[i].status.dma_addr);
+		if (card->ctrl[i].cmd.buf)
+			pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
+					    card->ctrl[i].cmd.buf,
+					    card->ctrl[i].cmd.dma_addr);
+	}
+
+	return 0;
+}
+
+static void rsxx_eeh_failure(struct pci_dev *dev)
+{
+	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+	int i;
+
+	dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
+
+	card->eeh_state = 1;
+
+	for (i = 0; i < card->n_targets; i++)
+		del_timer_sync(&card->ctrl[i].activity_timer);
+
+	rsxx_eeh_cancel_dmas(card);
+}
+
+static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
+{
+	unsigned int status;
+	int iter = 0;
+
+	/* We need to wait for the hardware to reset */
+	while (iter++ < 10) {
+		status = ioread32(card->regmap + PCI_RECONFIG);
+
+		if (status & RSXX_FLUSH_BUSY) {
+			ssleep(1);
+			continue;
+		}
+
+		if (status & RSXX_FLUSH_TIMEOUT)
+			dev_warn(CARD_TO_DEV(card), "HW: flash controller timeout\n");
+		return 0;
+	}
+
+	/* Hardware failed resetting itself. */
+	return -1;
+}
+
+static pci_ers_result_t rsxx_error_detected(struct pci_dev *dev,
+					    enum pci_channel_state error)
+{
+	int st;
+
+	if (dev->revision < RSXX_EEH_SUPPORT)
+		return PCI_ERS_RESULT_NONE;
+
+	if (error == pci_channel_io_perm_failure) {
+		rsxx_eeh_failure(dev);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	st = rsxx_eeh_frozen(dev);
+	if (st) {
+		dev_err(&dev->dev, "Slot reset setup failed\n");
+		rsxx_eeh_failure(dev);
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
+{
+	struct rsxx_cardinfo *card = pci_get_drvdata(dev);
+	unsigned long flags;
+	int i;
+	int st;
+
+	dev_warn(&dev->dev,
+		"IBM FlashSystem PCI: recovering from slot reset.\n");
+
+	st = pci_enable_device(dev);
+	if (st)
+		goto failed_hw_setup;
+
+	pci_set_master(dev);
+
+	st = rsxx_eeh_fifo_flush_poll(card);
+	if (st)
+		goto failed_hw_setup;
+
+	rsxx_dma_queue_reset(card);
+
+	for (i = 0; i < card->n_targets; i++) {
+		st = rsxx_hw_buffers_init(dev, &card->ctrl[i]);
+		if (st)
+			goto failed_hw_buffers_init;
+	}
+
+	if (card->config_valid)
+		rsxx_dma_configure(card);
+
+	/* Clears the ISR register from spurious interrupts */
+	st = ioread32(card->regmap + ISR);
+
+	card->eeh_state = 0;
+
+	st = rsxx_eeh_remap_dmas(card);
+	if (st)
+		goto failed_remap_dmas;
+
+	spin_lock_irqsave(&card->irq_lock, flags);
+	if (card->n_targets & RSXX_MAX_TARGETS)
+		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
+	else
+		rsxx_enable_ier_and_isr(card, CR_INTR_ALL_C);
+	spin_unlock_irqrestore(&card->irq_lock, flags);
+
+	rsxx_kick_creg_queue(card);
+
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock(&card->ctrl[i].queue_lock);
+		if (list_empty(&card->ctrl[i].queue)) {
+			spin_unlock(&card->ctrl[i].queue_lock);
+			continue;
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+
+		queue_work(card->ctrl[i].issue_wq,
+			   &card->ctrl[i].issue_dma_work);
+	}
+
+	dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
+
+	return PCI_ERS_RESULT_RECOVERED;
+
+failed_hw_buffers_init:
+failed_remap_dmas:
+	for (i = 0; i < card->n_targets; i++) {
+		if (card->ctrl[i].status.buf)
+			pci_free_consistent(card->dev,
+					    STATUS_BUFFER_SIZE8,
+					    card->ctrl[i].status.buf,
+					    card->ctrl[i].status.dma_addr);
+		if (card->ctrl[i].cmd.buf)
+			pci_free_consistent(card->dev,
+					    COMMAND_BUFFER_SIZE8,
+					    card->ctrl[i].cmd.buf,
+					    card->ctrl[i].cmd.dma_addr);
+	}
+failed_hw_setup:
+	rsxx_eeh_failure(dev);
+	return PCI_ERS_RESULT_DISCONNECT;
+
+}
+
 /*----------------- Driver Initialization & Setup -------------------*/
 /* Returns: 0 if the driver is compatible with the device
 	     -1 if the driver is NOT compatible with the device */
@@ -383,6 +588,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 
 	spin_lock_init(&card->irq_lock);
 	card->halt = 0;
+	card->eeh_state = 0;
 
 	spin_lock_irq(&card->irq_lock);
 	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
@@ -538,9 +744,6 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 	rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
 	spin_unlock_irqrestore(&card->irq_lock, flags);
 
-	/* Prevent work_structs from re-queuing themselves. */
-	card->halt = 1;
-
 	cancel_work_sync(&card->event_work);
 
 	rsxx_destroy_dev(card);
@@ -549,6 +752,10 @@ static void rsxx_pci_remove(struct pci_dev *dev)
 	spin_lock_irqsave(&card->irq_lock, flags);
 	rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
 	spin_unlock_irqrestore(&card->irq_lock, flags);
+
+	/* Prevent work_structs from re-queuing themselves. */
+	card->halt = 1;
+
 	free_irq(dev->irq, card);
 
 	if (!force_legacy)
@@ -592,11 +799,14 @@ static void rsxx_pci_shutdown(struct pci_dev *dev)
 	card_shutdown(card);
 }
 
+static const struct pci_error_handlers rsxx_err_handler = {
+	.error_detected = rsxx_error_detected,
+	.slot_reset = rsxx_slot_reset,
+};
+
 static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
-	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
-	{PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
+	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS70_FLASH)},
+	{PCI_DEVICE(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_FS80_FLASH)},
 	{0,},
 };
 
@@ -609,6 +819,7 @@ static struct pci_driver rsxx_pci_driver = {
 	.remove = rsxx_pci_remove,
 	.suspend = rsxx_pci_suspend,
 	.shutdown = rsxx_pci_shutdown,
+	.err_handler = &rsxx_err_handler,
 };
 
 static int __init rsxx_core_init(void)
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 80bbe639fccd..4b5c020a0a65 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -58,7 +58,7 @@ static struct kmem_cache *creg_cmd_pool;
 #error Unknown endianess!!! Aborting...
 #endif
 
-static void copy_to_creg_data(struct rsxx_cardinfo *card,
+static int copy_to_creg_data(struct rsxx_cardinfo *card,
 			      int cnt8,
 			      void *buf,
 			      unsigned int stream)
@@ -66,6 +66,9 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
 	int i = 0;
 	u32 *data = buf;
 
+	if (unlikely(card->eeh_state))
+		return -EIO;
+
 	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
 		/*
 		 * Firmware implementation makes it necessary to byte swap on
@@ -76,10 +79,12 @@ static void copy_to_creg_data(struct rsxx_cardinfo *card,
 		else
 			iowrite32(data[i], card->regmap + CREG_DATA(i));
 	}
+
+	return 0;
 }
 
 
-static void copy_from_creg_data(struct rsxx_cardinfo *card,
+static int copy_from_creg_data(struct rsxx_cardinfo *card,
 				int cnt8,
 				void *buf,
 				unsigned int stream)
@@ -87,6 +92,9 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
 	int i = 0;
 	u32 *data = buf;
 
+	if (unlikely(card->eeh_state))
+		return -EIO;
+
 	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
 		/*
 		 * Firmware implementation makes it necessary to byte swap on
@@ -97,41 +105,31 @@ static void copy_from_creg_data(struct rsxx_cardinfo *card,
 		else
 			data[i] = ioread32(card->regmap + CREG_DATA(i));
 	}
-}
-
-static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
-{
-	struct creg_cmd *cmd;
 
-	/*
-	 * Spin lock is needed because this can be called in atomic/interrupt
-	 * context.
-	 */
-	spin_lock_bh(&card->creg_ctrl.lock);
-	cmd = card->creg_ctrl.active_cmd;
-	card->creg_ctrl.active_cmd = NULL;
-	spin_unlock_bh(&card->creg_ctrl.lock);
-
-	return cmd;
+	return 0;
 }
 
 static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
 {
+	int st;
+
+	if (unlikely(card->eeh_state))
+		return;
+
 	iowrite32(cmd->addr, card->regmap + CREG_ADD);
 	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
 
 	if (cmd->op == CREG_OP_WRITE) {
-		if (cmd->buf)
-			copy_to_creg_data(card, cmd->cnt8,
-					  cmd->buf, cmd->stream);
+		if (cmd->buf) {
+			st = copy_to_creg_data(card, cmd->cnt8,
+					       cmd->buf, cmd->stream);
+			if (st)
+				return;
+		}
 	}
 
-	/*
-	 * Data copy must complete before initiating the command. This is
-	 * needed for weakly ordered processors (i.e. PowerPC), so that all
-	 * neccessary registers are written before we kick the hardware.
-	 */
-	wmb();
+	if (unlikely(card->eeh_state))
+		return;
 
 	/* Setting the valid bit will kick off the command. */
 	iowrite32(cmd->op, card->regmap + CREG_CMD);
@@ -196,11 +194,11 @@ static int creg_queue_cmd(struct rsxx_cardinfo *card,
 	cmd->cb_private = cb_private;
 	cmd->status = 0;
 
-	spin_lock(&card->creg_ctrl.lock);
+	spin_lock_bh(&card->creg_ctrl.lock);
 	list_add_tail(&cmd->list, &card->creg_ctrl.queue);
 	card->creg_ctrl.q_depth++;
 	creg_kick_queue(card);
-	spin_unlock(&card->creg_ctrl.lock);
+	spin_unlock_bh(&card->creg_ctrl.lock);
 
 	return 0;
 }
@@ -210,7 +208,11 @@ static void creg_cmd_timed_out(unsigned long data)
 	struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
 	struct creg_cmd *cmd;
 
-	cmd = pop_active_cmd(card);
+	spin_lock(&card->creg_ctrl.lock);
+	cmd = card->creg_ctrl.active_cmd;
+	card->creg_ctrl.active_cmd = NULL;
+	spin_unlock(&card->creg_ctrl.lock);
+
 	if (cmd == NULL) {
 		card->creg_ctrl.creg_stats.creg_timeout++;
 		dev_warn(CARD_TO_DEV(card),
@@ -247,7 +249,11 @@ static void creg_cmd_done(struct work_struct *work)
 	if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
 		card->creg_ctrl.creg_stats.failed_cancel_timer++;
 
-	cmd = pop_active_cmd(card);
+	spin_lock_bh(&card->creg_ctrl.lock);
+	cmd = card->creg_ctrl.active_cmd;
+	card->creg_ctrl.active_cmd = NULL;
+	spin_unlock_bh(&card->creg_ctrl.lock);
+
 	if (cmd == NULL) {
 		dev_err(CARD_TO_DEV(card),
 			"Spurious creg interrupt!\n");
@@ -287,7 +293,7 @@ static void creg_cmd_done(struct work_struct *work)
 			goto creg_done;
 		}
 
-		copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
+		st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
 	}
 
 creg_done:
@@ -296,10 +302,10 @@ creg_done:
 
 	kmem_cache_free(creg_cmd_pool, cmd);
 
-	spin_lock(&card->creg_ctrl.lock);
+	spin_lock_bh(&card->creg_ctrl.lock);
 	card->creg_ctrl.active = 0;
 	creg_kick_queue(card);
-	spin_unlock(&card->creg_ctrl.lock);
+	spin_unlock_bh(&card->creg_ctrl.lock);
 }
 
 static void creg_reset(struct rsxx_cardinfo *card)
@@ -324,7 +330,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
 		"Resetting creg interface for recovery\n");
 
 	/* Cancel outstanding commands */
-	spin_lock(&card->creg_ctrl.lock);
+	spin_lock_bh(&card->creg_ctrl.lock);
 	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
 		list_del(&cmd->list);
 		card->creg_ctrl.q_depth--;
@@ -345,7 +351,7 @@ static void creg_reset(struct rsxx_cardinfo *card)
 
 		card->creg_ctrl.active = 0;
 	}
-	spin_unlock(&card->creg_ctrl.lock);
+	spin_unlock_bh(&card->creg_ctrl.lock);
 
 	card->creg_ctrl.reset = 0;
 	spin_lock_irqsave(&card->irq_lock, flags);
@@ -399,12 +405,12 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
 		return st;
 
 	/*
-	 * This timeout is neccessary for unresponsive hardware. The additional
+	 * This timeout is necessary for unresponsive hardware. The additional
 	 * 20 seconds to used to guarantee that each cregs requests has time to
 	 * complete.
 	 */
-	timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
-				card->creg_ctrl.q_depth) + 20000);
+	timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
+				card->creg_ctrl.q_depth + 20000);
 
 	/*
 	 * The creg interface is guaranteed to complete. It has a timeout
@@ -690,6 +696,32 @@ int rsxx_reg_access(struct rsxx_cardinfo *card,
 	return 0;
 }
 
+void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
+{
+	struct creg_cmd *cmd = NULL;
+
+	cmd = card->creg_ctrl.active_cmd;
+	card->creg_ctrl.active_cmd = NULL;
+
+	if (cmd) {
+		del_timer_sync(&card->creg_ctrl.cmd_timer);
+
+		spin_lock_bh(&card->creg_ctrl.lock);
+		list_add(&cmd->list, &card->creg_ctrl.queue);
+		card->creg_ctrl.q_depth++;
+		card->creg_ctrl.active = 0;
+		spin_unlock_bh(&card->creg_ctrl.lock);
+	}
+}
+
+void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
+{
+	spin_lock_bh(&card->creg_ctrl.lock);
+	if (!list_empty(&card->creg_ctrl.queue))
+		creg_kick_queue(card);
+	spin_unlock_bh(&card->creg_ctrl.lock);
+}
+
 /*------------ Initialization & Setup --------------*/
 int rsxx_creg_setup(struct rsxx_cardinfo *card)
 {
@@ -712,7 +744,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
 	int cnt = 0;
 
 	/* Cancel outstanding commands */
-	spin_lock(&card->creg_ctrl.lock);
+	spin_lock_bh(&card->creg_ctrl.lock);
 	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
 		list_del(&cmd->list);
 		if (cmd->cb)
@@ -737,7 +769,7 @@ void rsxx_creg_destroy(struct rsxx_cardinfo *card)
 			"Canceled active creg command\n");
 		kmem_cache_free(creg_cmd_pool, cmd);
 	}
-	spin_unlock(&card->creg_ctrl.lock);
+	spin_unlock_bh(&card->creg_ctrl.lock);
 
 	cancel_work_sync(&card->creg_ctrl.done_work);
 }
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 63176e67662f..0607513cfb41 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -28,7 +28,7 @@
 struct rsxx_dma {
 	struct list_head list;
 	u8 cmd;
-	unsigned int laddr;     /* Logical address on the ramsan */
+	unsigned int laddr;     /* Logical address */
 	struct {
 		u32 off;
 		u32 cnt;
@@ -81,9 +81,6 @@ enum rsxx_hw_status {
 	HW_STATUS_FAULT = 0x08,
 };
 
-#define STATUS_BUFFER_SIZE8 4096
-#define COMMAND_BUFFER_SIZE8 4096
-
 static struct kmem_cache *rsxx_dma_pool;
 
 struct dma_tracker {
@@ -122,7 +119,7 @@ static unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
 	return tgt;
 }
 
-static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
+void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
 {
 	/* Reset all DMA Command/Status Queues */
 	iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
@@ -210,7 +207,8 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 	u32 q_depth = 0;
 	u32 intr_coal;
 
-	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
+	if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE ||
+	    unlikely(card->eeh_state))
 		return;
 
 	for (i = 0; i < card->n_targets; i++)
@@ -223,31 +221,26 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
-static void rsxx_complete_dma(struct rsxx_cardinfo *card,
+static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
 			      struct rsxx_dma *dma,
 			      unsigned int status)
 {
 	if (status & DMA_SW_ERR)
-		printk_ratelimited(KERN_ERR
-				   "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_sw_err++;
 	if (status & DMA_HW_FAULT)
-		printk_ratelimited(KERN_ERR
-				   "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_hw_fault++;
 	if (status & DMA_CANCELLED)
-		printk_ratelimited(KERN_ERR
-				   "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
-				   dma->cmd, dma->laddr);
+		ctrl->stats.dma_cancelled++;
 
 	if (dma->dma_addr)
-		pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
+		pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+			       get_dma_size(dma),
 			       dma->cmd == HW_CMD_BLK_WRITE ?
 			       PCI_DMA_TODEVICE :
 			       PCI_DMA_FROMDEVICE);
 
 	if (dma->cb)
-		dma->cb(card, dma->cb_data, status ? 1 : 0);
+		dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
 	kmem_cache_free(rsxx_dma_pool, dma);
 }
@@ -330,14 +323,15 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
 	if (requeue_cmd)
 		rsxx_requeue_dma(ctrl, dma);
 	else
-		rsxx_complete_dma(ctrl->card, dma, status);
+		rsxx_complete_dma(ctrl, dma, status);
 }
 
 static void dma_engine_stalled(unsigned long data)
 {
 	struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
 
-	if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
+	if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
+	    unlikely(ctrl->card->eeh_state))
 		return;
 
 	if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
@@ -369,7 +363,8 @@ static void rsxx_issue_dmas(struct work_struct *work)
 	ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
 	hw_cmd_buf = ctrl->cmd.buf;
 
-	if (unlikely(ctrl->card->halt))
+	if (unlikely(ctrl->card->halt) ||
+	    unlikely(ctrl->card->eeh_state))
 		return;
 
 	while (1) {
@@ -397,7 +392,7 @@ static void rsxx_issue_dmas(struct work_struct *work)
 		 */
 		if (unlikely(ctrl->card->dma_fault)) {
 			push_tracker(ctrl->trackers, tag);
-			rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
+			rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
 			continue;
 		}
 
@@ -432,19 +427,15 @@ static void rsxx_issue_dmas(struct work_struct *work)
 
 	/* Let HW know we've queued commands. */
 	if (cmds_pending) {
-		/*
-		 * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
-		 * (which is in PCI-consistent system-memory) from the loop
-		 * above make it into the coherency domain before the
-		 * following PIO "trigger" updating the cmd.idx. A WMB is
-		 * sufficient. We need not explicitly CPU cache-flush since
-		 * the memory is a PCI-consistent (ie; coherent) mapping.
-		 */
-		wmb();
-
 		atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
 		mod_timer(&ctrl->activity_timer,
 			  jiffies + DMA_ACTIVITY_TIMEOUT);
+
+		if (unlikely(ctrl->card->eeh_state)) {
+			del_timer_sync(&ctrl->activity_timer);
+			return;
+		}
+
 		iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
 	}
 }
@@ -463,7 +454,8 @@ static void rsxx_dma_done(struct work_struct *work)
 	hw_st_buf = ctrl->status.buf;
 
 	if (unlikely(ctrl->card->halt) ||
-	    unlikely(ctrl->card->dma_fault))
+	    unlikely(ctrl->card->dma_fault) ||
+	    unlikely(ctrl->card->eeh_state))
 		return;
 
 	count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
@@ -508,7 +500,7 @@ static void rsxx_dma_done(struct work_struct *work)
 		if (status)
 			rsxx_handle_dma_error(ctrl, dma, status);
 		else
-			rsxx_complete_dma(ctrl->card, dma, 0);
+			rsxx_complete_dma(ctrl, dma, 0);
 
 		push_tracker(ctrl->trackers, tag);
 
@@ -727,20 +719,54 @@ bvec_err:
 
 
 /*----------------- DMA Engine Initialization & Setup -------------------*/
+int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl)
+{
+	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
+						&ctrl->status.dma_addr);
+	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
+					     &ctrl->cmd.dma_addr);
+	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
+		return -ENOMEM;
+
+	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
+	iowrite32(lower_32_bits(ctrl->status.dma_addr),
+		  ctrl->regmap + SB_ADD_LO);
+	iowrite32(upper_32_bits(ctrl->status.dma_addr),
+		  ctrl->regmap + SB_ADD_HI);
+
+	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
+	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
+	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
+
+	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
+	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
+			 ctrl->status.idx);
+		return -EINVAL;
+	}
+	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
+	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
+
+	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
+	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
+		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
+			 ctrl->status.idx);
+		return -EINVAL;
+	}
+	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
+	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
+
+	return 0;
+}
+
 static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 			      struct rsxx_dma_ctrl *ctrl)
 {
 	int i;
+	int st;
 
 	memset(&ctrl->stats, 0, sizeof(ctrl->stats));
 
-	ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
-						&ctrl->status.dma_addr);
-	ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
-					     &ctrl->cmd.dma_addr);
-	if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
-		return -ENOMEM;
-
 	ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
 	if (!ctrl->trackers)
 		return -ENOMEM;
@@ -770,35 +796,9 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
 	INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
 	INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
 
-	memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
-	iowrite32(lower_32_bits(ctrl->status.dma_addr),
-		  ctrl->regmap + SB_ADD_LO);
-	iowrite32(upper_32_bits(ctrl->status.dma_addr),
-		  ctrl->regmap + SB_ADD_HI);
-
-	memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
-	iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
-	iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
-
-	ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
-	if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-		dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
-			 ctrl->status.idx);
-		return -EINVAL;
-	}
-	iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
-	iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
-
-	ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
-	if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
-		dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
-			 ctrl->status.idx);
-		return -EINVAL;
-	}
-	iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
-	iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
-
-	wmb();
+	st = rsxx_hw_buffers_init(dev, ctrl);
+	if (st)
+		return st;
 
 	return 0;
 }
@@ -834,7 +834,7 @@ static int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
 	return 0;
 }
 
-static int rsxx_dma_configure(struct rsxx_cardinfo *card)
+int rsxx_dma_configure(struct rsxx_cardinfo *card)
 {
 	u32 intr_coal;
 
@@ -980,6 +980,103 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 	}
 }
 
+int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
+{
+	int i;
+	int j;
+	int cnt;
+	struct rsxx_dma *dma;
+	struct list_head *issued_dmas;
+
+	issued_dmas = kzalloc(sizeof(*issued_dmas) * card->n_targets,
+			      GFP_KERNEL);
+	if (!issued_dmas)
+		return -ENOMEM;
+
+	for (i = 0; i < card->n_targets; i++) {
+		INIT_LIST_HEAD(&issued_dmas[i]);
+		cnt = 0;
+		for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
+			dma = get_tracker_dma(card->ctrl[i].trackers, j);
+			if (dma == NULL)
+				continue;
+
+			if (dma->cmd == HW_CMD_BLK_WRITE)
+				card->ctrl[i].stats.writes_issued--;
+			else if (dma->cmd == HW_CMD_BLK_DISCARD)
+				card->ctrl[i].stats.discards_issued--;
+			else
+				card->ctrl[i].stats.reads_issued--;
+
+			list_add_tail(&dma->list, &issued_dmas[i]);
+			push_tracker(card->ctrl[i].trackers, j);
+			cnt++;
+		}
+
+		spin_lock(&card->ctrl[i].queue_lock);
+		list_splice(&issued_dmas[i], &card->ctrl[i].queue);
+
+		atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
+		card->ctrl[i].stats.sw_q_depth += cnt;
+		card->ctrl[i].e_cnt = 0;
+
+		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+			if (dma->dma_addr)
+				pci_unmap_page(card->dev, dma->dma_addr,
+					       get_dma_size(dma),
+					       dma->cmd == HW_CMD_BLK_WRITE ?
+					       PCI_DMA_TODEVICE :
+					       PCI_DMA_FROMDEVICE);
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+	}
+
+	kfree(issued_dmas);
+
+	return 0;
+}
+
+void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
+{
+	struct rsxx_dma *dma;
+	struct rsxx_dma *tmp;
+	int i;
+
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock(&card->ctrl[i].queue_lock);
+		list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
+			list_del(&dma->list);
+
+			rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+	}
+}
+
+int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
+{
+	struct rsxx_dma *dma;
+	int i;
+
+	for (i = 0; i < card->n_targets; i++) {
+		spin_lock(&card->ctrl[i].queue_lock);
+		list_for_each_entry(dma, &card->ctrl[i].queue, list) {
+			dma->dma_addr = pci_map_page(card->dev, dma->page,
+					dma->pg_off, get_dma_size(dma),
+					dma->cmd == HW_CMD_BLK_WRITE ?
+					PCI_DMA_TODEVICE :
+					PCI_DMA_FROMDEVICE);
+			if (!dma->dma_addr) {
+				spin_unlock(&card->ctrl[i].queue_lock);
+				kmem_cache_free(rsxx_dma_pool, dma);
+				return -ENOMEM;
+			}
+		}
+		spin_unlock(&card->ctrl[i].queue_lock);
+	}
+
+	return 0;
+}
 
 int rsxx_dma_init(void)
 {
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h
index 2e50b65902b7..24ba3642bd89 100644
--- a/drivers/block/rsxx/rsxx.h
+++ b/drivers/block/rsxx/rsxx.h
@@ -27,15 +27,17 @@
 
 /*----------------- IOCTL Definitions -------------------*/
 
+#define RSXX_MAX_DATA 8
+
 struct rsxx_reg_access {
 	__u32 addr;
 	__u32 cnt;
 	__u32 stat;
 	__u32 stream;
-	__u32 data[8];
+	__u32 data[RSXX_MAX_DATA];
 };
 
-#define RSXX_MAX_REG_CNT (8 * (sizeof(__u32)))
+#define RSXX_MAX_REG_CNT (RSXX_MAX_DATA * (sizeof(__u32)))
 
 #define RSXX_IOC_MAGIC 'r'
 
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h
index c025fe5fdb70..f384c943846d 100644
--- a/drivers/block/rsxx/rsxx_cfg.h
+++ b/drivers/block/rsxx/rsxx_cfg.h
@@ -58,7 +58,7 @@ struct rsxx_card_cfg {
 };
 
 /* Vendor ID Values */
-#define RSXX_VENDOR_ID_TMS_IBM		0
+#define RSXX_VENDOR_ID_IBM		0
 #define RSXX_VENDOR_ID_DSI		1
 #define RSXX_VENDOR_COUNT		2
 
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index a1ac907d8f4c..382e8bf5c03b 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -45,16 +45,13 @@
 
 struct proc_cmd;
 
-#define PCI_VENDOR_ID_TMS_IBM 0x15B6
-#define PCI_DEVICE_ID_RS70_FLASH 0x0019
-#define PCI_DEVICE_ID_RS70D_FLASH 0x001A
-#define PCI_DEVICE_ID_RS80_FLASH 0x001C
-#define PCI_DEVICE_ID_RS81_FLASH 0x001E
+#define PCI_DEVICE_ID_FS70_FLASH 0x04A9
+#define PCI_DEVICE_ID_FS80_FLASH 0x04AA
 
 #define RS70_PCI_REV_SUPPORTED 4
 
 #define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "3.7"
+#define DRIVER_VERSION "4.0"
 
 /* Block size is 4096 */
 #define RSXX_HW_BLK_SHIFT 12
@@ -67,6 +64,9 @@ struct proc_cmd;
 #define RSXX_MAX_OUTSTANDING_CMDS 255
 #define RSXX_CS_IDX_MASK 0xff
 
+#define STATUS_BUFFER_SIZE8 4096
+#define COMMAND_BUFFER_SIZE8 4096
+
 #define RSXX_MAX_TARGETS 8
 
 struct dma_tracker_list;
@@ -91,6 +91,9 @@ struct rsxx_dma_stats {
 	u32 discards_failed;
 	u32 done_rescheduled;
 	u32 issue_rescheduled;
+	u32 dma_sw_err;
+	u32 dma_hw_fault;
+	u32 dma_cancelled;
 	u32 sw_q_depth;		/* Number of DMAs on the SW queue. */
 	atomic_t hw_q_depth;	/* Number of DMAs queued to HW. */
 };
@@ -116,6 +119,7 @@ struct rsxx_dma_ctrl {
 struct rsxx_cardinfo {
 	struct pci_dev		*dev;
 	unsigned int		halt;
+	unsigned int		eeh_state;
 
 	void __iomem		*regmap;
 	spinlock_t		irq_lock;
@@ -224,6 +228,7 @@ enum rsxx_pci_regmap {
 	PERF_RD512_HI	= 0xac,
 	PERF_WR512_LO	= 0xb0,
 	PERF_WR512_HI	= 0xb4,
+	PCI_RECONFIG	= 0xb8,
 };
 
 enum rsxx_intr {
@@ -237,6 +242,8 @@ enum rsxx_intr {
 	CR_INTR_DMA5	= 0x00000080,
 	CR_INTR_DMA6	= 0x00000100,
 	CR_INTR_DMA7	= 0x00000200,
+	CR_INTR_ALL_C	= 0x0000003f,
+	CR_INTR_ALL_G	= 0x000003ff,
 	CR_INTR_DMA_ALL = 0x000003f5,
 	CR_INTR_ALL	= 0xffffffff,
 };
@@ -253,8 +260,14 @@ enum rsxx_pci_reset {
 	DMA_QUEUE_RESET		= 0x00000001,
 };
 
+enum rsxx_hw_fifo_flush {
+	RSXX_FLUSH_BUSY		= 0x00000002,
+	RSXX_FLUSH_TIMEOUT	= 0x00000004,
+};
+
 enum rsxx_pci_revision {
 	RSXX_DISCARD_SUPPORT = 2,
+	RSXX_EEH_SUPPORT = 3,
 };
 
 enum rsxx_creg_cmd {
@@ -360,11 +373,17 @@ int rsxx_dma_setup(struct rsxx_cardinfo *card);
 void rsxx_dma_destroy(struct rsxx_cardinfo *card);
 int rsxx_dma_init(void);
 void rsxx_dma_cleanup(void);
+void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
+int rsxx_dma_configure(struct rsxx_cardinfo *card);
 int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 		       struct bio *bio,
 		       atomic_t *n_dmas,
 		       rsxx_dma_cb cb,
 		       void *cb_data);
+int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
+int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
+void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
+int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
 
 /***** cregs.c *****/
 int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
@@ -389,10 +408,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card);
 void rsxx_creg_destroy(struct rsxx_cardinfo *card);
 int rsxx_creg_init(void);
 void rsxx_creg_cleanup(void);
-
 int rsxx_reg_access(struct rsxx_cardinfo *card,
 		    struct rsxx_reg_access __user *ucmd,
 		    int read);
+void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card);
+void rsxx_kick_creg_queue(struct rsxx_cardinfo *card);
 
 
 