author     josh.h.morris@us.ibm.com <josh.h.morris@us.ibm.com>  2013-02-05 08:15:02 -0500
committer  Jens Axboe <axboe@kernel.dk>                         2013-02-05 08:16:05 -0500
commit     8722ff8cdbfac9c1b20e67bb067b455c48cb8e93 (patch)
tree       5b85a6366f1337d3d56d67a76755e82e11a8b324 /drivers
parent     478c030eecbec927d62561c5f48a4515ea0fa21a (diff)
block: IBM RamSan 70/80 device driver
This patch includes the device driver for the IBM RamSan family of PCI SSD flash storage cards. This driver will include support for the RamSan 70 and 80. The driver presents a block device for device I/O.

Signed-off-by: Philip J Kelleher <pjk1939@linux.vnet.ibm.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
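As a rough illustration of the interface this creates, the userspace sketch below reads the first 4 KiB of the card through ordinary file I/O. It is not part of the patch, and the /dev/rsxx0 node name is an assumption based on the "rsxx%d" disk name set up in dev.c.

/*
 * Hypothetical userspace sketch (not part of this patch): read the
 * first 4 KiB from the exported block device.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/dev/rsxx0", O_RDONLY);	/* assumed node name */

	if (fd < 0) {
		perror("open /dev/rsxx0");
		return 1;
	}
	if (read(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf))
		perror("read");
	close(fd);
	return 0;
}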
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/Kconfig           |  10
-rw-r--r--  drivers/block/Makefile          |   2
-rw-r--r--  drivers/block/rsxx/Makefile     |   2
-rw-r--r--  drivers/block/rsxx/config.c     | 213
-rw-r--r--  drivers/block/rsxx/core.c       | 651
-rw-r--r--  drivers/block/rsxx/cregs.c      | 743
-rw-r--r--  drivers/block/rsxx/dev.c        | 367
-rw-r--r--  drivers/block/rsxx/dma.c        | 997
-rw-r--r--  drivers/block/rsxx/rsxx.h       |  43
-rw-r--r--  drivers/block/rsxx/rsxx_cfg.h   |  72
-rw-r--r--  drivers/block/rsxx/rsxx_priv.h  | 408
11 files changed, 3508 insertions(+), 0 deletions(-)
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 824e09c4d0d7..94f587454385 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -544,4 +544,14 @@ config BLK_DEV_RBD
544
545	  If unsure, say N.
546
547config BLK_DEV_RSXX
548	tristate "RamSan PCIe Flash SSD Device Driver"
549 depends on PCI
550 help
551 Device driver for IBM's high speed PCIe SSD
552 storage devices: RamSan-70 and RamSan-80.
553
554 To compile this driver as a module, choose M here: the
555 module will be called rsxx.
556
557endif # BLK_DEV
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 17e82df3df74..9473b0b8788f 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -41,4 +41,6 @@ obj-$(CONFIG_BLK_DEV_DRBD) += drbd/
41obj-$(CONFIG_BLK_DEV_RBD) += rbd.o
42obj-$(CONFIG_BLK_DEV_PCIESSD_MTIP32XX) += mtip32xx/
43
44obj-$(CONFIG_BLK_DEV_RSXX) += rsxx/
45
46swim_mod-y := swim.o swim_asm.o
diff --git a/drivers/block/rsxx/Makefile b/drivers/block/rsxx/Makefile
new file mode 100644
index 000000000000..f35cd0b71f7b
--- /dev/null
+++ b/drivers/block/rsxx/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_BLK_DEV_RSXX) += rsxx.o
2rsxx-y := config.o core.o cregs.o dev.o dma.o
diff --git a/drivers/block/rsxx/config.c b/drivers/block/rsxx/config.c
new file mode 100644
index 000000000000..c8829cd4db11
--- /dev/null
+++ b/drivers/block/rsxx/config.c
@@ -0,0 +1,213 @@
1/*
2* Filename: config.c
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#include <linux/types.h>
26#include <linux/crc32.h>
27#include <linux/swab.h>
28
29#include "rsxx_priv.h"
30#include "rsxx_cfg.h"
31
32static void initialize_config(void *config)
33{
34 struct rsxx_card_cfg *cfg = (struct rsxx_card_cfg *) config;
35
36 cfg->hdr.version = RSXX_CFG_VERSION;
37
38 cfg->data.block_size = RSXX_HW_BLK_SIZE;
39 cfg->data.stripe_size = RSXX_HW_BLK_SIZE;
40 cfg->data.vendor_id = RSXX_VENDOR_ID_TMS_IBM;
41 cfg->data.cache_order = (-1);
42 cfg->data.intr_coal.mode = RSXX_INTR_COAL_DISABLED;
43 cfg->data.intr_coal.count = 0;
44 cfg->data.intr_coal.latency = 0;
45}
46
47static u32 config_data_crc32(struct rsxx_card_cfg *cfg)
48{
49 /*
50	 * Return the complement of the CRC to ensure compatibility
51 * (i.e. this is how early rsxx drivers did it.)
52 */
53
54 return ~crc32(~0, &cfg->data, sizeof(cfg->data));
55}
56
57
58/*----------------- Config Byte Swap Functions -------------------*/
59static void config_hdr_be_to_cpu(struct card_cfg_hdr *hdr)
60{
61 hdr->version = be32_to_cpu((__force __be32) hdr->version);
62 hdr->crc = be32_to_cpu((__force __be32) hdr->crc);
63}
64
65static void config_hdr_cpu_to_be(struct card_cfg_hdr *hdr)
66{
67 hdr->version = (__force u32) cpu_to_be32(hdr->version);
68 hdr->crc = (__force u32) cpu_to_be32(hdr->crc);
69}
70
71static void config_data_swab(struct rsxx_card_cfg *cfg)
72{
73 u32 *data = (u32 *) &cfg->data;
74 int i;
75
76 for (i = 0; i < (sizeof(cfg->data) / 4); i++)
77 data[i] = swab32(data[i]);
78}
79
80static void config_data_le_to_cpu(struct rsxx_card_cfg *cfg)
81{
82 u32 *data = (u32 *) &cfg->data;
83 int i;
84
85 for (i = 0; i < (sizeof(cfg->data) / 4); i++)
86 data[i] = le32_to_cpu((__force __le32) data[i]);
87}
88
89static void config_data_cpu_to_le(struct rsxx_card_cfg *cfg)
90{
91 u32 *data = (u32 *) &cfg->data;
92 int i;
93
94 for (i = 0; i < (sizeof(cfg->data) / 4); i++)
95 data[i] = (__force u32) cpu_to_le32(data[i]);
96}
97
98
99/*----------------- Config Operations ------------------*/
100int rsxx_save_config(struct rsxx_cardinfo *card)
101{
102 struct rsxx_card_cfg cfg;
103 int st;
104
105 memcpy(&cfg, &card->config, sizeof(cfg));
106
107 if (unlikely(cfg.hdr.version != RSXX_CFG_VERSION)) {
108 dev_err(CARD_TO_DEV(card),
109 "Cannot save config with invalid version %d\n",
110 cfg.hdr.version);
111 return -EINVAL;
112 }
113
114 /* Convert data to little endian for the CRC calculation. */
115 config_data_cpu_to_le(&cfg);
116
117 cfg.hdr.crc = config_data_crc32(&cfg);
118
119 /*
120 * Swap the data from little endian to big endian so it can be
121 * stored.
122 */
123 config_data_swab(&cfg);
124 config_hdr_cpu_to_be(&cfg.hdr);
125
126 st = rsxx_creg_write(card, CREG_ADD_CONFIG, sizeof(cfg), &cfg, 1);
127 if (st)
128 return st;
129
130 return 0;
131}
132
133int rsxx_load_config(struct rsxx_cardinfo *card)
134{
135 int st;
136 u32 crc;
137
138 st = rsxx_creg_read(card, CREG_ADD_CONFIG, sizeof(card->config),
139 &card->config, 1);
140 if (st) {
141 dev_err(CARD_TO_DEV(card),
142 "Failed reading card config.\n");
143 return st;
144 }
145
146 config_hdr_be_to_cpu(&card->config.hdr);
147
148 if (card->config.hdr.version == RSXX_CFG_VERSION) {
149 /*
150 * We calculate the CRC with the data in little endian, because
151 * early drivers did not take big endian CPUs into account.
152 * The data is always stored in big endian, so we need to byte
153 * swap it before calculating the CRC.
154 */
155
156 config_data_swab(&card->config);
157
158 /* Check the CRC */
159 crc = config_data_crc32(&card->config);
160 if (crc != card->config.hdr.crc) {
161 dev_err(CARD_TO_DEV(card),
162 "Config corruption detected!\n");
163 dev_info(CARD_TO_DEV(card),
164			"CRC (should be x%08x, is x%08x)\n",
165 card->config.hdr.crc, crc);
166 return -EIO;
167 }
168
169 /* Convert the data to CPU byteorder */
170 config_data_le_to_cpu(&card->config);
171
172 } else if (card->config.hdr.version != 0) {
173 dev_err(CARD_TO_DEV(card),
174 "Invalid config version %d.\n",
175 card->config.hdr.version);
176 /*
177 * Config version changes require special handling from the
178 * user
179 */
180 return -EINVAL;
181 } else {
182 dev_info(CARD_TO_DEV(card),
183 "Initializing card configuration.\n");
184 initialize_config(card);
185 st = rsxx_save_config(card);
186 if (st)
187 return st;
188 }
189
190 card->config_valid = 1;
191
192 dev_dbg(CARD_TO_DEV(card), "version: x%08x\n",
193 card->config.hdr.version);
194 dev_dbg(CARD_TO_DEV(card), "crc: x%08x\n",
195 card->config.hdr.crc);
196 dev_dbg(CARD_TO_DEV(card), "block_size: x%08x\n",
197 card->config.data.block_size);
198 dev_dbg(CARD_TO_DEV(card), "stripe_size: x%08x\n",
199 card->config.data.stripe_size);
200 dev_dbg(CARD_TO_DEV(card), "vendor_id: x%08x\n",
201 card->config.data.vendor_id);
202 dev_dbg(CARD_TO_DEV(card), "cache_order: x%08x\n",
203 card->config.data.cache_order);
204 dev_dbg(CARD_TO_DEV(card), "mode: x%08x\n",
205 card->config.data.intr_coal.mode);
206 dev_dbg(CARD_TO_DEV(card), "count: x%08x\n",
207 card->config.data.intr_coal.count);
208 dev_dbg(CARD_TO_DEV(card), "latency: x%08x\n",
209 card->config.data.intr_coal.latency);
210
211 return 0;
212}
213
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
new file mode 100644
index 000000000000..f75219140e70
--- /dev/null
+++ b/drivers/block/rsxx/core.c
@@ -0,0 +1,651 @@
1/*
2* Filename: core.c
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#include <linux/kernel.h>
26#include <linux/init.h>
27#include <linux/interrupt.h>
28#include <linux/module.h>
29#include <linux/pci.h>
30#include <linux/reboot.h>
31#include <linux/slab.h>
32#include <linux/bitops.h>
33
34#include <linux/genhd.h>
35#include <linux/idr.h>
36
37#include "rsxx_priv.h"
38#include "rsxx_cfg.h"
39
40#define NO_LEGACY 0
41
42MODULE_DESCRIPTION("IBM RamSan PCIe Flash SSD Device Driver");
43MODULE_AUTHOR("IBM <support@ramsan.com>");
44MODULE_LICENSE("GPL");
45MODULE_VERSION(DRIVER_VERSION);
46
47static unsigned int force_legacy = NO_LEGACY;
48module_param(force_legacy, uint, 0444);
49MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
50
51static DEFINE_IDA(rsxx_disk_ida);
52static DEFINE_SPINLOCK(rsxx_ida_lock);
53
54/*----------------- Interrupt Control & Handling -------------------*/
55static void __enable_intr(unsigned int *mask, unsigned int intr)
56{
57 *mask |= intr;
58}
59
60static void __disable_intr(unsigned int *mask, unsigned int intr)
61{
62 *mask &= ~intr;
63}
64
65/*
66 * NOTE: Disabling the IER will disable the hardware interrupt.
67 * Disabling the ISR will disable the software handling of the ISR bit.
68 *
69 * Enable/Disable interrupt functions assume the card->irq_lock
70 * is held by the caller.
71 */
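/*
 * A typical calling pattern (this exact sequence appears in the probe
 * path later in this file):
 *
 *	spin_lock_irqsave(&card->irq_lock, flags);
 *	rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
 *	spin_unlock_irqrestore(&card->irq_lock, flags);
 */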
72void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr)
73{
74 if (unlikely(card->halt))
75 return;
76
77 __enable_intr(&card->ier_mask, intr);
78 iowrite32(card->ier_mask, card->regmap + IER);
79}
80
81void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr)
82{
83 __disable_intr(&card->ier_mask, intr);
84 iowrite32(card->ier_mask, card->regmap + IER);
85}
86
87void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
88 unsigned int intr)
89{
90 if (unlikely(card->halt))
91 return;
92
93 __enable_intr(&card->isr_mask, intr);
94 __enable_intr(&card->ier_mask, intr);
95 iowrite32(card->ier_mask, card->regmap + IER);
96}
97void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
98 unsigned int intr)
99{
100 __disable_intr(&card->isr_mask, intr);
101 __disable_intr(&card->ier_mask, intr);
102 iowrite32(card->ier_mask, card->regmap + IER);
103}
104
105irqreturn_t rsxx_isr(int irq, void *pdata)
106{
107 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) pdata;
108 unsigned int isr;
109 int handled = 0;
110 int reread_isr;
111 int i;
112
113 spin_lock(&card->irq_lock);
114
115 do {
116 reread_isr = 0;
117
118 isr = ioread32(card->regmap + ISR);
119 if (isr == 0xffffffff) {
120 /*
121 * A few systems seem to have an intermittent issue
122 * where PCI reads return all Fs, but retrying the read
123 * a little later will return as expected.
124 */
125 dev_info(CARD_TO_DEV(card),
126 "ISR = 0xFFFFFFFF, retrying later\n");
127 break;
128 }
129
130 isr &= card->isr_mask;
131 if (!isr)
132 break;
133
134 for (i = 0; i < card->n_targets; i++) {
135 if (isr & CR_INTR_DMA(i)) {
136 if (card->ier_mask & CR_INTR_DMA(i)) {
137 rsxx_disable_ier(card, CR_INTR_DMA(i));
138 reread_isr = 1;
139 }
140 queue_work(card->ctrl[i].done_wq,
141 &card->ctrl[i].dma_done_work);
142 handled++;
143 }
144 }
145
146 if (isr & CR_INTR_CREG) {
147 schedule_work(&card->creg_ctrl.done_work);
148 handled++;
149 }
150
151 if (isr & CR_INTR_EVENT) {
152 schedule_work(&card->event_work);
153 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
154 handled++;
155 }
156 } while (reread_isr);
157
158 spin_unlock(&card->irq_lock);
159
160 return handled ? IRQ_HANDLED : IRQ_NONE;
161}
162
163/*----------------- Card Event Handler -------------------*/
164static void card_state_change(struct rsxx_cardinfo *card,
165 unsigned int new_state)
166{
167 int st;
168
169 dev_info(CARD_TO_DEV(card),
170		"card state change detected. (%s -> %s)\n",
171 rsxx_card_state_to_str(card->state),
172 rsxx_card_state_to_str(new_state));
173
174 card->state = new_state;
175
176 /* Don't attach DMA interfaces if the card has an invalid config */
177 if (!card->config_valid)
178 return;
179
180 switch (new_state) {
181 case CARD_STATE_RD_ONLY_FAULT:
182 dev_crit(CARD_TO_DEV(card),
183 "Hardware has entered read-only mode!\n");
184 /*
185 * Fall through so the DMA devices can be attached and
186 * the user can attempt to pull off their data.
187 */
188 case CARD_STATE_GOOD:
189 st = rsxx_get_card_size8(card, &card->size8);
190 if (st)
191 dev_err(CARD_TO_DEV(card),
192 "Failed attaching DMA devices\n");
193
194 if (card->config_valid)
195 set_capacity(card->gendisk, card->size8 >> 9);
196 break;
197
198 case CARD_STATE_FAULT:
199 dev_crit(CARD_TO_DEV(card),
200 "Hardware Fault reported!\n");
201 /* Fall through. */
202
203 /* Everything else, detach DMA interface if it's attached. */
204 case CARD_STATE_SHUTDOWN:
205 case CARD_STATE_STARTING:
206 case CARD_STATE_FORMATTING:
207 case CARD_STATE_UNINITIALIZED:
208 case CARD_STATE_SHUTTING_DOWN:
209 /*
210 * dStroy is a term coined by marketing to represent the low level
211 * secure erase.
212 */
213 case CARD_STATE_DSTROYING:
214 set_capacity(card->gendisk, 0);
215 break;
216 }
217}
218
219static void card_event_handler(struct work_struct *work)
220{
221 struct rsxx_cardinfo *card;
222 unsigned int state;
223 unsigned long flags;
224 int st;
225
226 card = container_of(work, struct rsxx_cardinfo, event_work);
227
228 if (unlikely(card->halt))
229 return;
230
231 /*
232 * Enable the interrupt now to avoid any weird race conditions where a
233 * state change might occur while rsxx_get_card_state() is
234 * processing a returned creg cmd.
235 */
236 spin_lock_irqsave(&card->irq_lock, flags);
237 rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
238 spin_unlock_irqrestore(&card->irq_lock, flags);
239
240 st = rsxx_get_card_state(card, &state);
241 if (st) {
242 dev_info(CARD_TO_DEV(card),
243 "Failed reading state after event.\n");
244 return;
245 }
246
247 if (card->state != state)
248 card_state_change(card, state);
249
250 if (card->creg_ctrl.creg_stats.stat & CREG_STAT_LOG_PENDING)
251 rsxx_read_hw_log(card);
252}
253
254
255char *rsxx_card_state_to_str(unsigned int state)
256{
257 static char *state_strings[] = {
258 "Unknown", "Shutdown", "Starting", "Formatting",
259 "Uninitialized", "Good", "Shutting Down",
260 "Fault", "Read Only Fault", "dStroying"
261 };
262
263 return state_strings[ffs(state)];
264}
265
266/*----------------- Card Operations -------------------*/
267static int card_shutdown(struct rsxx_cardinfo *card)
268{
269 unsigned int state;
270 signed long start;
271 const int timeout = msecs_to_jiffies(120000);
272 int st;
273
274 /* We can't issue a shutdown if the card is in a transition state */
275 start = jiffies;
276 do {
277 st = rsxx_get_card_state(card, &state);
278 if (st)
279 return st;
280 } while (state == CARD_STATE_STARTING &&
281 (jiffies - start < timeout));
282
283 if (state == CARD_STATE_STARTING)
284 return -ETIMEDOUT;
285
286 /* Only issue a shutdown if we need to */
287 if ((state != CARD_STATE_SHUTTING_DOWN) &&
288 (state != CARD_STATE_SHUTDOWN)) {
289 st = rsxx_issue_card_cmd(card, CARD_CMD_SHUTDOWN);
290 if (st)
291 return st;
292 }
293
294 start = jiffies;
295 do {
296 st = rsxx_get_card_state(card, &state);
297 if (st)
298 return st;
299 } while (state != CARD_STATE_SHUTDOWN &&
300 (jiffies - start < timeout));
301
302 if (state != CARD_STATE_SHUTDOWN)
303 return -ETIMEDOUT;
304
305 return 0;
306}
307
308/*----------------- Driver Initialization & Setup -------------------*/
309/* Returns: 0 if the driver is compatible with the device
310 -1 if the driver is NOT compatible with the device */
311static int rsxx_compatibility_check(struct rsxx_cardinfo *card)
312{
313 unsigned char pci_rev;
314
315 pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
316
317 if (pci_rev > RS70_PCI_REV_SUPPORTED)
318 return -1;
319 return 0;
320}
321
322static int __devinit rsxx_pci_probe(struct pci_dev *dev,
323 const struct pci_device_id *id)
324{
325 struct rsxx_cardinfo *card;
326 unsigned long flags;
327 int st;
328
329 dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
330
331 card = kzalloc(sizeof(*card), GFP_KERNEL);
332 if (!card)
333 return -ENOMEM;
334
335 card->dev = dev;
336 pci_set_drvdata(dev, card);
337
338 do {
339 if (!ida_pre_get(&rsxx_disk_ida, GFP_KERNEL)) {
340 st = -ENOMEM;
341 goto failed_ida_get;
342 }
343
344 spin_lock(&rsxx_ida_lock);
345 st = ida_get_new(&rsxx_disk_ida, &card->disk_id);
346 spin_unlock(&rsxx_ida_lock);
347 } while (st == -EAGAIN);
348
349 if (st)
350 goto failed_ida_get;
351
352 st = pci_enable_device(dev);
353 if (st)
354 goto failed_enable;
355
356 pci_set_master(dev);
357 pci_set_dma_max_seg_size(dev, RSXX_HW_BLK_SIZE);
358
359 st = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
360 if (st) {
361 dev_err(CARD_TO_DEV(card),
362			"No usable DMA configuration, aborting\n");
363 goto failed_dma_mask;
364 }
365
366 st = pci_request_regions(dev, DRIVER_NAME);
367 if (st) {
368 dev_err(CARD_TO_DEV(card),
369 "Failed to request memory region\n");
370 goto failed_request_regions;
371 }
372
373 if (pci_resource_len(dev, 0) == 0) {
374 dev_err(CARD_TO_DEV(card), "BAR0 has length 0!\n");
375 st = -ENOMEM;
376 goto failed_iomap;
377 }
378
379 card->regmap = pci_iomap(dev, 0, 0);
380 if (!card->regmap) {
381 dev_err(CARD_TO_DEV(card), "Failed to map BAR0\n");
382 st = -ENOMEM;
383 goto failed_iomap;
384 }
385
386 spin_lock_init(&card->irq_lock);
387 card->halt = 0;
388
389 spin_lock_irqsave(&card->irq_lock, flags);
390 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
391 spin_unlock_irqrestore(&card->irq_lock, flags);
392
393 if (!force_legacy) {
394 st = pci_enable_msi(dev);
395 if (st)
396 dev_warn(CARD_TO_DEV(card),
397 "Failed to enable MSI\n");
398 }
399
400 st = request_irq(dev->irq, rsxx_isr, IRQF_DISABLED | IRQF_SHARED,
401 DRIVER_NAME, card);
402 if (st) {
403 dev_err(CARD_TO_DEV(card),
404 "Failed requesting IRQ%d\n", dev->irq);
405 goto failed_irq;
406 }
407
408 /************* Setup Processor Command Interface *************/
409 rsxx_creg_setup(card);
410
411 spin_lock_irqsave(&card->irq_lock, flags);
412 rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
413 spin_unlock_irqrestore(&card->irq_lock, flags);
414
415 st = rsxx_compatibility_check(card);
416 if (st) {
417 dev_warn(CARD_TO_DEV(card),
418 "Incompatible driver detected. Please update the driver.\n");
419 st = -EINVAL;
420		goto failed_compatibility_check;
421 }
422
423 /************* Load Card Config *************/
424 st = rsxx_load_config(card);
425 if (st)
426 dev_err(CARD_TO_DEV(card),
427 "Failed loading card config\n");
428
429 /************* Setup DMA Engine *************/
430 st = rsxx_get_num_targets(card, &card->n_targets);
431 if (st)
432 dev_info(CARD_TO_DEV(card),
433 "Failed reading the number of DMA targets\n");
434
435 card->ctrl = kzalloc(card->n_targets * sizeof(*card->ctrl), GFP_KERNEL);
436 if (!card->ctrl) {
437 st = -ENOMEM;
438 goto failed_dma_setup;
439 }
440
441 st = rsxx_dma_setup(card);
442 if (st) {
443 dev_info(CARD_TO_DEV(card),
444 "Failed to setup DMA engine\n");
445 goto failed_dma_setup;
446 }
447
448 /************* Setup Card Event Handler *************/
449 INIT_WORK(&card->event_work, card_event_handler);
450
451 st = rsxx_setup_dev(card);
452 if (st)
453 goto failed_create_dev;
454
455 rsxx_get_card_state(card, &card->state);
456
457 dev_info(CARD_TO_DEV(card),
458 "card state: %s\n",
459 rsxx_card_state_to_str(card->state));
460
461 /*
462 * Now that the DMA Engine and devices have been setup,
463	 * we can enable the event interrupt (it kicks off actions in
464 * those layers so we couldn't enable it right away.)
465 */
466 spin_lock_irqsave(&card->irq_lock, flags);
467 rsxx_enable_ier_and_isr(card, CR_INTR_EVENT);
468 spin_unlock_irqrestore(&card->irq_lock, flags);
469
470 if (card->state == CARD_STATE_SHUTDOWN) {
471 st = rsxx_issue_card_cmd(card, CARD_CMD_STARTUP);
472 if (st)
473 dev_crit(CARD_TO_DEV(card),
474 "Failed issuing card startup\n");
475 } else if (card->state == CARD_STATE_GOOD ||
476 card->state == CARD_STATE_RD_ONLY_FAULT) {
477 st = rsxx_get_card_size8(card, &card->size8);
478 if (st)
479 card->size8 = 0;
480 }
481
482 rsxx_attach_dev(card);
483
484 return 0;
485
486failed_create_dev:
487 rsxx_dma_destroy(card);
488failed_dma_setup:
489failed_compatibility_check:
490 spin_lock_irqsave(&card->irq_lock, flags);
491 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
492 spin_unlock_irqrestore(&card->irq_lock, flags);
493 free_irq(dev->irq, card);
494 if (!force_legacy)
495 pci_disable_msi(dev);
496failed_irq:
497 pci_iounmap(dev, card->regmap);
498failed_iomap:
499 pci_release_regions(dev);
500failed_request_regions:
501failed_dma_mask:
502 pci_disable_device(dev);
503failed_enable:
504 spin_lock(&rsxx_ida_lock);
505 ida_remove(&rsxx_disk_ida, card->disk_id);
506 spin_unlock(&rsxx_ida_lock);
507failed_ida_get:
508 kfree(card);
509
510 return st;
511}
512
513static void __devexit rsxx_pci_remove(struct pci_dev *dev)
514{
515 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
516 unsigned long flags;
517 int st;
518 int i;
519
520 if (!card)
521 return;
522
523 dev_info(CARD_TO_DEV(card),
524 "Removing PCI-Flash SSD.\n");
525
526 rsxx_detach_dev(card);
527
528 for (i = 0; i < card->n_targets; i++) {
529 spin_lock_irqsave(&card->irq_lock, flags);
530 rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
531 spin_unlock_irqrestore(&card->irq_lock, flags);
532 }
533
534 st = card_shutdown(card);
535 if (st)
536 dev_crit(CARD_TO_DEV(card), "Shutdown failed!\n");
537
538 /* Sync outstanding event handlers. */
539 spin_lock_irqsave(&card->irq_lock, flags);
540 rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
541 spin_unlock_irqrestore(&card->irq_lock, flags);
542
543 /* Prevent work_structs from re-queuing themselves. */
544 card->halt = 1;
545
546 cancel_work_sync(&card->event_work);
547
548 rsxx_destroy_dev(card);
549 rsxx_dma_destroy(card);
550
551 spin_lock_irqsave(&card->irq_lock, flags);
552 rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
553 spin_unlock_irqrestore(&card->irq_lock, flags);
554 free_irq(dev->irq, card);
555
556 if (!force_legacy)
557 pci_disable_msi(dev);
558
559 rsxx_creg_destroy(card);
560
561 pci_iounmap(dev, card->regmap);
562
563 pci_disable_device(dev);
564 pci_release_regions(dev);
565
566 kfree(card);
567}
568
569static int rsxx_pci_suspend(struct pci_dev *dev, pm_message_t state)
570{
571 /* We don't support suspend at this time. */
572 return -ENOSYS;
573}
574
575static void rsxx_pci_shutdown(struct pci_dev *dev)
576{
577 struct rsxx_cardinfo *card = pci_get_drvdata(dev);
578 unsigned long flags;
579 int i;
580
581 if (!card)
582 return;
583
584 dev_info(CARD_TO_DEV(card), "Shutting down PCI-Flash SSD.\n");
585
586 rsxx_detach_dev(card);
587
588 for (i = 0; i < card->n_targets; i++) {
589 spin_lock_irqsave(&card->irq_lock, flags);
590 rsxx_disable_ier_and_isr(card, CR_INTR_DMA(i));
591 spin_unlock_irqrestore(&card->irq_lock, flags);
592 }
593
594 card_shutdown(card);
595}
596
597static DEFINE_PCI_DEVICE_TABLE(rsxx_pci_ids) = {
598 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70_FLASH)},
599 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS70D_FLASH)},
600 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS80_FLASH)},
601 {PCI_DEVICE(PCI_VENDOR_ID_TMS_IBM, PCI_DEVICE_ID_RS81_FLASH)},
602 {0,},
603};
604
605MODULE_DEVICE_TABLE(pci, rsxx_pci_ids);
606
607static struct pci_driver rsxx_pci_driver = {
608 .name = DRIVER_NAME,
609 .id_table = rsxx_pci_ids,
610 .probe = rsxx_pci_probe,
611 .remove = __devexit_p(rsxx_pci_remove),
612 .suspend = rsxx_pci_suspend,
613 .shutdown = rsxx_pci_shutdown,
614};
615
616static int __init rsxx_core_init(void)
617{
618 int st;
619
620 st = rsxx_dev_init();
621 if (st)
622 return st;
623
624 st = rsxx_dma_init();
625 if (st)
626 goto dma_init_failed;
627
628 st = rsxx_creg_init();
629 if (st)
630 goto creg_init_failed;
631
632 return pci_register_driver(&rsxx_pci_driver);
633
634creg_init_failed:
635 rsxx_dma_cleanup();
636dma_init_failed:
637 rsxx_dev_cleanup();
638
639 return st;
640}
641
642static void __exit rsxx_core_cleanup(void)
643{
644 pci_unregister_driver(&rsxx_pci_driver);
645 rsxx_creg_cleanup();
646 rsxx_dma_cleanup();
647 rsxx_dev_cleanup();
648}
649
650module_init(rsxx_core_init);
651module_exit(rsxx_core_cleanup);
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
new file mode 100644
index 000000000000..a31fd727e804
--- /dev/null
+++ b/drivers/block/rsxx/cregs.c
@@ -0,0 +1,743 @@
1/*
2* Filename: cregs.c
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#include <linux/completion.h>
26#include <linux/slab.h>
27
28#include "rsxx_priv.h"
29
30#define CREG_TIMEOUT_MSEC 10000
31
32typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
33 struct creg_cmd *cmd,
34 int st);
35
36struct creg_cmd {
37 struct list_head list;
38 creg_cmd_cb cb;
39 void *cb_private;
40 unsigned int op;
41 unsigned int addr;
42 int cnt8;
43 void *buf;
44 unsigned int stream;
45 unsigned int status;
46};
47
48static struct kmem_cache *creg_cmd_pool;
49
50
51/*------------ Private Functions --------------*/
52
53#if defined(__LITTLE_ENDIAN)
54#define LITTLE_ENDIAN 1
55#elif defined(__BIG_ENDIAN)
56#define LITTLE_ENDIAN 0
57#else
58#error Unknown endianness!!! Aborting...
59#endif
60
61static void copy_to_creg_data(struct rsxx_cardinfo *card,
62 int cnt8,
63 void *buf,
64 unsigned int stream)
65{
66 int i = 0;
67 u32 *data = buf;
68
69 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
70 /*
71 * Firmware implementation makes it necessary to byte swap on
72 * little endian processors.
73 */
74 if (LITTLE_ENDIAN && stream)
75 iowrite32be(data[i], card->regmap + CREG_DATA(i));
76 else
77 iowrite32(data[i], card->regmap + CREG_DATA(i));
78 }
79}
80
81
82static void copy_from_creg_data(struct rsxx_cardinfo *card,
83 int cnt8,
84 void *buf,
85 unsigned int stream)
86{
87 int i = 0;
88 u32 *data = buf;
89
90 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
91 /*
92 * Firmware implementation makes it necessary to byte swap on
93 * little endian processors.
94 */
95 if (LITTLE_ENDIAN && stream)
96 data[i] = ioread32be(card->regmap + CREG_DATA(i));
97 else
98 data[i] = ioread32(card->regmap + CREG_DATA(i));
99 }
100}
101
102static struct creg_cmd *pop_active_cmd(struct rsxx_cardinfo *card)
103{
104 struct creg_cmd *cmd;
105
106 /*
107 * Spin lock is needed because this can be called in atomic/interrupt
108 * context.
109 */
110 spin_lock_bh(&card->creg_ctrl.pop_lock);
111 cmd = card->creg_ctrl.active_cmd;
112 card->creg_ctrl.active_cmd = NULL;
113 spin_unlock_bh(&card->creg_ctrl.pop_lock);
114
115 return cmd;
116}
117
118static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
119{
120 iowrite32(cmd->addr, card->regmap + CREG_ADD);
121 iowrite32(cmd->cnt8, card->regmap + CREG_CNT);
122
123 if (cmd->op == CREG_OP_WRITE) {
124 if (cmd->buf)
125 copy_to_creg_data(card, cmd->cnt8,
126 cmd->buf, cmd->stream);
127 }
128
129 /* Data copy must complete before initiating the command. */
130 wmb();
131
132 /* Setting the valid bit will kick off the command. */
133 iowrite32(cmd->op, card->regmap + CREG_CMD);
134}
135
136static void creg_kick_queue(struct rsxx_cardinfo *card)
137{
138 if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
139 return;
140
141 card->creg_ctrl.active = 1;
142 card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
143 struct creg_cmd, list);
144 list_del(&card->creg_ctrl.active_cmd->list);
145 card->creg_ctrl.q_depth--;
146
147 /*
148 * We have to set the timer before we push the new command. Otherwise,
149 * we could create a race condition that would occur if the timer
150 * was not canceled, and expired after the new command was pushed,
151 * but before the command was issued to hardware.
152 */
153 mod_timer(&card->creg_ctrl.cmd_timer,
154 jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));
155
156 creg_issue_cmd(card, card->creg_ctrl.active_cmd);
157}
158
159static int creg_queue_cmd(struct rsxx_cardinfo *card,
160 unsigned int op,
161 unsigned int addr,
162 unsigned int cnt8,
163 void *buf,
164 int stream,
165 creg_cmd_cb callback,
166 void *cb_private)
167{
168 struct creg_cmd *cmd;
169
170 /* Don't queue stuff up if we're halted. */
171 if (unlikely(card->halt))
172 return -EINVAL;
173
174 if (card->creg_ctrl.reset)
175 return -EAGAIN;
176
177 if (cnt8 > MAX_CREG_DATA8)
178 return -EINVAL;
179
180 cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
181 if (!cmd)
182 return -ENOMEM;
183
184 INIT_LIST_HEAD(&cmd->list);
185
186 cmd->op = op;
187 cmd->addr = addr;
188 cmd->cnt8 = cnt8;
189 cmd->buf = buf;
190 cmd->stream = stream;
191 cmd->cb = callback;
192 cmd->cb_private = cb_private;
193 cmd->status = 0;
194
195 mutex_lock(&card->creg_ctrl.lock);
196 list_add_tail(&cmd->list, &card->creg_ctrl.queue);
197 card->creg_ctrl.q_depth++;
198 creg_kick_queue(card);
199 mutex_unlock(&card->creg_ctrl.lock);
200
201 return 0;
202}
203
204static void creg_cmd_timed_out(unsigned long data)
205{
206 struct rsxx_cardinfo *card = (struct rsxx_cardinfo *) data;
207 struct creg_cmd *cmd;
208
209 cmd = pop_active_cmd(card);
210 if (cmd == NULL) {
211 card->creg_ctrl.creg_stats.creg_timeout++;
212 dev_warn(CARD_TO_DEV(card),
213 "No active command associated with timeout!\n");
214 return;
215 }
216
217 if (cmd->cb)
218 cmd->cb(card, cmd, -ETIMEDOUT);
219
220 kmem_cache_free(creg_cmd_pool, cmd);
221
222 spin_lock(&card->creg_ctrl.pop_lock);
223 card->creg_ctrl.active = 0;
224 creg_kick_queue(card);
225 spin_unlock(&card->creg_ctrl.pop_lock);
226}
227
228
229static void creg_cmd_done(struct work_struct *work)
230{
231 struct rsxx_cardinfo *card;
232 struct creg_cmd *cmd;
233 int st = 0;
234
235 card = container_of(work, struct rsxx_cardinfo,
236 creg_ctrl.done_work);
237
238 /*
239	 * If the timer could not be cancelled (it has already fired), the
240	 * timeout handler races us to pop the active command.
241 */
242 if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
243 card->creg_ctrl.creg_stats.failed_cancel_timer++;
244
245 cmd = pop_active_cmd(card);
246 if (cmd == NULL) {
247 dev_err(CARD_TO_DEV(card),
248 "Spurious creg interrupt!\n");
249 return;
250 }
251
252 card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
253 cmd->status = card->creg_ctrl.creg_stats.stat;
254 if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
255 dev_err(CARD_TO_DEV(card),
256 "Invalid status on creg command\n");
257 /*
258 * At this point we're probably reading garbage from HW. Don't
259 * do anything else that could mess up the system and let
260 * the sync function return an error.
261 */
262 st = -EIO;
263 goto creg_done;
264 } else if (cmd->status & CREG_STAT_ERROR) {
265 st = -EIO;
266 }
267
268	if (cmd->op == CREG_OP_READ) {
269 unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);
270
271 /* Paranoid Sanity Checks */
272 if (!cmd->buf) {
273 dev_err(CARD_TO_DEV(card),
274 "Buffer not given for read.\n");
275 st = -EIO;
276 goto creg_done;
277 }
278 if (cnt8 != cmd->cnt8) {
279 dev_err(CARD_TO_DEV(card),
280 "count mismatch\n");
281 st = -EIO;
282 goto creg_done;
283 }
284
285 copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
286 }
287
288creg_done:
289 if (cmd->cb)
290 cmd->cb(card, cmd, st);
291
292 kmem_cache_free(creg_cmd_pool, cmd);
293
294 mutex_lock(&card->creg_ctrl.lock);
295 card->creg_ctrl.active = 0;
296 creg_kick_queue(card);
297 mutex_unlock(&card->creg_ctrl.lock);
298}
299
300static void creg_reset(struct rsxx_cardinfo *card)
301{
302 struct creg_cmd *cmd = NULL;
303 struct creg_cmd *tmp;
304 unsigned long flags;
305
306 if (!mutex_trylock(&card->creg_ctrl.reset_lock))
307 return;
308
309 card->creg_ctrl.reset = 1;
310 spin_lock_irqsave(&card->irq_lock, flags);
311 rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
312 spin_unlock_irqrestore(&card->irq_lock, flags);
313
314 dev_warn(CARD_TO_DEV(card),
315 "Resetting creg interface for recovery\n");
316
317 /* Cancel outstanding commands */
318 mutex_lock(&card->creg_ctrl.lock);
319 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
320 list_del(&cmd->list);
321 card->creg_ctrl.q_depth--;
322 if (cmd->cb)
323 cmd->cb(card, cmd, -ECANCELED);
324 kmem_cache_free(creg_cmd_pool, cmd);
325 }
326
327 cmd = card->creg_ctrl.active_cmd;
328 card->creg_ctrl.active_cmd = NULL;
329 if (cmd) {
330 if (timer_pending(&card->creg_ctrl.cmd_timer))
331 del_timer_sync(&card->creg_ctrl.cmd_timer);
332
333 if (cmd->cb)
334 cmd->cb(card, cmd, -ECANCELED);
335 kmem_cache_free(creg_cmd_pool, cmd);
336
337 card->creg_ctrl.active = 0;
338 }
339 mutex_unlock(&card->creg_ctrl.lock);
340
341 card->creg_ctrl.reset = 0;
342 spin_lock_irqsave(&card->irq_lock, flags);
343 rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
344 spin_unlock_irqrestore(&card->irq_lock, flags);
345
346 mutex_unlock(&card->creg_ctrl.reset_lock);
347}
348
349/* Used for synchronous accesses */
350struct creg_completion {
351 struct completion *cmd_done;
352 int st;
353 u32 creg_status;
354};
355
356static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
357 struct creg_cmd *cmd,
358 int st)
359{
360 struct creg_completion *cmd_completion;
361
362 cmd_completion = (struct creg_completion *)cmd->cb_private;
363 BUG_ON(!cmd_completion);
364
365 cmd_completion->st = st;
366 cmd_completion->creg_status = cmd->status;
367 complete(cmd_completion->cmd_done);
368}
369
370static int __issue_creg_rw(struct rsxx_cardinfo *card,
371 unsigned int op,
372 unsigned int addr,
373 unsigned int cnt8,
374 void *buf,
375 int stream,
376 unsigned int *hw_stat)
377{
378 DECLARE_COMPLETION_ONSTACK(cmd_done);
379 struct creg_completion completion;
380 unsigned long timeout;
381 int st;
382
383 INIT_COMPLETION(cmd_done);
384 completion.cmd_done = &cmd_done;
385 completion.st = 0;
386 completion.creg_status = 0;
387
388 st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
389 &completion);
390 if (st)
391 return st;
392
393 timeout = msecs_to_jiffies((CREG_TIMEOUT_MSEC *
394 card->creg_ctrl.q_depth) + 20000);
395
396 /*
397 * The creg interface is guaranteed to complete. It has a timeout
398 * mechanism that will kick in if hardware does not respond.
399 */
400 st = wait_for_completion_timeout(completion.cmd_done, timeout);
401 if (st == 0) {
402 /*
403 * This is really bad, because the kernel timer did not
404 * expire and notify us of a timeout!
405 */
406 dev_crit(CARD_TO_DEV(card),
407 "cregs timer failed\n");
408 creg_reset(card);
409 return -EIO;
410 }
411
412 *hw_stat = completion.creg_status;
413
414 if (completion.st) {
415 dev_warn(CARD_TO_DEV(card),
416 "creg command failed(%d x%08x)\n",
417 completion.st, addr);
418 return completion.st;
419 }
420
421 return 0;
422}
423
424static int issue_creg_rw(struct rsxx_cardinfo *card,
425 u32 addr,
426 unsigned int size8,
427 void *data,
428 int stream,
429 int read)
430{
431 unsigned int hw_stat;
432 unsigned int xfer;
433 unsigned int op;
434 int st;
435
436 op = read ? CREG_OP_READ : CREG_OP_WRITE;
437
438 do {
439 xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
440
441 st = __issue_creg_rw(card, op, addr, xfer,
442 data, stream, &hw_stat);
443 if (st)
444 return st;
445
446 data = (void *)((char *)data + xfer);
447 addr += xfer;
448 size8 -= xfer;
449 } while (size8);
450
451 return 0;
452}
453
454/* ---------------------------- Public API ---------------------------------- */
455int rsxx_creg_write(struct rsxx_cardinfo *card,
456 u32 addr,
457 unsigned int size8,
458 void *data,
459 int byte_stream)
460{
461 return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
462}
463
464int rsxx_creg_read(struct rsxx_cardinfo *card,
465 u32 addr,
466 unsigned int size8,
467 void *data,
468 int byte_stream)
469{
470 return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
471}
472
473int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
474{
475 return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
476 sizeof(*state), state, 0);
477}
478
479int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
480{
481 unsigned int size;
482 int st;
483
484 st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
485 sizeof(size), &size, 0);
486 if (st)
487 return st;
488
489 *size8 = (u64)size * RSXX_HW_BLK_SIZE;
490 return 0;
491}
492
493int rsxx_get_num_targets(struct rsxx_cardinfo *card,
494 unsigned int *n_targets)
495{
496 return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
497 sizeof(*n_targets), n_targets, 0);
498}
499
500int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
501 u32 *capabilities)
502{
503 return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
504 sizeof(*capabilities), capabilities, 0);
505}
506
507int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
508{
509 return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
510 sizeof(cmd), &cmd, 0);
511}
512
513
514/*----------------- HW Log Functions -------------------*/
515static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
516{
517 static char level;
518
519 /*
520 * New messages start with "<#>", where # is the log level. Messages
521 * that extend past the log buffer will use the previous level
522 */
523 if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
524 level = str[1];
525 str += 3; /* Skip past the log level. */
526 len -= 3;
527 }
528
529 switch (level) {
530 case '0':
531 dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
532 break;
533 case '1':
534 dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
535 break;
536 case '2':
537 dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
538 break;
539 case '3':
540 dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
541 break;
542 case '4':
543 dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
544 break;
545 case '5':
546 dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
547 break;
548 case '6':
549 dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
550 break;
551 case '7':
552 dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
553 break;
554 default:
555 dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
556 break;
557 }
558}
559
560/*
561 * The substrncpy() function copies the string (up to count bytes) pointed to by src
562 * (including the terminating '\0' character) to dest. Returns the number of
563 * bytes copied to dest.
564 */
565static int substrncpy(char *dest, const char *src, int count)
566{
567 int max_cnt = count;
568
569 while (count) {
570 count--;
571 *dest = *src;
572 if (*dest == '\0')
573 break;
574 src++;
575 dest++;
576 }
577 return max_cnt - count;
578}
579
580
581static void read_hw_log_done(struct rsxx_cardinfo *card,
582 struct creg_cmd *cmd,
583 int st)
584{
585 char *buf;
586 char *log_str;
587 int cnt;
588 int len;
589 int off;
590
591 buf = cmd->buf;
592 off = 0;
593
594 /* Failed getting the log message */
595 if (st)
596 return;
597
598 while (off < cmd->cnt8) {
599 log_str = &card->log.buf[card->log.buf_len];
600 cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
601 len = substrncpy(log_str, &buf[off], cnt);
602
603 off += len;
604 card->log.buf_len += len;
605
606 /*
607 * Flush the log if we've hit the end of a message or if we've
608 * run out of buffer space.
609 */
610 if ((log_str[len - 1] == '\0') ||
611 (card->log.buf_len == LOG_BUF_SIZE8)) {
612 if (card->log.buf_len != 1) /* Don't log blank lines. */
613 hw_log_msg(card, card->log.buf,
614 card->log.buf_len);
615 card->log.buf_len = 0;
616 }
617
618 }
619
620 if (cmd->status & CREG_STAT_LOG_PENDING)
621 rsxx_read_hw_log(card);
622}
623
624int rsxx_read_hw_log(struct rsxx_cardinfo *card)
625{
626 int st;
627
628 st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
629 sizeof(card->log.tmp), card->log.tmp,
630 1, read_hw_log_done, NULL);
631 if (st)
632 dev_err(CARD_TO_DEV(card),
633 "Failed getting log text\n");
634
635 return st;
636}
637
638/*-------------- IOCTL REG Access ------------------*/
639static int issue_reg_cmd(struct rsxx_cardinfo *card,
640 struct rsxx_reg_access *cmd,
641 int read)
642{
643 unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
644
645 return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
646 cmd->stream, &cmd->stat);
647}
648
649int rsxx_reg_access(struct rsxx_cardinfo *card,
650 struct rsxx_reg_access __user *ucmd,
651 int read)
652{
653 struct rsxx_reg_access cmd;
654 int st;
655
656 st = copy_from_user(&cmd, ucmd, sizeof(cmd));
657 if (st)
658 return -EFAULT;
659
660 st = issue_reg_cmd(card, &cmd, read);
661 if (st)
662 return st;
663
664 st = put_user(cmd.stat, &ucmd->stat);
665 if (st)
666 return -EFAULT;
667
668 if (read) {
669 st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
670 if (st)
671 return -EFAULT;
672 }
673
674 return 0;
675}
676
677/*------------ Initialization & Setup --------------*/
678int rsxx_creg_setup(struct rsxx_cardinfo *card)
679{
680 card->creg_ctrl.active_cmd = NULL;
681
682 INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
683 mutex_init(&card->creg_ctrl.reset_lock);
684 INIT_LIST_HEAD(&card->creg_ctrl.queue);
685 mutex_init(&card->creg_ctrl.lock);
686 spin_lock_init(&card->creg_ctrl.pop_lock);
687 setup_timer(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out,
688 (unsigned long) card);
689
690 return 0;
691}
692
693void rsxx_creg_destroy(struct rsxx_cardinfo *card)
694{
695 struct creg_cmd *cmd;
696 struct creg_cmd *tmp;
697 int cnt = 0;
698
699 /* Cancel outstanding commands */
700 mutex_lock(&card->creg_ctrl.lock);
701 list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
702 list_del(&cmd->list);
703 if (cmd->cb)
704 cmd->cb(card, cmd, -ECANCELED);
705 kmem_cache_free(creg_cmd_pool, cmd);
706 cnt++;
707 }
708
709 if (cnt)
710 dev_info(CARD_TO_DEV(card),
711			"Canceled %d queued creg commands\n", cnt);
712
713 cmd = card->creg_ctrl.active_cmd;
714 card->creg_ctrl.active_cmd = NULL;
715 if (cmd) {
716 if (timer_pending(&card->creg_ctrl.cmd_timer))
717 del_timer_sync(&card->creg_ctrl.cmd_timer);
718
719 if (cmd->cb)
720 cmd->cb(card, cmd, -ECANCELED);
721 dev_info(CARD_TO_DEV(card),
722 "Canceled active creg command\n");
723 kmem_cache_free(creg_cmd_pool, cmd);
724 }
725 mutex_unlock(&card->creg_ctrl.lock);
726
727 cancel_work_sync(&card->creg_ctrl.done_work);
728}
729
730
731int rsxx_creg_init(void)
732{
733 creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
734 if (!creg_cmd_pool)
735 return -ENOMEM;
736
737 return 0;
738}
739
740void rsxx_creg_cleanup(void)
741{
742 kmem_cache_destroy(creg_cmd_pool);
743}
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
new file mode 100644
index 000000000000..96df053e0ed4
--- /dev/null
+++ b/drivers/block/rsxx/dev.c
@@ -0,0 +1,367 @@
1/*
2* Filename: dev.c
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#include <linux/kernel.h>
26#include <linux/interrupt.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/slab.h>
30
31#include <linux/hdreg.h>
32#include <linux/genhd.h>
33#include <linux/blkdev.h>
34#include <linux/bio.h>
35
36#include <linux/fs.h>
37
38#include "rsxx_priv.h"
39
40static unsigned int blkdev_minors = 64;
41module_param(blkdev_minors, uint, 0444);
42MODULE_PARM_DESC(blkdev_minors, "Number of minors (partitions)");
43
44/*
45 * For now I'm making this tweakable in case any applications hit this limit.
46 * If you see a "bio too big" error in the log you will need to raise this
47 * value.
48 */
49static unsigned int blkdev_max_hw_sectors = 1024;
50module_param(blkdev_max_hw_sectors, uint, 0444);
51MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");
52
53static unsigned int enable_blkdev = 1;
54module_param(enable_blkdev, uint, 0444);
55MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");
56
57
58struct rsxx_bio_meta {
59 struct bio *bio;
60 atomic_t pending_dmas;
61 atomic_t error;
62 unsigned long start_time;
63};
64
65static struct kmem_cache *bio_meta_pool;
66
67/*----------------- Block Device Operations -----------------*/
68static int rsxx_blkdev_ioctl(struct block_device *bdev,
69 fmode_t mode,
70 unsigned int cmd,
71 unsigned long arg)
72{
73 struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
74
75 switch (cmd) {
76 case RSXX_GETREG:
77 return rsxx_reg_access(card, (void __user *)arg, 1);
78 case RSXX_SETREG:
79 return rsxx_reg_access(card, (void __user *)arg, 0);
80 }
81
82 return -ENOTTY;
83}
84
85static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
86{
87 struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
88 u64 blocks = card->size8 >> 9;
89
90 /*
91 * get geometry: Fake it. I haven't found any drivers that set
92 * geo->start, so we won't either.
93 */
94 if (card->size8) {
95 geo->heads = 64;
96 geo->sectors = 16;
97 do_div(blocks, (geo->heads * geo->sectors));
98 geo->cylinders = blocks;
99 } else {
100 geo->heads = 0;
101 geo->sectors = 0;
102 geo->cylinders = 0;
103 }
104 return 0;
105}
106
107static const struct block_device_operations rsxx_fops = {
108 .owner = THIS_MODULE,
109 .getgeo = rsxx_getgeo,
110 .ioctl = rsxx_blkdev_ioctl,
111};
112
113static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
114{
115 struct hd_struct *part0 = &card->gendisk->part0;
116 int rw = bio_data_dir(bio);
117 int cpu;
118
119 cpu = part_stat_lock();
120
121 part_round_stats(cpu, part0);
122 part_inc_in_flight(part0, rw);
123
124 part_stat_unlock();
125}
126
127static void disk_stats_complete(struct rsxx_cardinfo *card,
128 struct bio *bio,
129 unsigned long start_time)
130{
131 struct hd_struct *part0 = &card->gendisk->part0;
132 unsigned long duration = jiffies - start_time;
133 int rw = bio_data_dir(bio);
134 int cpu;
135
136 cpu = part_stat_lock();
137
138 part_stat_add(cpu, part0, sectors[rw], bio_sectors(bio));
139 part_stat_inc(cpu, part0, ios[rw]);
140 part_stat_add(cpu, part0, ticks[rw], duration);
141
142 part_round_stats(cpu, part0);
143 part_dec_in_flight(part0, rw);
144
145 part_stat_unlock();
146}
147
148static void bio_dma_done_cb(struct rsxx_cardinfo *card,
149 void *cb_data,
150 unsigned int error)
151{
152 struct rsxx_bio_meta *meta = (struct rsxx_bio_meta *)cb_data;
153
154 if (error)
155 atomic_set(&meta->error, 1);
156
157 if (atomic_dec_and_test(&meta->pending_dmas)) {
158 disk_stats_complete(card, meta->bio, meta->start_time);
159
160 bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
161 kmem_cache_free(bio_meta_pool, meta);
162 }
163}
164
165static void rsxx_make_request(struct request_queue *q, struct bio *bio)
166{
167 struct rsxx_cardinfo *card = q->queuedata;
168 struct rsxx_bio_meta *bio_meta;
169 int st = -EINVAL;
170
171 might_sleep();
172
173 if (unlikely(card->halt)) {
174 st = -EFAULT;
175 goto req_err;
176 }
177
178 if (unlikely(card->dma_fault)) {
179		st = -EFAULT;
180 goto req_err;
181 }
182
183 if (bio->bi_size == 0) {
184 dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
185 goto req_err;
186 }
187
188 bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
189 if (!bio_meta) {
190 st = -ENOMEM;
191 goto req_err;
192 }
193
194 bio_meta->bio = bio;
195 atomic_set(&bio_meta->error, 0);
196 atomic_set(&bio_meta->pending_dmas, 0);
197 bio_meta->start_time = jiffies;
198
199 disk_stats_start(card, bio);
200
201 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
202 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
203 (u64)bio->bi_sector << 9, bio->bi_size);
204
205 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
206 bio_dma_done_cb, bio_meta);
207 if (st)
208 goto queue_err;
209
210 return;
211
212queue_err:
213 kmem_cache_free(bio_meta_pool, bio_meta);
214req_err:
215 bio_endio(bio, st);
216}
217
218/*----------------- Device Setup -------------------*/
219static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
220{
221 unsigned char pci_rev;
222
223 pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);
224
225 return (pci_rev >= RSXX_DISCARD_SUPPORT);
226}
227
228static unsigned short rsxx_get_logical_block_size(
229 struct rsxx_cardinfo *card)
230{
231 u32 capabilities = 0;
232 int st;
233
234 st = rsxx_get_card_capabilities(card, &capabilities);
235 if (st)
236 dev_warn(CARD_TO_DEV(card),
237 "Failed reading card capabilities register\n");
238
239 /* Earlier firmware did not have support for 512 byte accesses */
240 if (capabilities & CARD_CAP_SUBPAGE_WRITES)
241 return 512;
242 else
243 return RSXX_HW_BLK_SIZE;
244}
245
246int rsxx_attach_dev(struct rsxx_cardinfo *card)
247{
248 mutex_lock(&card->dev_lock);
249
250 /* The block device requires the stripe size from the config. */
251 if (enable_blkdev) {
252 if (card->config_valid)
253 set_capacity(card->gendisk, card->size8 >> 9);
254 else
255 set_capacity(card->gendisk, 0);
256 add_disk(card->gendisk);
257
258 card->bdev_attached = 1;
259 }
260
261 mutex_unlock(&card->dev_lock);
262
263 return 0;
264}
265
266void rsxx_detach_dev(struct rsxx_cardinfo *card)
267{
268 mutex_lock(&card->dev_lock);
269
270 if (card->bdev_attached) {
271 del_gendisk(card->gendisk);
272 card->bdev_attached = 0;
273 }
274
275 mutex_unlock(&card->dev_lock);
276}
277
278int rsxx_setup_dev(struct rsxx_cardinfo *card)
279{
280 unsigned short blk_size;
281
282 mutex_init(&card->dev_lock);
283
284 if (!enable_blkdev)
285 return 0;
286
287 card->major = register_blkdev(0, DRIVER_NAME);
288 if (card->major < 0) {
289 dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
290 return -ENOMEM;
291 }
292
293 card->queue = blk_alloc_queue(GFP_KERNEL);
294 if (!card->queue) {
295 dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
296 unregister_blkdev(card->major, DRIVER_NAME);
297 return -ENOMEM;
298 }
299
300 card->gendisk = alloc_disk(blkdev_minors);
301 if (!card->gendisk) {
302 dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
303 blk_cleanup_queue(card->queue);
304 unregister_blkdev(card->major, DRIVER_NAME);
305 return -ENOMEM;
306 }
307
308 blk_size = rsxx_get_logical_block_size(card);
309
310 blk_queue_make_request(card->queue, rsxx_make_request);
311 blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
312 blk_queue_dma_alignment(card->queue, blk_size - 1);
313 blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
314 blk_queue_logical_block_size(card->queue, blk_size);
315 blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
316
317 queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
318 if (rsxx_discard_supported(card)) {
319 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, card->queue);
320 blk_queue_max_discard_sectors(card->queue,
321 RSXX_HW_BLK_SIZE >> 9);
322 card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
323 card->queue->limits.discard_alignment = RSXX_HW_BLK_SIZE;
324 card->queue->limits.discard_zeroes_data = 1;
325 }
326
327 card->queue->queuedata = card;
328
329 snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
330 "rsxx%d", card->disk_id);
331 card->gendisk->driverfs_dev = &card->dev->dev;
332 card->gendisk->major = card->major;
333 card->gendisk->first_minor = 0;
334 card->gendisk->fops = &rsxx_fops;
335 card->gendisk->private_data = card;
336 card->gendisk->queue = card->queue;
337
338 return 0;
339}
340
341void rsxx_destroy_dev(struct rsxx_cardinfo *card)
342{
343 if (!enable_blkdev)
344 return;
345
346 put_disk(card->gendisk);
347 card->gendisk = NULL;
348
349 blk_cleanup_queue(card->queue);
350 unregister_blkdev(card->major, DRIVER_NAME);
351}
352
353int rsxx_dev_init(void)
354{
355 bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
356 if (!bio_meta_pool)
357 return -ENOMEM;
358
359 return 0;
360}
361
362void rsxx_dev_cleanup(void)
363{
364 kmem_cache_destroy(bio_meta_pool);
365}
366
367
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
new file mode 100644
index 000000000000..08da35ea1d85
--- /dev/null
+++ b/drivers/block/rsxx/dma.c
@@ -0,0 +1,997 @@
1/*
2* Filename: dma.c
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#include "rsxx_priv.h"
26
27struct rsxx_dma {
28 struct list_head list;
29 u8 cmd;
30	unsigned int laddr; /* Logical address on the RamSan */
31 struct {
32 u32 off;
33 u32 cnt;
34 } sub_page;
35 dma_addr_t dma_addr;
36 struct page *page;
37 unsigned int pg_off; /* Page Offset */
38 rsxx_dma_cb cb;
39 void *cb_data;
40};
41
42/* This timeout is used to detect a stalled DMA channel */
43#define DMA_ACTIVITY_TIMEOUT msecs_to_jiffies(10000)
44
45struct hw_status {
46 u8 status;
47 u8 tag;
48 __le16 count;
49 __le32 _rsvd2;
50 __le64 _rsvd3;
51} __packed;
52
53enum rsxx_dma_status {
54 DMA_SW_ERR = 0x1,
55 DMA_HW_FAULT = 0x2,
56 DMA_CANCELLED = 0x4,
57};
58
59struct hw_cmd {
60 u8 command;
61 u8 tag;
62 u8 _rsvd;
63 u8 sub_page; /* Bit[0:2]: 512byte offset */
64 /* Bit[4:6]: 512byte count */
65 __le32 device_addr;
66 __le64 host_addr;
67} __packed;
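
The sub_page byte packs the 512-byte offset and count for partial-block transfers, using the same encoding that rsxx_issue_dmas() builds later in this file. A quick worked example (the transfer size and offset are chosen only for illustration): a 1 KiB access starting 512 bytes into a 4 KiB hardware block has off = 1 and cnt = 2, so

	sub_page = ((2 & 0x7) << 4) | (1 & 0x7);	/* = 0x21 */
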
68
69enum rsxx_hw_cmd {
70 HW_CMD_BLK_DISCARD = 0x70,
71 HW_CMD_BLK_WRITE = 0x80,
72 HW_CMD_BLK_READ = 0xC0,
73 HW_CMD_BLK_RECON_READ = 0xE0,
74};
75
76enum rsxx_hw_status {
77 HW_STATUS_CRC = 0x01,
78 HW_STATUS_HARD_ERR = 0x02,
79 HW_STATUS_SOFT_ERR = 0x04,
80 HW_STATUS_FAULT = 0x08,
81};
82
83#define STATUS_BUFFER_SIZE8 4096
84#define COMMAND_BUFFER_SIZE8 4096
85
86static struct kmem_cache *rsxx_dma_pool;
87
88struct dma_tracker {
89 int next_tag;
90 struct rsxx_dma *dma;
91};
92
93#define DMA_TRACKER_LIST_SIZE8 (sizeof(struct dma_tracker_list) + \
94 (sizeof(struct dma_tracker) * RSXX_MAX_OUTSTANDING_CMDS))
95
96struct dma_tracker_list {
97 spinlock_t lock;
98 int head;
99 struct dma_tracker list[0];
100};
101
102
103/*----------------- Misc Utility Functions -------------------*/
104unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card)
105{
106 unsigned long long tgt_addr8;
107
108 tgt_addr8 = ((addr8 >> card->_stripe.upper_shift) &
109 card->_stripe.upper_mask) |
110 ((addr8) & card->_stripe.lower_mask);
111 do_div(tgt_addr8, RSXX_HW_BLK_SIZE);
112 return tgt_addr8;
113}
114
115unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8)
116{
117 unsigned int tgt;
118
119 tgt = (addr8 >> card->_stripe.target_shift) & card->_stripe.target_mask;
120
121 return tgt;
122}
123
124static void rsxx_dma_queue_reset(struct rsxx_cardinfo *card)
125{
126 /* Reset all DMA Command/Status Queues */
127 iowrite32(DMA_QUEUE_RESET, card->regmap + RESET);
128}
129
130static unsigned int get_dma_size(struct rsxx_dma *dma)
131{
132 if (dma->sub_page.cnt)
133 return dma->sub_page.cnt << 9;
134 else
135 return RSXX_HW_BLK_SIZE;
136}
137
138
139/*----------------- DMA Tracker -------------------*/
140static void set_tracker_dma(struct dma_tracker_list *trackers,
141 int tag,
142 struct rsxx_dma *dma)
143{
144 trackers->list[tag].dma = dma;
145}
146
147static struct rsxx_dma *get_tracker_dma(struct dma_tracker_list *trackers,
148 int tag)
149{
150 return trackers->list[tag].dma;
151}
152
153static int pop_tracker(struct dma_tracker_list *trackers)
154{
155 int tag;
156
157 spin_lock(&trackers->lock);
158 tag = trackers->head;
159 if (tag != -1) {
160 trackers->head = trackers->list[tag].next_tag;
161 trackers->list[tag].next_tag = -1;
162 }
163 spin_unlock(&trackers->lock);
164
165 return tag;
166}
167
168static void push_tracker(struct dma_tracker_list *trackers, int tag)
169{
170 spin_lock(&trackers->lock);
171 trackers->list[tag].next_tag = trackers->head;
172 trackers->head = tag;
173 trackers->list[tag].dma = NULL;
174 spin_unlock(&trackers->lock);
175}
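
Taken together, the tracker helpers implement a LIFO free list of command tags: next_tag chains the free entries, head points at the first free tag, and a popped tag stays bound to its in-flight DMA until push_tracker() returns it. A minimal hedged sketch of one command's lifetime, mirroring the issue/done paths below (locking and error handling elided):

	int tag = pop_tracker(ctrl->trackers);	/* -1 if all 255 tags are in flight */

	if (tag != -1) {
		set_tracker_dma(ctrl->trackers, tag, dma);
		/* ... hand the hardware command carrying 'tag' to the card ... */
		/* later, on completion: */
		dma = get_tracker_dma(ctrl->trackers, tag);
		push_tracker(ctrl->trackers, tag);
	}
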
176
177
178/*----------------- Interrupt Coalescing -------------*/
179/*
180 * Interrupt Coalescing Register Format:
181 * Interrupt Timer (64ns units) [15:0]
182 * Interrupt Count [24:16]
183 * Reserved [31:25]
184*/
185#define INTR_COAL_LATENCY_MASK (0x0000ffff)
186
187#define INTR_COAL_COUNT_SHIFT 16
188#define INTR_COAL_COUNT_BITS 9
189#define INTR_COAL_COUNT_MASK (((1 << INTR_COAL_COUNT_BITS) - 1) << \
190 INTR_COAL_COUNT_SHIFT)
191#define INTR_COAL_LATENCY_UNITS_NS 64
192
193
194static u32 dma_intr_coal_val(u32 mode, u32 count, u32 latency)
195{
196 u32 latency_units = latency / INTR_COAL_LATENCY_UNITS_NS;
197
198 if (mode == RSXX_INTR_COAL_DISABLED)
199 return 0;
200
201 return ((count << INTR_COAL_COUNT_SHIFT) & INTR_COAL_COUNT_MASK) |
202 (latency_units & INTR_COAL_LATENCY_MASK);
203
204}
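
As a worked example of the resulting layout (the count and latency values are chosen only for illustration): count = 16 and latency = 1024 ns gives latency_units = 1024 / 64 = 16, so dma_intr_coal_val() returns (16 << 16) | 16 = 0x00100010, i.e. a count field of 16 in bits [24:16] and a timer field of 16 (in 64 ns units) in bits [15:0], matching the register description above.
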
205
206static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
207{
208 int i;
209 u32 q_depth = 0;
210 u32 intr_coal;
211
212 if (card->config.data.intr_coal.mode != RSXX_INTR_COAL_AUTO_TUNE)
213 return;
214
215 for (i = 0; i < card->n_targets; i++)
216 q_depth += atomic_read(&card->ctrl[i].stats.hw_q_depth);
217
218 intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
219 q_depth / 2,
220 card->config.data.intr_coal.latency);
221 iowrite32(intr_coal, card->regmap + INTR_COAL);
222}
223
224/*----------------- RSXX DMA Handling -------------------*/
225static void rsxx_complete_dma(struct rsxx_cardinfo *card,
226 struct rsxx_dma *dma,
227 unsigned int status)
228{
229 if (status & DMA_SW_ERR)
230 printk_ratelimited(KERN_ERR
231 "SW Error in DMA(cmd x%02x, laddr x%08x)\n",
232 dma->cmd, dma->laddr);
233 if (status & DMA_HW_FAULT)
234 printk_ratelimited(KERN_ERR
235 "HW Fault in DMA(cmd x%02x, laddr x%08x)\n",
236 dma->cmd, dma->laddr);
237 if (status & DMA_CANCELLED)
238 printk_ratelimited(KERN_ERR
239 "DMA Cancelled(cmd x%02x, laddr x%08x)\n",
240 dma->cmd, dma->laddr);
241
242 if (dma->dma_addr)
243 pci_unmap_page(card->dev, dma->dma_addr, get_dma_size(dma),
244 dma->cmd == HW_CMD_BLK_WRITE ?
245 PCI_DMA_TODEVICE :
246 PCI_DMA_FROMDEVICE);
247
248 if (dma->cb)
249 dma->cb(card, dma->cb_data, status ? 1 : 0);
250
251 kmem_cache_free(rsxx_dma_pool, dma);
252}
253
254static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
255 struct rsxx_dma *dma)
256{
257 /*
258 * Requeued DMAs go to the front of the queue so they are issued
259 * first.
260 */
261 spin_lock(&ctrl->queue_lock);
262 list_add(&dma->list, &ctrl->queue);
263 spin_unlock(&ctrl->queue_lock);
264}
265
266static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
267 struct rsxx_dma *dma,
268 u8 hw_st)
269{
270 unsigned int status = 0;
271 int requeue_cmd = 0;
272
273 dev_dbg(CARD_TO_DEV(ctrl->card),
274 "Handling DMA error(cmd x%02x, laddr x%08x st:x%02x)\n",
275 dma->cmd, dma->laddr, hw_st);
276
277 if (hw_st & HW_STATUS_CRC)
278 ctrl->stats.crc_errors++;
279 if (hw_st & HW_STATUS_HARD_ERR)
280 ctrl->stats.hard_errors++;
281 if (hw_st & HW_STATUS_SOFT_ERR)
282 ctrl->stats.soft_errors++;
283
284 switch (dma->cmd) {
285 case HW_CMD_BLK_READ:
286 if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
287 if (ctrl->card->scrub_hard) {
288 dma->cmd = HW_CMD_BLK_RECON_READ;
289 requeue_cmd = 1;
290 ctrl->stats.reads_retried++;
291 } else {
292 status |= DMA_HW_FAULT;
293 ctrl->stats.reads_failed++;
294 }
295 } else if (hw_st & HW_STATUS_FAULT) {
296 status |= DMA_HW_FAULT;
297 ctrl->stats.reads_failed++;
298 }
299
300 break;
301 case HW_CMD_BLK_RECON_READ:
302 if (hw_st & (HW_STATUS_CRC | HW_STATUS_HARD_ERR)) {
303 /* Data could not be reconstructed. */
304 status |= DMA_HW_FAULT;
305 ctrl->stats.reads_failed++;
306 }
307
308 break;
309 case HW_CMD_BLK_WRITE:
310 status |= DMA_HW_FAULT;
311 ctrl->stats.writes_failed++;
312
313 break;
314 case HW_CMD_BLK_DISCARD:
315 status |= DMA_HW_FAULT;
316 ctrl->stats.discards_failed++;
317
318 break;
319 default:
320 dev_err(CARD_TO_DEV(ctrl->card),
321 "Unknown command in DMA!(cmd: x%02x "
322 			"laddr x%08x st: x%02x)\n",
323 dma->cmd, dma->laddr, hw_st);
324 status |= DMA_SW_ERR;
325
326 break;
327 }
328
329 if (requeue_cmd)
330 rsxx_requeue_dma(ctrl, dma);
331 else
332 rsxx_complete_dma(ctrl->card, dma, status);
333}
334
335static void dma_engine_stalled(unsigned long data)
336{
337 struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
338
339 if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
340 return;
341
342 if (ctrl->cmd.idx != ioread32(ctrl->regmap + SW_CMD_IDX)) {
343 /*
344 * The dma engine was stalled because the SW_CMD_IDX write
345 * was lost. Issue it again to recover.
346 */
347 dev_warn(CARD_TO_DEV(ctrl->card),
348 "SW_CMD_IDX write was lost, re-writing...\n");
349 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
350 mod_timer(&ctrl->activity_timer,
351 jiffies + DMA_ACTIVITY_TIMEOUT);
352 } else {
353 dev_warn(CARD_TO_DEV(ctrl->card),
354 "DMA channel %d has stalled, faulting interface.\n",
355 ctrl->id);
356 ctrl->card->dma_fault = 1;
357 }
358}
359
360static void rsxx_issue_dmas(struct work_struct *work)
361{
362 struct rsxx_dma_ctrl *ctrl;
363 struct rsxx_dma *dma;
364 int tag;
365 int cmds_pending = 0;
366 struct hw_cmd *hw_cmd_buf;
367
368 ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
369 hw_cmd_buf = ctrl->cmd.buf;
370
371 if (unlikely(ctrl->card->halt))
372 return;
373
374 while (1) {
375 spin_lock(&ctrl->queue_lock);
376 if (list_empty(&ctrl->queue)) {
377 spin_unlock(&ctrl->queue_lock);
378 break;
379 }
380 spin_unlock(&ctrl->queue_lock);
381
382 tag = pop_tracker(ctrl->trackers);
383 if (tag == -1)
384 break;
385
386 spin_lock(&ctrl->queue_lock);
387 dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
388 list_del(&dma->list);
389 ctrl->stats.sw_q_depth--;
390 spin_unlock(&ctrl->queue_lock);
391
392 /*
393 * This will catch any DMAs that slipped in right before the
394 * fault, but were queued after all the other DMAs were
395 * cancelled.
396 */
397 if (unlikely(ctrl->card->dma_fault)) {
398 push_tracker(ctrl->trackers, tag);
399 rsxx_complete_dma(ctrl->card, dma, DMA_CANCELLED);
400 continue;
401 }
402
403 set_tracker_dma(ctrl->trackers, tag, dma);
404 hw_cmd_buf[ctrl->cmd.idx].command = dma->cmd;
405 hw_cmd_buf[ctrl->cmd.idx].tag = tag;
406 hw_cmd_buf[ctrl->cmd.idx]._rsvd = 0;
407 hw_cmd_buf[ctrl->cmd.idx].sub_page =
408 ((dma->sub_page.cnt & 0x7) << 4) |
409 (dma->sub_page.off & 0x7);
410
411 hw_cmd_buf[ctrl->cmd.idx].device_addr =
412 cpu_to_le32(dma->laddr);
413
414 hw_cmd_buf[ctrl->cmd.idx].host_addr =
415 cpu_to_le64(dma->dma_addr);
416
417 dev_dbg(CARD_TO_DEV(ctrl->card),
418 "Issue DMA%d(laddr %d tag %d) to idx %d\n",
419 ctrl->id, dma->laddr, tag, ctrl->cmd.idx);
420
421 ctrl->cmd.idx = (ctrl->cmd.idx + 1) & RSXX_CS_IDX_MASK;
422 cmds_pending++;
423
424 if (dma->cmd == HW_CMD_BLK_WRITE)
425 ctrl->stats.writes_issued++;
426 else if (dma->cmd == HW_CMD_BLK_DISCARD)
427 ctrl->stats.discards_issued++;
428 else
429 ctrl->stats.reads_issued++;
430 }
431
432 /* Let HW know we've queued commands. */
433 if (cmds_pending) {
434 /*
435 * We must guarantee that the CPU writes to 'ctrl->cmd.buf'
436 * (which is in PCI-consistent system-memory) from the loop
437 * above make it into the coherency domain before the
438 * following PIO "trigger" updating the cmd.idx. A WMB is
439 * sufficient. We need not explicitly CPU cache-flush since
440 * the memory is a PCI-consistent (ie; coherent) mapping.
441 */
442 wmb();
443
444 atomic_add(cmds_pending, &ctrl->stats.hw_q_depth);
445 mod_timer(&ctrl->activity_timer,
446 jiffies + DMA_ACTIVITY_TIMEOUT);
447 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
448 }
449}
450
451static void rsxx_dma_done(struct work_struct *work)
452{
453 struct rsxx_dma_ctrl *ctrl;
454 struct rsxx_dma *dma;
455 unsigned long flags;
456 u16 count;
457 u8 status;
458 u8 tag;
459 struct hw_status *hw_st_buf;
460
461 ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
462 hw_st_buf = ctrl->status.buf;
463
464 if (unlikely(ctrl->card->halt) ||
465 unlikely(ctrl->card->dma_fault))
466 return;
467
468 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
469
470 while (count == ctrl->e_cnt) {
471 /*
472 * The read memory-barrier is necessary to keep aggressive
473 * processors/optimizers (such as the PPC Apple G5) from
474 * reordering the following status-buffer tag & status read
475 * *before* the count read on subsequent iterations of the
476 * loop!
477 */
478 rmb();
479
480 status = hw_st_buf[ctrl->status.idx].status;
481 tag = hw_st_buf[ctrl->status.idx].tag;
482
483 dma = get_tracker_dma(ctrl->trackers, tag);
484 if (dma == NULL) {
485 spin_lock_irqsave(&ctrl->card->irq_lock, flags);
486 rsxx_disable_ier(ctrl->card, CR_INTR_DMA_ALL);
487 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
488
489 dev_err(CARD_TO_DEV(ctrl->card),
490 "No tracker for tag %d "
491 "(idx %d id %d)\n",
492 tag, ctrl->status.idx, ctrl->id);
493 return;
494 }
495
496 dev_dbg(CARD_TO_DEV(ctrl->card),
497 "Completing DMA%d"
498 "(laddr x%x tag %d st: x%x cnt: x%04x) from idx %d.\n",
499 ctrl->id, dma->laddr, tag, status, count,
500 ctrl->status.idx);
501
502 atomic_dec(&ctrl->stats.hw_q_depth);
503
504 mod_timer(&ctrl->activity_timer,
505 jiffies + DMA_ACTIVITY_TIMEOUT);
506
507 if (status)
508 rsxx_handle_dma_error(ctrl, dma, status);
509 else
510 rsxx_complete_dma(ctrl->card, dma, 0);
511
512 push_tracker(ctrl->trackers, tag);
513
514 ctrl->status.idx = (ctrl->status.idx + 1) &
515 RSXX_CS_IDX_MASK;
516 ctrl->e_cnt++;
517
518 count = le16_to_cpu(hw_st_buf[ctrl->status.idx].count);
519 }
520
521 dma_intr_coal_auto_tune(ctrl->card);
522
523 if (atomic_read(&ctrl->stats.hw_q_depth) == 0)
524 del_timer_sync(&ctrl->activity_timer);
525
526 spin_lock_irqsave(&ctrl->card->irq_lock, flags);
527 rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
528 spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
529
530 spin_lock(&ctrl->queue_lock);
531 if (ctrl->stats.sw_q_depth)
532 queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
533 spin_unlock(&ctrl->queue_lock);
534}
535
536static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
537 struct list_head *q)
538{
539 struct rsxx_dma *dma;
540 struct rsxx_dma *tmp;
541 int cnt = 0;
542
543 list_for_each_entry_safe(dma, tmp, q, list) {
544 list_del(&dma->list);
545
546 if (dma->dma_addr)
547 pci_unmap_page(card->dev, dma->dma_addr,
548 get_dma_size(dma),
549 (dma->cmd == HW_CMD_BLK_WRITE) ?
550 PCI_DMA_TODEVICE :
551 PCI_DMA_FROMDEVICE);
552 kmem_cache_free(rsxx_dma_pool, dma);
553 cnt++;
554 }
555
556 return cnt;
557}
558
559static int rsxx_queue_discard(struct rsxx_cardinfo *card,
560 struct list_head *q,
561 unsigned int laddr,
562 rsxx_dma_cb cb,
563 void *cb_data)
564{
565 struct rsxx_dma *dma;
566
567 dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
568 if (!dma)
569 return -ENOMEM;
570
571 dma->cmd = HW_CMD_BLK_DISCARD;
572 dma->laddr = laddr;
573 dma->dma_addr = 0;
574 dma->sub_page.off = 0;
575 dma->sub_page.cnt = 0;
576 dma->page = NULL;
577 dma->pg_off = 0;
578 dma->cb = cb;
579 dma->cb_data = cb_data;
580
581 dev_dbg(CARD_TO_DEV(card), "Queuing[D] laddr %x\n", dma->laddr);
582
583 list_add_tail(&dma->list, q);
584
585 return 0;
586}
587
588static int rsxx_queue_dma(struct rsxx_cardinfo *card,
589 struct list_head *q,
590 int dir,
591 unsigned int dma_off,
592 unsigned int dma_len,
593 unsigned int laddr,
594 struct page *page,
595 unsigned int pg_off,
596 rsxx_dma_cb cb,
597 void *cb_data)
598{
599 struct rsxx_dma *dma;
600
601 dma = kmem_cache_alloc(rsxx_dma_pool, GFP_KERNEL);
602 if (!dma)
603 return -ENOMEM;
604
605 dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
606 dir ? PCI_DMA_TODEVICE :
607 PCI_DMA_FROMDEVICE);
608 if (!dma->dma_addr) {
609 kmem_cache_free(rsxx_dma_pool, dma);
610 return -ENOMEM;
611 }
612
613 dma->cmd = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
614 dma->laddr = laddr;
615 dma->sub_page.off = (dma_off >> 9);
616 dma->sub_page.cnt = (dma_len >> 9);
617 dma->page = page;
618 dma->pg_off = pg_off;
619 dma->cb = cb;
620 dma->cb_data = cb_data;
621
622 dev_dbg(CARD_TO_DEV(card),
623 "Queuing[%c] laddr %x off %d cnt %d page %p pg_off %d\n",
624 dir ? 'W' : 'R', dma->laddr, dma->sub_page.off,
625 dma->sub_page.cnt, dma->page, dma->pg_off);
626
627 /* Queue the DMA */
628 list_add_tail(&dma->list, q);
629
630 return 0;
631}
632
633int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
634 struct bio *bio,
635 atomic_t *n_dmas,
636 rsxx_dma_cb cb,
637 void *cb_data)
638{
639 struct list_head dma_list[RSXX_MAX_TARGETS];
640 struct bio_vec *bvec;
641 unsigned long long addr8;
642 unsigned int laddr;
643 unsigned int bv_len;
644 unsigned int bv_off;
645 unsigned int dma_off;
646 unsigned int dma_len;
647 int dma_cnt[RSXX_MAX_TARGETS];
648 int tgt;
649 int st;
650 int i;
651
652 addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
653 atomic_set(n_dmas, 0);
654
655 for (i = 0; i < card->n_targets; i++) {
656 INIT_LIST_HEAD(&dma_list[i]);
657 dma_cnt[i] = 0;
658 }
659
660 if (bio->bi_rw & REQ_DISCARD) {
661 bv_len = bio->bi_size;
662
663 while (bv_len > 0) {
664 tgt = rsxx_get_dma_tgt(card, addr8);
665 laddr = rsxx_addr8_to_laddr(addr8, card);
666
667 st = rsxx_queue_discard(card, &dma_list[tgt], laddr,
668 cb, cb_data);
669 if (st)
670 goto bvec_err;
671
672 dma_cnt[tgt]++;
673 atomic_inc(n_dmas);
674 addr8 += RSXX_HW_BLK_SIZE;
675 bv_len -= RSXX_HW_BLK_SIZE;
676 }
677 } else {
678 bio_for_each_segment(bvec, bio, i) {
679 bv_len = bvec->bv_len;
680 bv_off = bvec->bv_offset;
681
682 while (bv_len > 0) {
683 tgt = rsxx_get_dma_tgt(card, addr8);
684 laddr = rsxx_addr8_to_laddr(addr8, card);
685 dma_off = addr8 & RSXX_HW_BLK_MASK;
686 dma_len = min(bv_len,
687 RSXX_HW_BLK_SIZE - dma_off);
688
689 st = rsxx_queue_dma(card, &dma_list[tgt],
690 bio_data_dir(bio),
691 dma_off, dma_len,
692 laddr, bvec->bv_page,
693 bv_off, cb, cb_data);
694 if (st)
695 goto bvec_err;
696
697 dma_cnt[tgt]++;
698 atomic_inc(n_dmas);
699 addr8 += dma_len;
700 bv_off += dma_len;
701 bv_len -= dma_len;
702 }
703 }
704 }
705
706 for (i = 0; i < card->n_targets; i++) {
707 if (!list_empty(&dma_list[i])) {
708 spin_lock(&card->ctrl[i].queue_lock);
709 card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
710 list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
711 spin_unlock(&card->ctrl[i].queue_lock);
712
713 queue_work(card->ctrl[i].issue_wq,
714 &card->ctrl[i].issue_dma_work);
715 }
716 }
717
718 return 0;
719
720bvec_err:
721 for (i = 0; i < card->n_targets; i++)
722 rsxx_cleanup_dma_queue(card, &dma_list[i]);
723
724 return st;
725}
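
To make the splitting loop concrete, a worked example under illustrative assumptions (4 KiB hardware blocks per RSXX_HW_BLK_SIZE, 8 targets, 4 KiB stripes): a 1 KiB write beginning at sector 7 starts at addr8 = 7 * 512 = 0xe00 and straddles a block boundary, so it is queued as two DMAs:

	/* chunk 1: dma_off = 0xe00, dma_len = 512
	 *          sub_page.off = 7, sub_page.cnt = 1, target 0, laddr 0
	 * chunk 2: dma_off = 0x000, dma_len = 512
	 *          sub_page.off = 0, sub_page.cnt = 1, target 1, laddr 0
	 */

Each chunk is queued on its own per-target list and then handed to that target's issue workqueue, which is how a single bio can fan out across several DMA channels.
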
726
727
728/*----------------- DMA Engine Initialization & Setup -------------------*/
729static int rsxx_dma_ctrl_init(struct pci_dev *dev,
730 struct rsxx_dma_ctrl *ctrl)
731{
732 int i;
733
734 memset(&ctrl->stats, 0, sizeof(ctrl->stats));
735
736 ctrl->status.buf = pci_alloc_consistent(dev, STATUS_BUFFER_SIZE8,
737 &ctrl->status.dma_addr);
738 ctrl->cmd.buf = pci_alloc_consistent(dev, COMMAND_BUFFER_SIZE8,
739 &ctrl->cmd.dma_addr);
740 if (ctrl->status.buf == NULL || ctrl->cmd.buf == NULL)
741 return -ENOMEM;
742
743 ctrl->trackers = vmalloc(DMA_TRACKER_LIST_SIZE8);
744 if (!ctrl->trackers)
745 return -ENOMEM;
746
747 ctrl->trackers->head = 0;
748 for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
749 ctrl->trackers->list[i].next_tag = i + 1;
750 ctrl->trackers->list[i].dma = NULL;
751 }
752 ctrl->trackers->list[RSXX_MAX_OUTSTANDING_CMDS-1].next_tag = -1;
753 spin_lock_init(&ctrl->trackers->lock);
754
755 spin_lock_init(&ctrl->queue_lock);
756 INIT_LIST_HEAD(&ctrl->queue);
757
758 setup_timer(&ctrl->activity_timer, dma_engine_stalled,
759 (unsigned long)ctrl);
760
761 ctrl->issue_wq = alloc_ordered_workqueue(DRIVER_NAME"_issue", 0);
762 if (!ctrl->issue_wq)
763 return -ENOMEM;
764
765 ctrl->done_wq = alloc_ordered_workqueue(DRIVER_NAME"_done", 0);
766 if (!ctrl->done_wq)
767 return -ENOMEM;
768
769 INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
770 INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
771
772 memset(ctrl->status.buf, 0xac, STATUS_BUFFER_SIZE8);
773 iowrite32(lower_32_bits(ctrl->status.dma_addr),
774 ctrl->regmap + SB_ADD_LO);
775 iowrite32(upper_32_bits(ctrl->status.dma_addr),
776 ctrl->regmap + SB_ADD_HI);
777
778 memset(ctrl->cmd.buf, 0x83, COMMAND_BUFFER_SIZE8);
779 iowrite32(lower_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_LO);
780 iowrite32(upper_32_bits(ctrl->cmd.dma_addr), ctrl->regmap + CB_ADD_HI);
781
782 ctrl->status.idx = ioread32(ctrl->regmap + HW_STATUS_CNT);
783 if (ctrl->status.idx > RSXX_MAX_OUTSTANDING_CMDS) {
784 dev_crit(&dev->dev, "Failed reading status cnt x%x\n",
785 ctrl->status.idx);
786 return -EINVAL;
787 }
788 iowrite32(ctrl->status.idx, ctrl->regmap + HW_STATUS_CNT);
789 iowrite32(ctrl->status.idx, ctrl->regmap + SW_STATUS_CNT);
790
791 ctrl->cmd.idx = ioread32(ctrl->regmap + HW_CMD_IDX);
792 if (ctrl->cmd.idx > RSXX_MAX_OUTSTANDING_CMDS) {
793 dev_crit(&dev->dev, "Failed reading cmd cnt x%x\n",
794 			ctrl->cmd.idx);
795 return -EINVAL;
796 }
797 iowrite32(ctrl->cmd.idx, ctrl->regmap + HW_CMD_IDX);
798 iowrite32(ctrl->cmd.idx, ctrl->regmap + SW_CMD_IDX);
799
800 wmb();
801
802 return 0;
803}
804
805int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
806 unsigned int stripe_size8)
807{
808 if (!is_power_of_2(stripe_size8)) {
809 dev_err(CARD_TO_DEV(card),
810 "stripe_size is NOT a power of 2!\n");
811 return -EINVAL;
812 }
813
814 card->_stripe.lower_mask = stripe_size8 - 1;
815
816 card->_stripe.upper_mask = ~(card->_stripe.lower_mask);
817 card->_stripe.upper_shift = ffs(card->n_targets) - 1;
818
819 card->_stripe.target_mask = card->n_targets - 1;
820 card->_stripe.target_shift = ffs(stripe_size8) - 1;
821
822 dev_dbg(CARD_TO_DEV(card), "_stripe.lower_mask = x%016llx\n",
823 card->_stripe.lower_mask);
824 dev_dbg(CARD_TO_DEV(card), "_stripe.upper_shift = x%016llx\n",
825 card->_stripe.upper_shift);
826 dev_dbg(CARD_TO_DEV(card), "_stripe.upper_mask = x%016llx\n",
827 card->_stripe.upper_mask);
828 dev_dbg(CARD_TO_DEV(card), "_stripe.target_mask = x%016llx\n",
829 card->_stripe.target_mask);
830 dev_dbg(CARD_TO_DEV(card), "_stripe.target_shift = x%016llx\n",
831 card->_stripe.target_shift);
832
833 return 0;
834}
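
To make these masks and shifts concrete, a hedged worked example assuming stripe_size8 = RSXX_HW_BLK_SIZE (4096) and n_targets = 8 (both values are illustrative):

	/* lower_mask   = 0xfff
	 * upper_mask   = ~0xfff
	 * upper_shift  = ffs(8) - 1    = 3
	 * target_mask  = 7
	 * target_shift = ffs(4096) - 1 = 12
	 *
	 * For addr8 = 0x12345678:
	 *   rsxx_get_dma_tgt()    = (0x12345678 >> 12) & 7 = 5
	 *   rsxx_addr8_to_laddr() = (((0x12345678 >> 3) & ~0xfff) |
	 *                            (0x12345678 & 0xfff)) / 4096 = 0x2468
	 */
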
835
836int rsxx_dma_configure(struct rsxx_cardinfo *card)
837{
838 u32 intr_coal;
839
840 intr_coal = dma_intr_coal_val(card->config.data.intr_coal.mode,
841 card->config.data.intr_coal.count,
842 card->config.data.intr_coal.latency);
843 iowrite32(intr_coal, card->regmap + INTR_COAL);
844
845 return rsxx_dma_stripe_setup(card, card->config.data.stripe_size);
846}
847
848int rsxx_dma_setup(struct rsxx_cardinfo *card)
849{
850 unsigned long flags;
851 int st;
852 int i;
853
854 dev_info(CARD_TO_DEV(card),
855 "Initializing %d DMA targets\n",
856 card->n_targets);
857
858 	/* The regmap is divided into 4K chunks, one for each DMA channel. */
859 for (i = 0; i < card->n_targets; i++)
860 card->ctrl[i].regmap = card->regmap + (i * 4096);
861
862 card->dma_fault = 0;
863
864 /* Reset the DMA queues */
865 rsxx_dma_queue_reset(card);
866
867 /************* Setup DMA Control *************/
868 for (i = 0; i < card->n_targets; i++) {
869 st = rsxx_dma_ctrl_init(card->dev, &card->ctrl[i]);
870 if (st)
871 goto failed_dma_setup;
872
873 card->ctrl[i].card = card;
874 card->ctrl[i].id = i;
875 }
876
877 card->scrub_hard = 1;
878
879 if (card->config_valid)
880 rsxx_dma_configure(card);
881
882 /* Enable the interrupts after all setup has completed. */
883 for (i = 0; i < card->n_targets; i++) {
884 spin_lock_irqsave(&card->irq_lock, flags);
885 rsxx_enable_ier_and_isr(card, CR_INTR_DMA(i));
886 spin_unlock_irqrestore(&card->irq_lock, flags);
887 }
888
889 return 0;
890
891failed_dma_setup:
892 for (i = 0; i < card->n_targets; i++) {
893 struct rsxx_dma_ctrl *ctrl = &card->ctrl[i];
894
895 if (ctrl->issue_wq) {
896 destroy_workqueue(ctrl->issue_wq);
897 ctrl->issue_wq = NULL;
898 }
899
900 if (ctrl->done_wq) {
901 destroy_workqueue(ctrl->done_wq);
902 ctrl->done_wq = NULL;
903 }
904
905 if (ctrl->trackers)
906 vfree(ctrl->trackers);
907
908 if (ctrl->status.buf)
909 pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
910 ctrl->status.buf,
911 ctrl->status.dma_addr);
912 if (ctrl->cmd.buf)
913 pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
914 ctrl->cmd.buf, ctrl->cmd.dma_addr);
915 }
916
917 return st;
918}
919
920
921void rsxx_dma_destroy(struct rsxx_cardinfo *card)
922{
923 struct rsxx_dma_ctrl *ctrl;
924 struct rsxx_dma *dma;
925 int i, j;
926 int cnt = 0;
927
928 for (i = 0; i < card->n_targets; i++) {
929 ctrl = &card->ctrl[i];
930
931 if (ctrl->issue_wq) {
932 destroy_workqueue(ctrl->issue_wq);
933 ctrl->issue_wq = NULL;
934 }
935
936 if (ctrl->done_wq) {
937 destroy_workqueue(ctrl->done_wq);
938 ctrl->done_wq = NULL;
939 }
940
941 if (timer_pending(&ctrl->activity_timer))
942 del_timer_sync(&ctrl->activity_timer);
943
944 /* Clean up the DMA queue */
945 spin_lock(&ctrl->queue_lock);
946 cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
947 spin_unlock(&ctrl->queue_lock);
948
949 if (cnt)
950 dev_info(CARD_TO_DEV(card),
951 "Freed %d queued DMAs on channel %d\n",
952 cnt, i);
953
954 /* Clean up issued DMAs */
955 for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
956 dma = get_tracker_dma(ctrl->trackers, j);
957 if (dma) {
958 pci_unmap_page(card->dev, dma->dma_addr,
959 get_dma_size(dma),
960 (dma->cmd == HW_CMD_BLK_WRITE) ?
961 PCI_DMA_TODEVICE :
962 PCI_DMA_FROMDEVICE);
963 kmem_cache_free(rsxx_dma_pool, dma);
964 cnt++;
965 }
966 }
967
968 if (cnt)
969 dev_info(CARD_TO_DEV(card),
970 "Freed %d pending DMAs on channel %d\n",
971 cnt, i);
972
973 vfree(ctrl->trackers);
974
975 pci_free_consistent(card->dev, STATUS_BUFFER_SIZE8,
976 ctrl->status.buf, ctrl->status.dma_addr);
977 pci_free_consistent(card->dev, COMMAND_BUFFER_SIZE8,
978 ctrl->cmd.buf, ctrl->cmd.dma_addr);
979 }
980}
981
982
983int rsxx_dma_init(void)
984{
985 rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
986 if (!rsxx_dma_pool)
987 return -ENOMEM;
988
989 return 0;
990}
991
992
993void rsxx_dma_cleanup(void)
994{
995 kmem_cache_destroy(rsxx_dma_pool);
996}
997
diff --git a/drivers/block/rsxx/rsxx.h b/drivers/block/rsxx/rsxx.h
new file mode 100644
index 000000000000..9581e136a042
--- /dev/null
+++ b/drivers/block/rsxx/rsxx.h
@@ -0,0 +1,43 @@
1/*
2* Filename: rsxx.h
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#ifndef __RSXX_H__
26#define __RSXX_H__
27
28/*----------------- IOCTL Definitions -------------------*/
29
30struct rsxx_reg_access {
31 __u32 addr;
32 __u32 cnt;
33 __u32 stat;
34 __u32 stream;
35 __u32 data[8];
36};
37
38#define RSXX_IOC_MAGIC 'r'
39
40#define RSXX_GETREG _IOWR(RSXX_IOC_MAGIC, 0x20, struct rsxx_reg_access)
41#define RSXX_SETREG _IOWR(RSXX_IOC_MAGIC, 0x21, struct rsxx_reg_access)
42
43#endif /* __RSXX_H__ */
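
To show how these definitions might be used from user space, here is a minimal hedged sketch. It assumes the ioctls are accepted on the card's block device node (dev.c names the disk rsxx<N>, so /dev/rsxx0 for the first card); the node path and register address below are illustrative only, not taken from the driver.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/types.h>
	#include <linux/ioctl.h>
	#include "rsxx.h"		/* struct rsxx_reg_access, RSXX_GETREG */

	int main(void)
	{
		struct rsxx_reg_access reg = {
			.addr	= 0x0,	/* illustrative register address */
			.cnt	= 4,	/* read four bytes */
			.stream	= 0,
		};
		int fd = open("/dev/rsxx0", O_RDWR);	/* assumed node name */

		if (fd < 0)
			return 1;
		if (ioctl(fd, RSXX_GETREG, &reg) == 0)
			printf("stat x%x data[0] x%x\n", reg.stat, reg.data[0]);
		close(fd);
		return 0;
	}
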
diff --git a/drivers/block/rsxx/rsxx_cfg.h b/drivers/block/rsxx/rsxx_cfg.h
new file mode 100644
index 000000000000..c025fe5fdb70
--- /dev/null
+++ b/drivers/block/rsxx/rsxx_cfg.h
@@ -0,0 +1,72 @@
1/*
2* Filename: rsxx_cfg.h
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#ifndef __RSXX_CFG_H__
26#define __RSXX_CFG_H__
27
28/* NOTE: Config values will be saved in network byte order (i.e. Big endian) */
29#include <linux/types.h>
30
31/*
32 * The card config version must match the driver's expected version. If it does
33 * not, the DMA interfaces will not be attached and the user will need to
34 * initialize/upgrade the card configuration using the card config utility.
35 */
36#define RSXX_CFG_VERSION 4
37
38struct card_cfg_hdr {
39 __u32 version;
40 __u32 crc;
41};
42
43struct card_cfg_data {
44 __u32 block_size;
45 __u32 stripe_size;
46 __u32 vendor_id;
47 __u32 cache_order;
48 struct {
49 __u32 mode; /* Disabled, manual, auto-tune... */
50 __u32 count; /* Number of intr to coalesce */
51 __u32 latency;/* Max wait time (in ns) */
52 } intr_coal;
53};
54
55struct rsxx_card_cfg {
56 struct card_cfg_hdr hdr;
57 struct card_cfg_data data;
58};
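
Because the on-card copy is big endian (see the note above), fields have to be byte-swapped into host order when the configuration is read back; the real handling lives in config.c's load/save paths. A hedged sketch of what such a conversion could look like (the helper name is invented and only a few fields are shown):

	#include <asm/byteorder.h>	/* be32_to_cpu() */

	static void card_cfg_to_cpu(struct rsxx_card_cfg *cfg)
	{
		cfg->hdr.version	= be32_to_cpu(cfg->hdr.version);
		cfg->data.block_size	= be32_to_cpu(cfg->data.block_size);
		cfg->data.stripe_size	= be32_to_cpu(cfg->data.stripe_size);
	}
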
59
60/* Vendor ID Values */
61#define RSXX_VENDOR_ID_TMS_IBM 0
62#define RSXX_VENDOR_ID_DSI 1
63#define RSXX_VENDOR_COUNT 2
64
65/* Interrupt Coalescing Values */
66#define RSXX_INTR_COAL_DISABLED 0
67#define RSXX_INTR_COAL_EXPLICIT 1
68#define RSXX_INTR_COAL_AUTO_TUNE 2
69
70
71#endif /* __RSXX_CFG_H__ */
72
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
new file mode 100644
index 000000000000..3887e496f54b
--- /dev/null
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -0,0 +1,408 @@
1/*
2* Filename: rsxx_priv.h
3*
4*
5* Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6* Philip Kelleher <pjk1939@linux.vnet.ibm.com>
7*
8* (C) Copyright 2013 IBM Corporation
9*
10* This program is free software; you can redistribute it and/or
11* modify it under the terms of the GNU General Public License as
12* published by the Free Software Foundation; either version 2 of the
13* License, or (at your option) any later version.
14*
15* This program is distributed in the hope that it will be useful, but
16* WITHOUT ANY WARRANTY; without even the implied warranty of
17* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18* General Public License for more details.
19*
20* You should have received a copy of the GNU General Public License
21* along with this program; if not, write to the Free Software Foundation,
22* Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23*/
24
25#ifndef __RSXX_PRIV_H__
26#define __RSXX_PRIV_H__
27
28#include <linux/version.h>
29#include <linux/semaphore.h>
30
31#include <linux/fs.h>
32#include <linux/interrupt.h>
33#include <linux/mutex.h>
34#include <linux/pci.h>
35#include <linux/spinlock.h>
36#include <linux/sysfs.h>
37#include <linux/workqueue.h>
38#include <linux/bio.h>
39#include <linux/vmalloc.h>
40#include <linux/timer.h>
41#include <linux/ioctl.h>
42
43#include "rsxx.h"
44#include "rsxx_cfg.h"
45
46struct proc_cmd;
47
48#define PCI_VENDOR_ID_TMS_IBM 0x15B6
49#define PCI_DEVICE_ID_RS70_FLASH 0x0019
50#define PCI_DEVICE_ID_RS70D_FLASH 0x001A
51#define PCI_DEVICE_ID_RS80_FLASH 0x001C
52#define PCI_DEVICE_ID_RS81_FLASH 0x001E
53
54#define RS70_PCI_REV_SUPPORTED 4
55
56#define DRIVER_NAME "rsxx"
57#define DRIVER_VERSION "3.7"
58
59/* Block size is 4096 */
60#define RSXX_HW_BLK_SHIFT 12
61#define RSXX_HW_BLK_SIZE (1 << RSXX_HW_BLK_SHIFT)
62#define RSXX_HW_BLK_MASK (RSXX_HW_BLK_SIZE - 1)
63
64#define MAX_CREG_DATA8 32
65#define LOG_BUF_SIZE8 128
66
67#define RSXX_MAX_OUTSTANDING_CMDS 255
68#define RSXX_CS_IDX_MASK 0xff
69
70#define RSXX_MAX_TARGETS 8
71
72struct dma_tracker_list;
73
74/* DMA Command/Status Buffer structure */
75struct rsxx_cs_buffer {
76 dma_addr_t dma_addr;
77 void *buf;
78 u32 idx;
79};
80
81struct rsxx_dma_stats {
82 u32 crc_errors;
83 u32 hard_errors;
84 u32 soft_errors;
85 u32 writes_issued;
86 u32 writes_failed;
87 u32 reads_issued;
88 u32 reads_failed;
89 u32 reads_retried;
90 u32 discards_issued;
91 u32 discards_failed;
92 u32 done_rescheduled;
93 u32 issue_rescheduled;
94 u32 sw_q_depth; /* Number of DMAs on the SW queue. */
95 atomic_t hw_q_depth; /* Number of DMAs queued to HW. */
96};
97
98struct rsxx_dma_ctrl {
99 struct rsxx_cardinfo *card;
100 int id;
101 void __iomem *regmap;
102 struct rsxx_cs_buffer status;
103 struct rsxx_cs_buffer cmd;
104 u16 e_cnt;
105 spinlock_t queue_lock;
106 struct list_head queue;
107 struct workqueue_struct *issue_wq;
108 struct work_struct issue_dma_work;
109 struct workqueue_struct *done_wq;
110 struct work_struct dma_done_work;
111 struct timer_list activity_timer;
112 struct dma_tracker_list *trackers;
113 struct rsxx_dma_stats stats;
114};
115
116struct rsxx_cardinfo {
117 struct pci_dev *dev;
118 unsigned int halt;
119
120 void __iomem *regmap;
121 spinlock_t irq_lock;
122 unsigned int isr_mask;
123 unsigned int ier_mask;
124
125 struct rsxx_card_cfg config;
126 int config_valid;
127
128 /* Embedded CPU Communication */
129 struct {
130 struct mutex lock;
131 bool active;
132 struct creg_cmd *active_cmd;
133 struct work_struct done_work;
134 struct list_head queue;
135 unsigned int q_depth;
136 /* Cache the creg status to prevent ioreads */
137 struct {
138 u32 stat;
139 u32 failed_cancel_timer;
140 u32 creg_timeout;
141 } creg_stats;
142 struct timer_list cmd_timer;
143 struct mutex reset_lock;
144 spinlock_t pop_lock;
145 int reset;
146 } creg_ctrl;
147
148 struct {
149 char tmp[MAX_CREG_DATA8];
150 char buf[LOG_BUF_SIZE8]; /* terminated */
151 int buf_len;
152 } log;
153
154 struct work_struct event_work;
155 unsigned int state;
156 u64 size8;
157
158 /* Lock the device attach/detach function */
159 struct mutex dev_lock;
160
161 /* Block Device Variables */
162 bool bdev_attached;
163 int disk_id;
164 int major;
165 struct request_queue *queue;
166 struct gendisk *gendisk;
167 struct {
168 /* Used to convert a byte address to a device address. */
169 u64 lower_mask;
170 u64 upper_shift;
171 u64 upper_mask;
172 u64 target_mask;
173 u64 target_shift;
174 } _stripe;
175 unsigned int dma_fault;
176
177 int scrub_hard;
178
179 int n_targets;
180 struct rsxx_dma_ctrl *ctrl;
181};
182
183enum rsxx_pci_regmap {
184 HWID = 0x00, /* Hardware Identification Register */
185 SCRATCH = 0x04, /* Scratch/Debug Register */
186 RESET = 0x08, /* Reset Register */
187 ISR = 0x10, /* Interrupt Status Register */
188 IER = 0x14, /* Interrupt Enable Register */
189 IPR = 0x18, /* Interrupt Poll Register */
190 CB_ADD_LO = 0x20, /* Command Host Buffer Address [31:0] */
191 CB_ADD_HI = 0x24, /* Command Host Buffer Address [63:32]*/
192 HW_CMD_IDX = 0x28, /* Hardware Processed Command Index */
193 SW_CMD_IDX = 0x2C, /* Software Processed Command Index */
194 SB_ADD_LO = 0x30, /* Status Host Buffer Address [31:0] */
195 SB_ADD_HI = 0x34, /* Status Host Buffer Address [63:32] */
196 HW_STATUS_CNT = 0x38, /* Hardware Status Counter */
197 SW_STATUS_CNT = 0x3C, /* Deprecated */
198 CREG_CMD = 0x40, /* CPU Command Register */
199 CREG_ADD = 0x44, /* CPU Address Register */
200 CREG_CNT = 0x48, /* CPU Count Register */
201 CREG_STAT = 0x4C, /* CPU Status Register */
202 CREG_DATA0 = 0x50, /* CPU Data Registers */
203 CREG_DATA1 = 0x54,
204 CREG_DATA2 = 0x58,
205 CREG_DATA3 = 0x5C,
206 CREG_DATA4 = 0x60,
207 CREG_DATA5 = 0x64,
208 CREG_DATA6 = 0x68,
209 CREG_DATA7 = 0x6c,
210 INTR_COAL = 0x70, /* Interrupt Coalescing Register */
211 HW_ERROR = 0x74, /* Card Error Register */
212 PCI_DEBUG0 = 0x78, /* PCI Debug Registers */
213 PCI_DEBUG1 = 0x7C,
214 PCI_DEBUG2 = 0x80,
215 PCI_DEBUG3 = 0x84,
216 PCI_DEBUG4 = 0x88,
217 PCI_DEBUG5 = 0x8C,
218 PCI_DEBUG6 = 0x90,
219 PCI_DEBUG7 = 0x94,
220 PCI_POWER_THROTTLE = 0x98,
221 PERF_CTRL = 0x9c,
222 PERF_TIMER_LO = 0xa0,
223 PERF_TIMER_HI = 0xa4,
224 PERF_RD512_LO = 0xa8,
225 PERF_RD512_HI = 0xac,
226 PERF_WR512_LO = 0xb0,
227 PERF_WR512_HI = 0xb4,
228};
229
230enum rsxx_intr {
231 CR_INTR_DMA0 = 0x00000001,
232 CR_INTR_CREG = 0x00000002,
233 CR_INTR_DMA1 = 0x00000004,
234 CR_INTR_EVENT = 0x00000008,
235 CR_INTR_DMA2 = 0x00000010,
236 CR_INTR_DMA3 = 0x00000020,
237 CR_INTR_DMA4 = 0x00000040,
238 CR_INTR_DMA5 = 0x00000080,
239 CR_INTR_DMA6 = 0x00000100,
240 CR_INTR_DMA7 = 0x00000200,
241 CR_INTR_DMA_ALL = 0x000003f5,
242 CR_INTR_ALL = 0xffffffff,
243};
244
245static inline int CR_INTR_DMA(int N)
246{
247 static const unsigned int _CR_INTR_DMA[] = {
248 CR_INTR_DMA0, CR_INTR_DMA1, CR_INTR_DMA2, CR_INTR_DMA3,
249 CR_INTR_DMA4, CR_INTR_DMA5, CR_INTR_DMA6, CR_INTR_DMA7
250 };
251 return _CR_INTR_DMA[N];
252}
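
Note that the per-channel DMA interrupt bits are not contiguous (CR_INTR_CREG and CR_INTR_EVENT are interleaved with the low DMA bits), which is why a lookup table is used rather than a plain shift. For example, CR_INTR_DMA(2) yields CR_INTR_DMA2 (0x00000010), and OR-ing all eight entries gives CR_INTR_DMA_ALL (0x000003f5).
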
253enum rsxx_pci_reset {
254 DMA_QUEUE_RESET = 0x00000001,
255};
256
257enum rsxx_pci_revision {
258 RSXX_DISCARD_SUPPORT = 2,
259};
260
261enum rsxx_creg_cmd {
262 CREG_CMD_TAG_MASK = 0x0000FF00,
263 CREG_OP_WRITE = 0x000000C0,
264 CREG_OP_READ = 0x000000E0,
265};
266
267enum rsxx_creg_addr {
268 CREG_ADD_CARD_CMD = 0x80001000,
269 CREG_ADD_CARD_STATE = 0x80001004,
270 CREG_ADD_CARD_SIZE = 0x8000100c,
271 CREG_ADD_CAPABILITIES = 0x80001050,
272 CREG_ADD_LOG = 0x80002000,
273 CREG_ADD_NUM_TARGETS = 0x80003000,
274 CREG_ADD_CONFIG = 0xB0000000,
275};
276
277enum rsxx_creg_card_cmd {
278 CARD_CMD_STARTUP = 1,
279 CARD_CMD_SHUTDOWN = 2,
280 CARD_CMD_LOW_LEVEL_FORMAT = 3,
281 CARD_CMD_FPGA_RECONFIG_BR = 4,
282 CARD_CMD_FPGA_RECONFIG_MAIN = 5,
283 CARD_CMD_BACKUP = 6,
284 CARD_CMD_RESET = 7,
285 CARD_CMD_deprecated = 8,
286 CARD_CMD_UNINITIALIZE = 9,
287 CARD_CMD_DSTROY_EMERGENCY = 10,
288 CARD_CMD_DSTROY_NORMAL = 11,
289 CARD_CMD_DSTROY_EXTENDED = 12,
290 CARD_CMD_DSTROY_ABORT = 13,
291};
292
293enum rsxx_card_state {
294 CARD_STATE_SHUTDOWN = 0x00000001,
295 CARD_STATE_STARTING = 0x00000002,
296 CARD_STATE_FORMATTING = 0x00000004,
297 CARD_STATE_UNINITIALIZED = 0x00000008,
298 CARD_STATE_GOOD = 0x00000010,
299 CARD_STATE_SHUTTING_DOWN = 0x00000020,
300 CARD_STATE_FAULT = 0x00000040,
301 CARD_STATE_RD_ONLY_FAULT = 0x00000080,
302 CARD_STATE_DSTROYING = 0x00000100,
303};
304
305enum rsxx_led {
306 LED_DEFAULT = 0x0,
307 LED_IDENTIFY = 0x1,
308 LED_SOAK = 0x2,
309};
310
311enum rsxx_creg_flash_lock {
312 CREG_FLASH_LOCK = 1,
313 CREG_FLASH_UNLOCK = 2,
314};
315
316enum rsxx_card_capabilities {
317 CARD_CAP_SUBPAGE_WRITES = 0x00000080,
318};
319
320enum rsxx_creg_stat {
321 CREG_STAT_STATUS_MASK = 0x00000003,
322 CREG_STAT_SUCCESS = 0x1,
323 CREG_STAT_ERROR = 0x2,
324 CREG_STAT_CHAR_PENDING = 0x00000004, /* Character I/O pending bit */
325 CREG_STAT_LOG_PENDING = 0x00000008, /* HW log message pending bit */
326 CREG_STAT_TAG_MASK = 0x0000ff00,
327};
328
329static inline unsigned int CREG_DATA(int N)
330{
331 return CREG_DATA0 + (N << 2);
332}
333
334/*----------------- Convenient Log Wrappers -------------------*/
335#define CARD_TO_DEV(__CARD) (&(__CARD)->dev->dev)
336
337/***** config.c *****/
338int rsxx_load_config(struct rsxx_cardinfo *card);
339int rsxx_save_config(struct rsxx_cardinfo *card);
340
341/***** core.c *****/
342void rsxx_enable_ier(struct rsxx_cardinfo *card, unsigned int intr);
343void rsxx_disable_ier(struct rsxx_cardinfo *card, unsigned int intr);
344void rsxx_enable_ier_and_isr(struct rsxx_cardinfo *card,
345 unsigned int intr);
346void rsxx_disable_ier_and_isr(struct rsxx_cardinfo *card,
347 unsigned int intr);
348char *rsxx_card_state_to_str(unsigned int state);
349irqreturn_t rsxx_isr(int irq, void *pdata);
350
351/***** dev.c *****/
352int rsxx_attach_dev(struct rsxx_cardinfo *card);
353void rsxx_detach_dev(struct rsxx_cardinfo *card);
354int rsxx_setup_dev(struct rsxx_cardinfo *card);
355void rsxx_destroy_dev(struct rsxx_cardinfo *card);
356int rsxx_dev_init(void);
357void rsxx_dev_cleanup(void);
358
359/***** dma.c ****/
360typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
361 void *cb_data,
362 unsigned int status);
363int rsxx_dma_setup(struct rsxx_cardinfo *card);
364void rsxx_dma_destroy(struct rsxx_cardinfo *card);
365int rsxx_dma_init(void);
366void rsxx_dma_cleanup(void);
367int rsxx_dma_configure(struct rsxx_cardinfo *card);
368int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
369 struct bio *bio,
370 atomic_t *n_dmas,
371 rsxx_dma_cb cb,
372 void *cb_data);
373int rsxx_dma_stripe_setup(struct rsxx_cardinfo *card,
374 unsigned int stripe_size8);
375unsigned int rsxx_get_dma_tgt(struct rsxx_cardinfo *card, u64 addr8);
376unsigned int rsxx_addr8_to_laddr(u64 addr8, struct rsxx_cardinfo *card);
377
378/***** cregs.c *****/
379int rsxx_creg_write(struct rsxx_cardinfo *card, u32 addr,
380 unsigned int size8,
381 void *data,
382 int byte_stream);
383int rsxx_creg_read(struct rsxx_cardinfo *card,
384 u32 addr,
385 unsigned int size8,
386 void *data,
387 int byte_stream);
388int rsxx_read_hw_log(struct rsxx_cardinfo *card);
389int rsxx_get_card_state(struct rsxx_cardinfo *card,
390 unsigned int *state);
391int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8);
392int rsxx_get_num_targets(struct rsxx_cardinfo *card,
393 unsigned int *n_targets);
394int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
395 u32 *capabilities);
396int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd);
397int rsxx_creg_setup(struct rsxx_cardinfo *card);
398void rsxx_creg_destroy(struct rsxx_cardinfo *card);
399int rsxx_creg_init(void);
400void rsxx_creg_cleanup(void);
401
402int rsxx_reg_access(struct rsxx_cardinfo *card,
403 struct rsxx_reg_access __user *ucmd,
404 int read);
405
406
407
408#endif /* __RSXX_PRIV_H__ */