Diffstat (limited to 'drivers/scsi/hpsa.c')
 drivers/scsi/hpsa.c | 1074 ++++++++++++++++++++++++++++----------------
 1 file changed, 719 insertions(+), 355 deletions(-)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index c5d0606ad097..6bba23a26303 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -31,7 +31,6 @@
 #include <linux/seq_file.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
-#include <linux/smp_lock.h>
 #include <linux/compat.h>
 #include <linux/blktrace_api.h>
 #include <linux/uaccess.h>
@@ -75,6 +74,10 @@ static int hpsa_allow_any;
 module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(hpsa_allow_any,
 	"Allow hpsa driver to access unknown HP Smart Array hardware");
+static int hpsa_simple_mode;
+module_param(hpsa_simple_mode, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(hpsa_simple_mode,
+	"Use 'simple mode' rather than 'performant mode'");
 
 /* define the PCI info for the cards we can control */
 static const struct pci_device_id hpsa_pci_device_id[] = {
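A note on the new parameter, assuming stock module_param semantics rather than anything hpsa-specific: an S_IRUGO|S_IWUSR parameter appears under /sys/module/hpsa/parameters/hpsa_simple_mode and can also be set at load time, e.g. "modprobe hpsa hpsa_simple_mode=1". Since the patch presumably consults it only while choosing the transport at controller initialization, flipping the sysfs value on a live controller would not switch an already-configured board.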
@@ -86,16 +89,14 @@ static const struct pci_device_id hpsa_pci_device_id[] = {
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b},
 	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3250},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3251},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3252},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3253},
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3254},
-#define PCI_DEVICE_ID_HP_CISSF 0x333f
-	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3350},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3351},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3352},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3353},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3354},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3355},
+	{PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x3356},
 	{PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
-		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
-	{PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
 		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
 	{0,}
 };
@@ -114,13 +115,13 @@ static struct board_type products[] = {
 	{0x3249103C, "Smart Array P812", &SA5_access},
 	{0x324a103C, "Smart Array P712m", &SA5_access},
 	{0x324b103C, "Smart Array P711m", &SA5_access},
-	{0x3233103C, "StorageWorks P1210m", &SA5_access},
-	{0x333F103C, "StorageWorks P1210m", &SA5_access},
-	{0x3250103C, "Smart Array", &SA5_access},
-	{0x3250113C, "Smart Array", &SA5_access},
-	{0x3250123C, "Smart Array", &SA5_access},
-	{0x3250133C, "Smart Array", &SA5_access},
-	{0x3250143C, "Smart Array", &SA5_access},
+	{0x3350103C, "Smart Array", &SA5_access},
+	{0x3351103C, "Smart Array", &SA5_access},
+	{0x3352103C, "Smart Array", &SA5_access},
+	{0x3353103C, "Smart Array", &SA5_access},
+	{0x3354103C, "Smart Array", &SA5_access},
+	{0x3355103C, "Smart Array", &SA5_access},
+	{0x3356103C, "Smart Array", &SA5_access},
 	{0xFFFF103C, "Unknown Smart Array", &SA5_access},
 };
 
@@ -143,8 +144,7 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
 	int cmd_type);
 
-static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
-	void (*done)(struct scsi_cmnd *));
+static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 static void hpsa_scan_start(struct Scsi_Host *);
 static int hpsa_scan_finished(struct Scsi_Host *sh,
 	unsigned long elapsed_time);
@@ -155,17 +155,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
 static int hpsa_slave_alloc(struct scsi_device *sdev);
 static void hpsa_slave_destroy(struct scsi_device *sdev);
 
-static ssize_t raid_level_show(struct device *dev,
-	struct device_attribute *attr, char *buf);
-static ssize_t lunid_show(struct device *dev,
-	struct device_attribute *attr, char *buf);
-static ssize_t unique_id_show(struct device *dev,
-	struct device_attribute *attr, char *buf);
-static ssize_t host_show_firmware_revision(struct device *dev,
-	struct device_attribute *attr, char *buf);
 static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno);
-static ssize_t host_store_rescan(struct device *dev,
-	struct device_attribute *attr, const char *buf, size_t count);
 static int check_for_unit_attention(struct ctlr_info *h,
 	struct CommandList *c);
 static void check_ioctl_unit_attention(struct ctlr_info *h,
@@ -181,47 +171,10 @@ static int __devinit hpsa_find_cfg_addrs(struct pci_dev *pdev,
 static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 	unsigned long *memory_bar);
 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);
-
-static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
-static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
-static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
-static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
-static DEVICE_ATTR(firmware_revision, S_IRUGO,
-	host_show_firmware_revision, NULL);
-
-static struct device_attribute *hpsa_sdev_attrs[] = {
-	&dev_attr_raid_level,
-	&dev_attr_lunid,
-	&dev_attr_unique_id,
-	NULL,
-};
-
-static struct device_attribute *hpsa_shost_attrs[] = {
-	&dev_attr_rescan,
-	&dev_attr_firmware_revision,
-	NULL,
-};
-
-static struct scsi_host_template hpsa_driver_template = {
-	.module = THIS_MODULE,
-	.name = "hpsa",
-	.proc_name = "hpsa",
-	.queuecommand = hpsa_scsi_queue_command,
-	.scan_start = hpsa_scan_start,
-	.scan_finished = hpsa_scan_finished,
-	.change_queue_depth = hpsa_change_queue_depth,
-	.this_id = -1,
-	.use_clustering = ENABLE_CLUSTERING,
-	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
-	.ioctl = hpsa_ioctl,
-	.slave_alloc = hpsa_slave_alloc,
-	.slave_destroy = hpsa_slave_destroy,
-#ifdef CONFIG_COMPAT
-	.compat_ioctl = hpsa_compat_ioctl,
-#endif
-	.sdev_attrs = hpsa_sdev_attrs,
-	.shost_attrs = hpsa_shost_attrs,
-};
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+	void __iomem *vaddr, int wait_for_ready);
+#define BOARD_NOT_READY 0
+#define BOARD_READY 1
 
 static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)
 {
@@ -299,67 +252,92 @@ static ssize_t host_show_firmware_revision(struct device *dev,
 		fwrev[0], fwrev[1], fwrev[2], fwrev[3]);
 }
 
-/* Enqueuing and dequeuing functions for cmdlists. */
-static inline void addQ(struct hlist_head *list, struct CommandList *c)
-{
-	hlist_add_head(&c->list, list);
-}
-
-static inline u32 next_command(struct ctlr_info *h)
-{
-	u32 a;
-
-	if (unlikely(h->transMethod != CFGTBL_Trans_Performant))
-		return h->access.command_completed(h);
-
-	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
-		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
-		(h->reply_pool_head)++;
-		h->commands_outstanding--;
-	} else {
-		a = FIFO_EMPTY;
-	}
-	/* Check for wraparound */
-	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
-		h->reply_pool_head = h->reply_pool;
-		h->reply_pool_wraparound ^= 1;
-	}
-	return a;
-}
-
-/* set_performant_mode: Modify the tag for cciss performant
- * set bit 0 for pull model, bits 3-1 for block fetch
- * register number
- */
-static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
-{
-	if (likely(h->transMethod == CFGTBL_Trans_Performant))
-		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
-}
-
-static void enqueue_cmd_and_start_io(struct ctlr_info *h,
-	struct CommandList *c)
-{
-	unsigned long flags;
-
-	set_performant_mode(h, c);
-	spin_lock_irqsave(&h->lock, flags);
-	addQ(&h->reqQ, c);
-	h->Qdepth++;
-	start_io(h);
-	spin_unlock_irqrestore(&h->lock, flags);
-}
-
-static inline void removeQ(struct CommandList *c)
-{
-	if (WARN_ON(hlist_unhashed(&c->list)))
-		return;
-	hlist_del_init(&c->list);
-}
-
-static inline int is_hba_lunid(unsigned char scsi3addr[])
-{
-	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
-}
-
+static ssize_t host_show_commands_outstanding(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct ctlr_info *h = shost_to_hba(shost);
+
+	return snprintf(buf, 20, "%d\n", h->commands_outstanding);
+}
+
+static ssize_t host_show_transport_mode(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	h = shost_to_hba(shost);
+	return snprintf(buf, 20, "%s\n",
+		h->transMethod & CFGTBL_Trans_Performant ?
+			"performant" : "simple");
+}
+
+/* List of controllers which cannot be hard reset on kexec with reset_devices */
+static u32 unresettable_controller[] = {
+	0x324a103C, /* Smart Array P712m */
+	0x324b103C, /* SmartArray P711m */
+	0x3223103C, /* Smart Array P800 */
+	0x3234103C, /* Smart Array P400 */
+	0x3235103C, /* Smart Array P400i */
+	0x3211103C, /* Smart Array E200i */
+	0x3212103C, /* Smart Array E200 */
+	0x3213103C, /* Smart Array E200i */
+	0x3214103C, /* Smart Array E200i */
+	0x3215103C, /* Smart Array E200i */
+	0x3237103C, /* Smart Array E500 */
+	0x323D103C, /* Smart Array P700m */
+	0x409C0E11, /* Smart Array 6400 */
+	0x409D0E11, /* Smart Array 6400 EM */
+};
+
+/* List of controllers which cannot even be soft reset */
+static u32 soft_unresettable_controller[] = {
+	/* Exclude 640x boards.  These are two pci devices in one slot
+	 * which share a battery backed cache module.  One controls the
+	 * cache, the other accesses the cache through the one that controls
+	 * it.  If we reset the one controlling the cache, the other will
+	 * likely not be happy.  Just forbid resetting this conjoined mess.
+	 * The 640x isn't really supported by hpsa anyway.
+	 */
+	0x409C0E11, /* Smart Array 6400 */
+	0x409D0E11, /* Smart Array 6400 EM */
+};
+
+static int ctlr_is_hard_resettable(u32 board_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(unresettable_controller); i++)
+		if (unresettable_controller[i] == board_id)
+			return 0;
+	return 1;
+}
+
+static int ctlr_is_soft_resettable(u32 board_id)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(soft_unresettable_controller); i++)
+		if (soft_unresettable_controller[i] == board_id)
+			return 0;
+	return 1;
+}
+
+static int ctlr_is_resettable(u32 board_id)
+{
+	return ctlr_is_hard_resettable(board_id) ||
+		ctlr_is_soft_resettable(board_id);
+}
+
+static ssize_t host_show_resettable(struct device *dev,
+	struct device_attribute *attr, char *buf)
+{
+	struct ctlr_info *h;
+	struct Scsi_Host *shost = class_to_shost(dev);
+
+	h = shost_to_hba(shost);
+	return snprintf(buf, 20, "%d\n", ctlr_is_resettable(h->board_id));
+}
+
 static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
@@ -367,15 +345,6 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])
 	return (scsi3addr[3] & 0xC0) == 0x40;
 }
 
-static inline int is_scsi_rev_5(struct ctlr_info *h)
-{
-	if (!h->hba_inquiry_data)
-		return 0;
-	if ((h->hba_inquiry_data[2] & 0x07) == 5)
-		return 1;
-	return 0;
-}
-
 static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
 	"UNKNOWN"
 };
@@ -467,6 +436,129 @@ static ssize_t unique_id_show(struct device *dev,
 		sn[12], sn[13], sn[14], sn[15]);
 }
 
+static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(firmware_revision, S_IRUGO,
+	host_show_firmware_revision, NULL);
+static DEVICE_ATTR(commands_outstanding, S_IRUGO,
+	host_show_commands_outstanding, NULL);
+static DEVICE_ATTR(transport_mode, S_IRUGO,
+	host_show_transport_mode, NULL);
+static DEVICE_ATTR(resettable, S_IRUGO,
+	host_show_resettable, NULL);
+
+static struct device_attribute *hpsa_sdev_attrs[] = {
+	&dev_attr_raid_level,
+	&dev_attr_lunid,
+	&dev_attr_unique_id,
+	NULL,
+};
+
+static struct device_attribute *hpsa_shost_attrs[] = {
+	&dev_attr_rescan,
+	&dev_attr_firmware_revision,
+	&dev_attr_commands_outstanding,
+	&dev_attr_transport_mode,
+	&dev_attr_resettable,
+	NULL,
+};
+
+static struct scsi_host_template hpsa_driver_template = {
+	.module = THIS_MODULE,
+	.name = "hpsa",
+	.proc_name = "hpsa",
+	.queuecommand = hpsa_scsi_queue_command,
+	.scan_start = hpsa_scan_start,
+	.scan_finished = hpsa_scan_finished,
+	.change_queue_depth = hpsa_change_queue_depth,
+	.this_id = -1,
+	.use_clustering = ENABLE_CLUSTERING,
+	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
+	.ioctl = hpsa_ioctl,
+	.slave_alloc = hpsa_slave_alloc,
+	.slave_destroy = hpsa_slave_destroy,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = hpsa_compat_ioctl,
+#endif
+	.sdev_attrs = hpsa_sdev_attrs,
+	.shost_attrs = hpsa_shost_attrs,
+};
+
+
+/* Enqueuing and dequeuing functions for cmdlists. */
+static inline void addQ(struct list_head *list, struct CommandList *c)
+{
+	list_add_tail(&c->list, list);
+}
+
+static inline u32 next_command(struct ctlr_info *h)
+{
+	u32 a;
+
+	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+		return h->access.command_completed(h);
+
+	if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
+		a = *(h->reply_pool_head); /* Next cmd in ring buffer */
+		(h->reply_pool_head)++;
+		h->commands_outstanding--;
+	} else {
+		a = FIFO_EMPTY;
+	}
+	/* Check for wraparound */
+	if (h->reply_pool_head == (h->reply_pool + h->max_commands)) {
+		h->reply_pool_head = h->reply_pool;
+		h->reply_pool_wraparound ^= 1;
+	}
+	return a;
+}
+
+/* set_performant_mode: Modify the tag for cciss performant
+ * set bit 0 for pull model, bits 3-1 for block fetch
+ * register number
+ */
+static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)
+{
+	if (likely(h->transMethod & CFGTBL_Trans_Performant))
+		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1);
+}
+
+static void enqueue_cmd_and_start_io(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	unsigned long flags;
+
+	set_performant_mode(h, c);
+	spin_lock_irqsave(&h->lock, flags);
+	addQ(&h->reqQ, c);
+	h->Qdepth++;
+	start_io(h);
+	spin_unlock_irqrestore(&h->lock, flags);
+}
+
+static inline void removeQ(struct CommandList *c)
+{
+	if (WARN_ON(list_empty(&c->list)))
+		return;
+	list_del_init(&c->list);
+}
+
+static inline int is_hba_lunid(unsigned char scsi3addr[])
+{
+	return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0;
+}
+
+static inline int is_scsi_rev_5(struct ctlr_info *h)
+{
+	if (!h->hba_inquiry_data)
+		return 0;
+	if ((h->hba_inquiry_data[2] & 0x07) == 5)
+		return 1;
+	return 0;
+}
+
 static int hpsa_find_target_lun(struct ctlr_info *h,
 	unsigned char scsi3addr[], int bus, int *target, int *lun)
 {
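The next_command() ring walk carried over above is the heart of performant mode: the controller posts completed tags into the reply ring, and the low bit of each entry acts as a cycle bit that flips on every pass, so the consumer can tell fresh entries from stale ones without a produced-count register. A standalone toy model of just that mechanism (illustrative only; every name below is local to this sketch, not the driver's):

	#include <stdio.h>

	#define RING_SIZE 4
	#define FIFO_EMPTY 0xffffffffu

	static unsigned ring[RING_SIZE];
	static unsigned *head = ring;
	static unsigned wraparound = 1;	/* consumer starts expecting cycle bit 1 */

	static unsigned ring_next(void)
	{
		unsigned tag = FIFO_EMPTY;

		if ((*head & 1) == wraparound)	/* entry written on this pass? */
			tag = *head++;
		if (head == ring + RING_SIZE) {	/* wrapped: expect flipped bit */
			head = ring;
			wraparound ^= 1;
		}
		return tag;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < RING_SIZE; i++)	/* "firmware" posts with cycle bit 1 */
			ring[i] = ((unsigned)i << 1) | 1;
		for (i = 0; i <= RING_SIZE; i++)
			printf("0x%x\n", ring_next());	/* last read: FIFO_EMPTY */
		return 0;
	}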
@@ -649,11 +741,6 @@ static void fixup_botched_add(struct ctlr_info *h,
 static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
 	struct hpsa_scsi_dev_t *dev2)
 {
-	if ((is_logical_dev_addr_mode(dev1->scsi3addr) ||
-		(dev1->lun != -1 && dev2->lun != -1)) &&
-		dev1->devtype != 0x0C)
-		return (memcmp(dev1, dev2, sizeof(*dev1)) == 0);
-
 	/* we compare everything except lun and target as these
 	 * are not yet assigned. Compare parts likely
 	 * to differ first
@@ -668,12 +755,8 @@ static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1,
 		return 0;
 	if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0)
 		return 0;
-	if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0)
-		return 0;
 	if (dev1->devtype != dev2->devtype)
 		return 0;
-	if (dev1->raid_level != dev2->raid_level)
-		return 0;
 	if (dev1->bus != dev2->bus)
 		return 0;
 	return 1;
@@ -875,13 +958,6 @@ static void hpsa_slave_destroy(struct scsi_device *sdev)
 	/* nothing to do. */
 }
 
-static void hpsa_scsi_setup(struct ctlr_info *h)
-{
-	h->ndevices = 0;
-	h->scsi_host = NULL;
-	spin_lock_init(&h->devlock);
-}
-
 static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
 {
 	int i;
@@ -952,8 +1028,7 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
 	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
 }
 
-static void complete_scsi_command(struct CommandList *cp,
-	int timeout, u32 tag)
+static void complete_scsi_command(struct CommandList *cp)
 {
 	struct scsi_cmnd *cmd;
 	struct ctlr_info *h;
@@ -962,6 +1037,7 @@ static void complete_scsi_command(struct CommandList *cp,
 	unsigned char sense_key;
 	unsigned char asc;      /* additional sense code */
 	unsigned char ascq;     /* additional sense code qualifier */
+	unsigned long sense_data_size;
 
 	ei = cp->err_info;
 	cmd = (struct scsi_cmnd *) cp->scsi_cmd;
@@ -976,10 +1052,14 @@ static void complete_scsi_command(struct CommandList *cp,
 	cmd->result |= ei->ScsiStatus;
 
 	/* copy the sense data whether we need to or not. */
-	memcpy(cmd->sense_buffer, ei->SenseInfo,
-		ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
-			SCSI_SENSE_BUFFERSIZE :
-			ei->SenseLen);
+	if (SCSI_SENSE_BUFFERSIZE < sizeof(ei->SenseInfo))
+		sense_data_size = SCSI_SENSE_BUFFERSIZE;
+	else
+		sense_data_size = sizeof(ei->SenseInfo);
+	if (ei->SenseLen < sense_data_size)
+		sense_data_size = ei->SenseLen;
+
+	memcpy(cmd->sense_buffer, ei->SenseInfo, sense_data_size);
 	scsi_set_resid(cmd, ei->ResidualCnt);
 
 	if (ei->CommandStatus == 0) {
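The replacement bound on the memcpy above is effectively a three-way minimum of the midlayer's sense buffer, the controller's SenseInfo field, and the length the firmware actually reported, so neither buffer can be overrun. Restated standalone with illustrative sizes (the real bounds come from SCSI_SENSE_BUFFERSIZE, sizeof(ei->SenseInfo), and ei->SenseLen):

	#include <stdio.h>

	static unsigned long min3ul(unsigned long a, unsigned long b, unsigned long c)
	{
		unsigned long m = a < b ? a : b;

		return m < c ? m : c;
	}

	int main(void)
	{
		/* e.g. 96-byte midlayer buffer, 32-byte SenseInfo, 18 reported */
		printf("copy %lu bytes\n", min3ul(96, 32, 18));
		return 0;
	}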
@@ -1147,6 +1227,10 @@ static void complete_scsi_command(struct CommandList *cp,
 		cmd->result = DID_TIME_OUT << 16;
 		dev_warn(&h->pdev->dev, "cp %p timedout\n", cp);
 		break;
+	case CMD_UNABORTABLE:
+		cmd->result = DID_ERROR << 16;
+		dev_warn(&h->pdev->dev, "Command unabortable\n");
+		break;
 	default:
 		cmd->result = DID_ERROR << 16;
 		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n",
@@ -1177,7 +1261,7 @@ static int hpsa_scsi_detect(struct ctlr_info *h)
 	sh->sg_tablesize = h->maxsgentries;
 	h->scsi_host = sh;
 	sh->hostdata[0] = (unsigned long) h;
-	sh->irq = h->intr[PERF_MODE_INT];
+	sh->irq = h->intr[h->intr_mode];
 	sh->unique_id = sh->irq;
 	error = scsi_add_host(sh, &h->pdev->dev);
 	if (error)
@@ -1250,7 +1334,7 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,
 	int retry_count = 0;
 
 	do {
-		memset(c->err_info, 0, sizeof(c->err_info));
+		memset(c->err_info, 0, sizeof(*c->err_info));
 		hpsa_scsi_do_simple_cmd_core(h, c);
 		retry_count++;
 	} while (check_for_unit_attention(h, c) && retry_count <= 3);
@@ -1312,6 +1396,9 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)
 	case CMD_TIMEOUT:
 		dev_warn(d, "cp %p timed out\n", cp);
 		break;
+	case CMD_UNABORTABLE:
+		dev_warn(d, "Command unabortable\n");
+		break;
 	default:
 		dev_warn(d, "cp %p returned unknown status %x\n", cp,
 			ei->CommandStatus);
@@ -1485,8 +1572,6 @@ static int hpsa_update_device_info(struct ctlr_info *h,
 		sizeof(this_device->vendor));
 	memcpy(this_device->model, &inq_buff[16],
 		sizeof(this_device->model));
-	memcpy(this_device->revision, &inq_buff[32],
-		sizeof(this_device->revision));
 	memset(this_device->device_id, 0,
 		sizeof(this_device->device_id));
 	hpsa_get_device_id(h, scsi3addr, this_device->device_id,
@@ -1511,6 +1596,7 @@ static unsigned char *msa2xxx_model[] = {
1511 "MSA2024", 1596 "MSA2024",
1512 "MSA2312", 1597 "MSA2312",
1513 "MSA2324", 1598 "MSA2324",
1599 "P2000 G3 SAS",
1514 NULL, 1600 NULL,
1515}; 1601};
1516 1602
@@ -1614,6 +1700,8 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
 	if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */
 		return 0;
 
+	memset(scsi3addr, 0, 8);
+	scsi3addr[3] = target;
 	if (is_hba_lunid(scsi3addr))
 		return 0; /* Don't add the RAID controller here. */
 
@@ -1628,8 +1716,6 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h,
 		return 0;
 	}
 
-	memset(scsi3addr, 0, 8);
-	scsi3addr[3] = target;
 	if (hpsa_update_device_info(h, scsi3addr, this_device))
 		return 0;
 	(*nmsa2xxx_enclosures)++;
@@ -1926,7 +2012,7 @@ sglist_finished:
 }
 
 
-static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
+static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,
 	void (*done)(struct scsi_cmnd *))
 {
 	struct ctlr_info *h;
@@ -2020,6 +2106,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd,
 	return 0;
 }
 
+static DEF_SCSI_QCMD(hpsa_scsi_queue_command)
+
 static void hpsa_scan_start(struct Scsi_Host *sh)
 {
 	struct ctlr_info *h = shost_to_hba(sh);
@@ -2216,7 +2304,7 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h)
 
 	c->cmdindex = i;
 
-	INIT_HLIST_NODE(&c->list);
+	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (u32) cmd_dma_handle;
 	temp64.val = (u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2254,7 +2342,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)
 	}
 	memset(c->err_info, 0, sizeof(*c->err_info));
 
-	INIT_HLIST_NODE(&c->list);
+	INIT_LIST_HEAD(&c->list);
 	c->busaddr = (u32) cmd_dma_handle;
 	temp64.val = (u64) err_dma_handle;
 	c->ErrDesc.Addr.lower = temp64.val32.lower;
@@ -2284,7 +2372,7 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c)
 	pci_free_consistent(h->pdev, sizeof(*c->err_info),
 		c->err_info, (dma_addr_t) temp64.val);
 	pci_free_consistent(h->pdev, sizeof(*c),
-		c, (dma_addr_t) c->busaddr);
+		c, (dma_addr_t) (c->busaddr & DIRECT_LOOKUP_MASK));
 }
 
 #ifdef CONFIG_COMPAT
@@ -2298,6 +2386,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg)
 	int err;
 	u32 cp;
 
+	memset(&arg64, 0, sizeof(arg64));
 	err = 0;
 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
 		sizeof(arg64.LUN_info));
@@ -2334,6 +2423,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev,
 	int err;
 	u32 cp;
 
+	memset(&arg64, 0, sizeof(arg64));
 	err = 0;
 	err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
 		sizeof(arg64.LUN_info));
@@ -2450,15 +2540,17 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);
 		if (buff == NULL)
 			return -EFAULT;
-	}
-	if (iocommand.Request.Type.Direction == XFER_WRITE) {
-		/* Copy the data into the buffer we created */
-		if (copy_from_user(buff, iocommand.buf, iocommand.buf_size)) {
-			kfree(buff);
-			return -EFAULT;
-		}
-	} else
-		memset(buff, 0, iocommand.buf_size);
+		if (iocommand.Request.Type.Direction == XFER_WRITE) {
+			/* Copy the data into the buffer we created */
+			if (copy_from_user(buff, iocommand.buf,
+				iocommand.buf_size)) {
+				kfree(buff);
+				return -EFAULT;
+			}
+		} else {
+			memset(buff, 0, iocommand.buf_size);
+		}
+	}
 	c = cmd_special_alloc(h);
 	if (c == NULL) {
 		kfree(buff);
@@ -2493,7 +2585,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		c->SG[0].Ext = 0; /* we are not chaining*/
 	}
 	hpsa_scsi_do_simple_cmd_core(h, c);
-	hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
+	if (iocommand.buf_size > 0)
+		hpsa_pci_unmap(h->pdev, c, 1, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
 
 	/* Copy the error information out */
@@ -2504,8 +2597,8 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		cmd_special_free(h, c);
 		return -EFAULT;
 	}
-
-	if (iocommand.Request.Type.Direction == XFER_READ) {
+	if (iocommand.Request.Type.Direction == XFER_READ &&
+		iocommand.buf_size > 0) {
 		/* Copy the data out of the buffer we created */
 		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) {
 			kfree(buff);
@@ -2598,14 +2691,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 	}
 	c->cmd_type = CMD_IOCTL_PEND;
 	c->Header.ReplyQueue = 0;
-
-	if (ioc->buf_size > 0) {
-		c->Header.SGList = sg_used;
-		c->Header.SGTotal = sg_used;
-	} else {
-		c->Header.SGList = 0;
-		c->Header.SGTotal = 0;
-	}
+	c->Header.SGList = c->Header.SGTotal = sg_used;
 	memcpy(&c->Header.LUN, &ioc->LUN_info, sizeof(c->Header.LUN));
 	c->Header.Tag.lower = c->busaddr;
 	memcpy(&c->Request, &ioc->Request, sizeof(c->Request));
@@ -2622,7 +2708,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		}
 	}
 	hpsa_scsi_do_simple_cmd_core(h, c);
-	hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
+	if (sg_used)
+		hpsa_pci_unmap(h->pdev, c, sg_used, PCI_DMA_BIDIRECTIONAL);
 	check_ioctl_unit_attention(h, c);
 	/* Copy the error information out */
 	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));
@@ -2631,7 +2718,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)
 		status = -EFAULT;
 		goto cleanup1;
 	}
-	if (ioc->Request.Type.Direction == XFER_READ) {
+	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) {
 		/* Copy the data out of the buffer we created */
 		BYTE __user *ptr = ioc->buf;
 		for (i = 0; i < sg_used; i++) {
@@ -2692,6 +2779,26 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)
 	}
 }
 
+static int __devinit hpsa_send_host_reset(struct ctlr_info *h,
+	unsigned char *scsi3addr, u8 reset_type)
+{
+	struct CommandList *c;
+
+	c = cmd_alloc(h);
+	if (!c)
+		return -ENOMEM;
+	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0,
+		RAID_CTLR_LUNID, TYPE_MSG);
+	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to target reset */
+	c->waiting = NULL;
+	enqueue_cmd_and_start_io(h, c);
+	/* Don't wait for completion, the reset won't complete.  Don't free
+	 * the command either.  This is the last command we will send before
+	 * re-initializing everything, so it doesn't matter and won't leak.
+	 */
+	return 0;
+}
+
 static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr,
 	int cmd_type)
@@ -2769,7 +2876,8 @@ static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,
 		c->Request.Type.Attribute = ATTR_SIMPLE;
 		c->Request.Type.Direction = XFER_NONE;
 		c->Request.Timeout = 0; /* Don't time out */
-		c->Request.CDB[0] = 0x01; /* RESET_MSG is 0x01 */
+		memset(&c->Request.CDB[0], 0, sizeof(c->Request.CDB));
+		c->Request.CDB[0] = cmd;
 		c->Request.CDB[1] = 0x03;  /* Reset target above */
 		/* If bytes 4-7 are zero, it means reset the */
 		/* LunID device */
@@ -2827,8 +2935,8 @@ static void start_io(struct ctlr_info *h)
 {
 	struct CommandList *c;
 
-	while (!hlist_empty(&h->reqQ)) {
-		c = hlist_entry(h->reqQ.first, struct CommandList, list);
+	while (!list_empty(&h->reqQ)) {
+		c = list_entry(h->reqQ.next, struct CommandList, list);
 		/* can't do anything if fifo is full */
 		if ((h->access.fifo_full(h))) {
 			dev_warn(&h->pdev->dev, "fifo full\n");
@@ -2877,27 +2985,29 @@ static inline void finish_cmd(struct CommandList *c, u32 raw_tag)
 {
 	removeQ(c);
 	if (likely(c->cmd_type == CMD_SCSI))
-		complete_scsi_command(c, 0, raw_tag);
+		complete_scsi_command(c);
 	else if (c->cmd_type == CMD_IOCTL_PEND)
 		complete(c->waiting);
 }
 
 static inline u32 hpsa_tag_contains_index(u32 tag)
 {
-#define DIRECT_LOOKUP_BIT 0x10
 	return tag & DIRECT_LOOKUP_BIT;
 }
 
 static inline u32 hpsa_tag_to_index(u32 tag)
 {
-#define DIRECT_LOOKUP_SHIFT 5
 	return tag >> DIRECT_LOOKUP_SHIFT;
 }
 
-static inline u32 hpsa_tag_discard_error_bits(u32 tag)
-{
-#define HPSA_ERROR_BITS 0x03
-	return tag & ~HPSA_ERROR_BITS;
+
+static inline u32 hpsa_tag_discard_error_bits(struct ctlr_info *h, u32 tag)
+{
+#define HPSA_PERF_ERROR_BITS ((1 << DIRECT_LOOKUP_SHIFT) - 1)
+#define HPSA_SIMPLE_ERROR_BITS 0x03
+	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
+		return tag & ~HPSA_SIMPLE_ERROR_BITS;
+	return tag & ~HPSA_PERF_ERROR_BITS;
 }
 
 /* process completion of an indexed ("direct lookup") command */
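The widened masks above reflect the tag layout: performant-mode completions carry the command index from bit 5 up (DIRECT_LOOKUP_SHIFT), with the pull-model flag and block-fetch number packed below it, while simple mode uses only the low two bits for status, so a single fixed error mask no longer serves both transports. A self-contained restatement (constants copied from the patch; the helper and the sample values are this sketch's own, where the driver keys off h->transMethod):

	#include <stdio.h>
	#include <stdbool.h>

	#define DIRECT_LOOKUP_SHIFT	5
	#define HPSA_PERF_ERROR_BITS	((1 << DIRECT_LOOKUP_SHIFT) - 1)	/* 0x1f */
	#define HPSA_SIMPLE_ERROR_BITS	0x03

	static unsigned discard_error_bits(bool performant, unsigned tag)
	{
		return tag & ~(unsigned)(performant ? HPSA_PERF_ERROR_BITS
						    : HPSA_SIMPLE_ERROR_BITS);
	}

	int main(void)
	{
		unsigned raw = (7u << DIRECT_LOOKUP_SHIFT) | 0x0b; /* index 7 */

		printf("performant: 0x%x -> index %u\n",
		       raw, discard_error_bits(true, raw) >> DIRECT_LOOKUP_SHIFT);
		printf("simple:     0x%x -> 0x%x\n",
		       raw, discard_error_bits(false, raw));
		return 0;
	}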
@@ -2921,10 +3031,9 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
 {
 	u32 tag;
 	struct CommandList *c = NULL;
-	struct hlist_node *tmp;
 
-	tag = hpsa_tag_discard_error_bits(raw_tag);
-	hlist_for_each_entry(c, tmp, &h->cmpQ, list) {
+	tag = hpsa_tag_discard_error_bits(h, raw_tag);
+	list_for_each_entry(c, &h->cmpQ, list) {
 		if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) {
 			finish_cmd(c, raw_tag);
 			return next_command(h);
@@ -2934,6 +3043,63 @@ static inline u32 process_nonindexed_cmd(struct ctlr_info *h,
 	return next_command(h);
 }
 
+/* Some controllers, like p400, will give us one interrupt
+ * after a soft reset, even if we turned interrupts off.
+ * Only need to check for this in the hpsa_xxx_discard_completions
+ * functions.
+ */
+static int ignore_bogus_interrupt(struct ctlr_info *h)
+{
+	if (likely(!reset_devices))
+		return 0;
+
+	if (likely(h->interrupts_enabled))
+		return 0;
+
+	dev_info(&h->pdev->dev, "Received interrupt while interrupts disabled "
+		"(known firmware bug.)  Ignoring.\n");
+
+	return 1;
+}
+
+static irqreturn_t hpsa_intx_discard_completions(int irq, void *dev_id)
+{
+	struct ctlr_info *h = dev_id;
+	unsigned long flags;
+	u32 raw_tag;
+
+	if (ignore_bogus_interrupt(h))
+		return IRQ_NONE;
+
+	if (interrupt_not_for_us(h))
+		return IRQ_NONE;
+	spin_lock_irqsave(&h->lock, flags);
+	while (interrupt_pending(h)) {
+		raw_tag = get_next_completion(h);
+		while (raw_tag != FIFO_EMPTY)
+			raw_tag = next_command(h);
+	}
+	spin_unlock_irqrestore(&h->lock, flags);
+	return IRQ_HANDLED;
+}
+
+static irqreturn_t hpsa_msix_discard_completions(int irq, void *dev_id)
+{
+	struct ctlr_info *h = dev_id;
+	unsigned long flags;
+	u32 raw_tag;
+
+	if (ignore_bogus_interrupt(h))
+		return IRQ_NONE;
+
+	spin_lock_irqsave(&h->lock, flags);
+	raw_tag = get_next_completion(h);
+	while (raw_tag != FIFO_EMPTY)
+		raw_tag = next_command(h);
+	spin_unlock_irqrestore(&h->lock, flags);
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id)
 {
 	struct ctlr_info *h = dev_id;
@@ -2974,7 +3140,10 @@ static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
-/* Send a message CDB to the firmware. */
+/* Send a message CDB to the firmware. Careful, this only works
+ * in simple mode, not performant mode due to the tag lookup.
+ * We only ever use this immediately after a controller reset.
+ */
 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 	unsigned char type)
 {
@@ -3040,7 +3209,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 
 	for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) {
 		tag = readl(vaddr + SA5_REPLY_PORT_OFFSET);
-		if (hpsa_tag_discard_error_bits(tag) == paddr32)
+		if ((tag & ~HPSA_SIMPLE_ERROR_BITS) == paddr32)
 			break;
 		msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS);
 	}
@@ -3069,43 +3238,10 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 	return 0;
 }
 
-#define hpsa_soft_reset_controller(p) hpsa_message(p, 1, 0)
 #define hpsa_noop(p) hpsa_message(p, 3, 0)
 
-static __devinit int hpsa_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-
-	int pos;
-	u16 control = 0;
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSI_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI\n");
-			pci_write_config_word(pdev, msi_control_reg(pos),
-				control & ~PCI_MSI_FLAGS_ENABLE);
-		}
-	}
-
-	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-	if (pos) {
-		pci_read_config_word(pdev, msi_control_reg(pos), &control);
-		if (control & PCI_MSIX_FLAGS_ENABLE) {
-			dev_info(&pdev->dev, "resetting MSI-X\n");
-			pci_write_config_word(pdev, msi_control_reg(pos),
-				control & ~PCI_MSIX_FLAGS_ENABLE);
-		}
-	}
-
-	return 0;
-}
-
 static int hpsa_controller_hard_reset(struct pci_dev *pdev,
-	void * __iomem vaddr, bool use_doorbell)
+	void * __iomem vaddr, u32 use_doorbell)
 {
 	u16 pmcsr;
 	int pos;
@@ -3116,8 +3252,7 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 		 * other way using the doorbell register.
 		 */
 		dev_info(&pdev->dev, "using doorbell to reset controller\n");
-		writel(DOORBELL_CTLR_RESET, vaddr + SA5_DOORBELL);
-		msleep(1000);
+		writel(use_doorbell, vaddr + SA5_DOORBELL);
 	} else { /* Try to do it the PCI power state way */
 
 		/* Quoting from the Open CISS Specification: "The Power
@@ -3148,28 +3283,79 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,
 		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
 		pmcsr |= PCI_D0;
 		pci_write_config_word(pdev, pos + PCI_PM_CTRL, pmcsr);
-
-		msleep(500);
 	}
 	return 0;
 }
 
+static __devinit void init_driver_version(char *driver_version, int len)
+{
+	memset(driver_version, 0, len);
+	strncpy(driver_version, "hpsa " HPSA_DRIVER_VERSION, len - 1);
+}
+
+static __devinit int write_driver_ver_to_cfgtable(
+	struct CfgTable __iomem *cfgtable)
+{
+	char *driver_version;
+	int i, size = sizeof(cfgtable->driver_version);
+
+	driver_version = kmalloc(size, GFP_KERNEL);
+	if (!driver_version)
+		return -ENOMEM;
+
+	init_driver_version(driver_version, size);
+	for (i = 0; i < size; i++)
+		writeb(driver_version[i], &cfgtable->driver_version[i]);
+	kfree(driver_version);
+	return 0;
+}
+
+static __devinit void read_driver_ver_from_cfgtable(
+	struct CfgTable __iomem *cfgtable, unsigned char *driver_ver)
+{
+	int i;
+
+	for (i = 0; i < sizeof(cfgtable->driver_version); i++)
+		driver_ver[i] = readb(&cfgtable->driver_version[i]);
+}
+
+static __devinit int controller_reset_failed(
+	struct CfgTable __iomem *cfgtable)
+{
+
+	char *driver_ver, *old_driver_ver;
+	int rc, size = sizeof(cfgtable->driver_version);
+
+	old_driver_ver = kmalloc(2 * size, GFP_KERNEL);
+	if (!old_driver_ver)
+		return -ENOMEM;
+	driver_ver = old_driver_ver + size;
+
+	/* After a reset, the 32 bytes of "driver version" in the cfgtable
+	 * should have been changed, otherwise we know the reset failed.
+	 */
+	init_driver_version(old_driver_ver, size);
+	read_driver_ver_from_cfgtable(cfgtable, driver_ver);
+	rc = !memcmp(driver_ver, old_driver_ver, size);
+	kfree(old_driver_ver);
+	return rc;
+}
 /* This does a hard reset of the controller using PCI power management
  * states or the using the doorbell register.
  */
 static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
 {
-	u16 saved_config_space[32];
 	u64 cfg_offset;
 	u32 cfg_base_addr;
 	u64 cfg_base_addr_index;
 	void __iomem *vaddr;
 	unsigned long paddr;
-	u32 misc_fw_support, active_transport;
-	int rc, i;
+	u32 misc_fw_support;
+	int rc;
 	struct CfgTable __iomem *cfgtable;
-	bool use_doorbell;
+	u32 use_doorbell;
 	u32 board_id;
+	u16 command_register;
 
 	/* For controllers as old as the P600, this is very nearly
 	 * the same thing as
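The driver_version helpers added above implement a small handshake for detecting a failed reset: before resetting, the driver scribbles a known string into the config table's driver_version bytes; a genuine reset wipes the table, so if the same bytes read back afterwards, the board never actually reset. A minimal sketch of that comparison in plain memory (the version string below is a placeholder, not necessarily HPSA_DRIVER_VERSION, and plain arrays stand in for the memory-mapped config table):

	#include <stdio.h>
	#include <string.h>

	#define VER_LEN 32	/* mirrors sizeof(cfgtable->driver_version) */

	static int reset_failed(const unsigned char table_ver[VER_LEN])
	{
		unsigned char expected[VER_LEN];

		memset(expected, 0, sizeof(expected));
		strncpy((char *)expected, "hpsa X.Y.Z", sizeof(expected) - 1);
		/* bytes unchanged across the "reset" => it did not happen */
		return memcmp(table_ver, expected, VER_LEN) == 0;
	}

	int main(void)
	{
		unsigned char table[VER_LEN] = "hpsa X.Y.Z"; /* written pre-reset */

		printf("%d\n", reset_failed(table));	/* 1: nothing wiped it */
		memset(table, 0, sizeof(table));	/* wiped by a real reset */
		printf("%d\n", reset_failed(table));	/* 0: reset succeeded */
		return 0;
	}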
@@ -3179,33 +3365,28 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
 	 * pci_set_power_state(pci_dev, PCI_D0);
 	 * pci_restore_state(pci_dev);
 	 *
-	 * but we can't use these nice canned kernel routines on
-	 * kexec, because they also check the MSI/MSI-X state in PCI
-	 * configuration space and do the wrong thing when it is
-	 * set/cleared.  Also, the pci_save/restore_state functions
-	 * violate the ordering requirements for restoring the
-	 * configuration space from the CCISS document (see the
-	 * comment below).  So we roll our own ....
-	 *
 	 * For controllers newer than the P600, the pci power state
 	 * method of resetting doesn't work so we have another way
 	 * using the doorbell register.
 	 */
 
-	/* Exclude 640x boards.  These are two pci devices in one slot
-	 * which share a battery backed cache module.  One controls the
-	 * cache, the other accesses the cache through the one that controls
-	 * it.  If we reset the one controlling the cache, the other will
-	 * likely not be happy.  Just forbid resetting this conjoined mess.
-	 * The 640x isn't really supported by hpsa anyway.
-	 */
-	hpsa_lookup_board_id(pdev, &board_id);
-	if (board_id == 0x409C0E11 || board_id == 0x409D0E11)
-		return -ENOTSUPP;
+	rc = hpsa_lookup_board_id(pdev, &board_id);
+	if (rc < 0 || !ctlr_is_resettable(board_id)) {
+		dev_warn(&pdev->dev, "Not resetting device.\n");
+		return -ENODEV;
+	}
 
-	for (i = 0; i < 32; i++)
-		pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+	/* if controller is soft- but not hard resettable... */
+	if (!ctlr_is_hard_resettable(board_id))
+		return -ENOTSUPP; /* try soft reset later. */
 
+	/* Save the PCI command register */
+	pci_read_config_word(pdev, 4, &command_register);
+	/* Turn the board off.  This is so that later pci_restore_state()
+	 * won't turn the board on before the rest of config space is ready.
+	 */
+	pci_disable_device(pdev);
+	pci_save_state(pdev);
 
 	/* find the first memory BAR, so we can find the cfg table */
 	rc = hpsa_pci_find_memory_BAR(pdev, &paddr);
@@ -3226,51 +3407,72 @@ static __devinit int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)
 		rc = -ENOMEM;
 		goto unmap_vaddr;
 	}
+	rc = write_driver_ver_to_cfgtable(cfgtable);
+	if (rc)
+		goto unmap_vaddr;
 
-	/* If reset via doorbell register is supported, use that. */
-	misc_fw_support = readl(&cfgtable->misc_fw_support);
-	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
-
-	/* The doorbell reset seems to cause lockups on some Smart
-	 * Arrays (e.g. P410, P410i, maybe others).  Until this is
-	 * fixed or at least isolated, avoid the doorbell reset.
-	 */
-	use_doorbell = 0;
+	/* If reset via doorbell register is supported, use that.
+	 * There are two such methods.  Favor the newest method.
+	 */
+	misc_fw_support = readl(&cfgtable->misc_fw_support);
+	use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET2;
+	if (use_doorbell) {
+		use_doorbell = DOORBELL_CTLR_RESET2;
+	} else {
+		use_doorbell = misc_fw_support & MISC_FW_DOORBELL_RESET;
+		if (use_doorbell) {
+			dev_warn(&pdev->dev, "Controller claims that "
+				"'Bit 2 doorbell reset' is "
+				"supported, but not 'bit 5 doorbell reset'.  "
+				"Firmware update is recommended.\n");
+			rc = -ENOTSUPP; /* try soft reset */
+			goto unmap_cfgtable;
+		}
+	}
 
 	rc = hpsa_controller_hard_reset(pdev, vaddr, use_doorbell);
 	if (rc)
 		goto unmap_cfgtable;
 
-	/* Restore the PCI configuration space.  The Open CISS
-	 * Specification says, "Restore the PCI Configuration
-	 * Registers, offsets 00h through 60h.  It is important to
-	 * restore the command register, 16-bits at offset 04h,
-	 * last.  Do not restore the configuration status register,
-	 * 16-bits at offset 06h."  Note that the offset is 2*i.
-	 */
-	for (i = 0; i < 32; i++) {
-		if (i == 2 || i == 3)
-			continue;
-		pci_write_config_word(pdev, 2*i, saved_config_space[i]);
-	}
-	wmb();
-	pci_write_config_word(pdev, 4, saved_config_space[2]);
+	pci_restore_state(pdev);
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_warn(&pdev->dev, "failed to enable device.\n");
+		goto unmap_cfgtable;
+	}
+	pci_write_config_word(pdev, 4, command_register);
 
 	/* Some devices (notably the HP Smart Array 5i Controller)
 	need a little pause here */
 	msleep(HPSA_POST_RESET_PAUSE_MSECS);
 
-	/* Controller should be in simple mode at this point.  If it's not,
-	 * It means we're on one of those controllers which doesn't support
-	 * the doorbell reset method and on which the PCI power management reset
-	 * method doesn't work (P800, for example.)
-	 * In those cases, pretend the reset worked and hope for the best.
-	 */
-	active_transport = readl(&cfgtable->TransportActive);
-	if (active_transport & PERFORMANT_MODE) {
-		dev_warn(&pdev->dev, "Unable to successfully reset controller,"
-			" proceeding anyway.\n");
+	/* Wait for board to become not ready, then ready. */
+	dev_info(&pdev->dev, "Waiting for board to reset.\n");
+	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+	if (rc) {
+		dev_warn(&pdev->dev,
+			"failed waiting for board to reset."
+			" Will try soft reset.\n");
+		rc = -ENOTSUPP; /* Not expected, but try soft reset later */
+		goto unmap_cfgtable;
+	}
+	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);
+	if (rc) {
+		dev_warn(&pdev->dev,
+			"failed waiting for board to become ready "
+			"after hard reset\n");
+		goto unmap_cfgtable;
+	}
+
+	rc = controller_reset_failed(vaddr);
+	if (rc < 0)
+		goto unmap_cfgtable;
+	if (rc) {
+		dev_warn(&pdev->dev, "Unable to successfully reset "
+			"controller. Will try soft reset.\n");
 		rc = -ENOTSUPP;
+	} else {
+		dev_info(&pdev->dev, "board ready after hard reset.\n");
 	}
 
 unmap_cfgtable:
@@ -3403,7 +3605,7 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h)
 default_int_mode:
 #endif				/* CONFIG_PCI_MSI */
 	/* if we get here we're going to use the default interrupt mode */
-	h->intr[PERF_MODE_INT] = h->pdev->irq;
+	h->intr[h->intr_mode] = h->pdev->irq;
 }
 
 static int __devinit hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
@@ -3455,18 +3657,28 @@ static int __devinit hpsa_pci_find_memory_BAR(struct pci_dev *pdev,
 	return -ENODEV;
 }
 
-static int __devinit hpsa_wait_for_board_ready(struct ctlr_info *h)
+static int __devinit hpsa_wait_for_board_state(struct pci_dev *pdev,
+	void __iomem *vaddr, int wait_for_ready)
 {
-	int i;
+	int i, iterations;
 	u32 scratchpad;
+	if (wait_for_ready)
+		iterations = HPSA_BOARD_READY_ITERATIONS;
+	else
+		iterations = HPSA_BOARD_NOT_READY_ITERATIONS;
 
-	for (i = 0; i < HPSA_BOARD_READY_ITERATIONS; i++) {
-		scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-		if (scratchpad == HPSA_FIRMWARE_READY)
-			return 0;
+	for (i = 0; i < iterations; i++) {
+		scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+		if (wait_for_ready) {
+			if (scratchpad == HPSA_FIRMWARE_READY)
+				return 0;
+		} else {
+			if (scratchpad != HPSA_FIRMWARE_READY)
+				return 0;
+		}
 		msleep(HPSA_BOARD_READY_POLL_INTERVAL_MSECS);
 	}
-	dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+	dev_warn(&pdev->dev, "board not ready, timed out.\n");
 	return -ENODEV;
 }
 
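hpsa_wait_for_board_state() generalizes the old ready-wait so the reset path can first confirm the scratchpad left the READY value (proof the reset actually took) and then wait for it to come back, as hpsa_kdump_hard_reset_controller() does above. A compact simulation of that two-phase usage (register reads are faked and the helper is this sketch's own; the READY constant follows the driver's HPSA_FIRMWARE_READY):

	#include <stdio.h>

	#define BOARD_NOT_READY	0
	#define BOARD_READY	1
	#define FIRMWARE_READY	0xffff0000u

	static unsigned scratchpad_values[] = {
		0xffff0000u, 0x0u, 0x0u, 0xffff0000u
	};
	static unsigned idx;

	static unsigned read_scratchpad(void)
	{
		return scratchpad_values[idx++ % 4];	/* fake MMIO read */
	}

	static int wait_for_board_state(int wait_for_ready, int iterations)
	{
		int i;

		for (i = 0; i < iterations; i++) {
			unsigned s = read_scratchpad();

			if ((s == FIRMWARE_READY) ==
			    (wait_for_ready == BOARD_READY))
				return 0;	/* reached the requested state */
			/* the driver msleep()s between polls here */
		}
		return -1;	/* timed out */
	}

	int main(void)
	{
		if (wait_for_board_state(BOARD_NOT_READY, 4) == 0 &&
		    wait_for_board_state(BOARD_READY, 4) == 0)
			printf("board went down and came back ready\n");
		return 0;
	}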
@@ -3501,6 +3713,9 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
 		cfg_base_addr_index) + cfg_offset, sizeof(*h->cfgtable));
 	if (!h->cfgtable)
 		return -ENOMEM;
+	rc = write_driver_ver_to_cfgtable(h->cfgtable);
+	if (rc)
+		return rc;
 	/* Find performant mode table. */
 	trans_offset = readl(&h->cfgtable->TransMethodOffset);
 	h->transtable = remap_pci_mem(pci_resource_start(h->pdev,
@@ -3514,6 +3729,11 @@ static int __devinit hpsa_find_cfgtables(struct ctlr_info *h)
 static void __devinit hpsa_get_max_perf_mode_cmds(struct ctlr_info *h)
 {
 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+
+	/* Limit commands in memory limited kdump scenario. */
+	if (reset_devices && h->max_commands > 32)
+		h->max_commands = 32;
+
 	if (h->max_commands < 16) {
 		dev_warn(&h->pdev->dev, "Controller reports "
 			"max supported commands of %d, an obvious lie. "
@@ -3588,16 +3808,21 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)
 static void __devinit hpsa_wait_for_mode_change_ack(struct ctlr_info *h)
 {
 	int i;
+	u32 doorbell_value;
+	unsigned long flags;
 
 	/* under certain very rare conditions, this can take awhile.
 	 * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right
 	 * as we enter this code.)
 	 */
 	for (i = 0; i < MAX_CONFIG_WAIT; i++) {
-		if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
+		spin_lock_irqsave(&h->lock, flags);
+		doorbell_value = readl(h->vaddr + SA5_DOORBELL);
+		spin_unlock_irqrestore(&h->lock, flags);
+		if (!(doorbell_value & CFGTBL_ChangeReq))
 			break;
 		/* delay and try again */
-		msleep(10);
+		usleep_range(10000, 20000);
 	}
 }
 
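Two small hardening changes here: the doorbell is now sampled under h->lock, so the read cannot interleave with the other h->lock holders this patch adds (the soft-reset path masks interrupts under the same lock), and usleep_range() replaces msleep(10). The latter matters because msleep() rounds up to jiffies and can sleep noticeably longer than asked on HZ=100 systems, while an explicit range also lets the scheduler coalesce wakeups:

	/* At HZ=100, msleep(10) commonly sleeps closer to 20 ms;
	 * usleep_range(10000, 20000) bounds the delay explicitly
	 * (min 10 ms, max 20 ms) using hrtimers.
	 */
	usleep_range(10000, 20000);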
@@ -3620,6 +3845,7 @@ static int __devinit hpsa_enter_simple_mode(struct ctlr_info *h)
 			"unable to get board into simple mode\n");
 		return -ENODEV;
 	}
+	h->transMethod = CFGTBL_Trans_Simple;
 	return 0;
 }
 
@@ -3658,7 +3884,7 @@ static int __devinit hpsa_pci_init(struct ctlr_info *h)
 		err = -ENOMEM;
 		goto err_out_free_res;
 	}
-	err = hpsa_wait_for_board_ready(h);
+	err = hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
 	if (err)
 		goto err_out_free_res;
 	err = hpsa_find_cfgtables(h);
@@ -3724,13 +3950,12 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
 	 * due to concerns about shared bbwc between 6402/6404 pair.
 	 */
 	if (rc == -ENOTSUPP)
-		return 0; /* just try to do the kdump anyhow. */
+		return rc; /* just try to do the kdump anyhow. */
 	if (rc)
 		return -ENODEV;
-	if (hpsa_reset_msi(pdev))
-		return -ENODEV;
 
 	/* Now try to get the controller to respond to a no-op */
+	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
 	for (i = 0; i < HPSA_POST_RESET_NOOP_RETRIES; i++) {
 		if (hpsa_noop(pdev) == 0)
 			break;
@@ -3741,18 +3966,133 @@ static __devinit int hpsa_init_reset_devices(struct pci_dev *pdev)
 	return 0;
 }
 
+static __devinit int hpsa_allocate_cmd_pool(struct ctlr_info *h)
+{
+	h->cmd_pool_bits = kzalloc(
+		DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG) *
+		sizeof(unsigned long), GFP_KERNEL);
+	h->cmd_pool = pci_alloc_consistent(h->pdev,
+		h->nr_cmds * sizeof(*h->cmd_pool),
+		&(h->cmd_pool_dhandle));
+	h->errinfo_pool = pci_alloc_consistent(h->pdev,
+		h->nr_cmds * sizeof(*h->errinfo_pool),
+		&(h->errinfo_pool_dhandle));
+	if ((h->cmd_pool_bits == NULL)
+		|| (h->cmd_pool == NULL)
+		|| (h->errinfo_pool == NULL)) {
+		dev_err(&h->pdev->dev, "out of memory in %s", __func__);
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void hpsa_free_cmd_pool(struct ctlr_info *h)
+{
+	kfree(h->cmd_pool_bits);
+	if (h->cmd_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(struct CommandList),
+			h->cmd_pool, h->cmd_pool_dhandle);
+	if (h->errinfo_pool)
+		pci_free_consistent(h->pdev,
+			h->nr_cmds * sizeof(struct ErrorInfo),
+			h->errinfo_pool,
+			h->errinfo_pool_dhandle);
+}
+
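The allocation bitmap gets one bit per command slot, rounded up to whole longs; DIV_ROUND_UP() plus kzalloc() replaces the open-coded ((nr + BITS_PER_LONG - 1) / BITS_PER_LONG) arithmetic and the separate memset() the old hpsa_init_one() carried. For example:

	/* nr_cmds = 1024 on a 64-bit kernel:
	 * DIV_ROUND_UP(1024, 64) = 16 longs = 128 bytes of bitmap,
	 * returned already zeroed by kzalloc().
	 */
	size_t bitmap_bytes = DIV_ROUND_UP(h->nr_cmds, BITS_PER_LONG)
				* sizeof(unsigned long);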
+static int hpsa_request_irq(struct ctlr_info *h,
+	irqreturn_t (*msixhandler)(int, void *),
+	irqreturn_t (*intxhandler)(int, void *))
+{
+	int rc;
+
+	if (h->msix_vector || h->msi_vector)
+		rc = request_irq(h->intr[h->intr_mode], msixhandler,
+				IRQF_DISABLED, h->devname, h);
+	else
+		rc = request_irq(h->intr[h->intr_mode], intxhandler,
+				IRQF_DISABLED, h->devname, h);
+	if (rc) {
+		dev_err(&h->pdev->dev, "unable to get irq %d for %s\n",
+			h->intr[h->intr_mode], h->devname);
+		return -ENODEV;
+	}
+	return 0;
+}
+
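Factoring the request_irq() calls into a helper lets the probe path register different handlers at different stages; this patch uses it twice:

	/* normal operation */
	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
		goto clean2;

	/* during the kdump soft reset, with throwaway handlers */
	rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
				hpsa_intx_discard_completions);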
+static int __devinit hpsa_kdump_soft_reset(struct ctlr_info *h)
+{
+	if (hpsa_send_host_reset(h, RAID_CTLR_LUNID,
+		HPSA_RESET_TYPE_CONTROLLER)) {
+		dev_warn(&h->pdev->dev, "Resetting array controller failed.\n");
+		return -EIO;
+	}
+
+	dev_info(&h->pdev->dev, "Waiting for board to soft reset.\n");
+	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_NOT_READY)) {
+		dev_warn(&h->pdev->dev, "Soft reset had no effect.\n");
+		return -1;
+	}
+
+	dev_info(&h->pdev->dev, "Board reset, awaiting READY status.\n");
+	if (hpsa_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY)) {
+		dev_warn(&h->pdev->dev, "Board failed to become ready "
+			"after soft reset.\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)
+{
+	free_irq(h->intr[h->intr_mode], h);
+#ifdef CONFIG_PCI_MSI
+	if (h->msix_vector)
+		pci_disable_msix(h->pdev);
+	else if (h->msi_vector)
+		pci_disable_msi(h->pdev);
+#endif /* CONFIG_PCI_MSI */
+	hpsa_free_sg_chain_blocks(h);
+	hpsa_free_cmd_pool(h);
+	kfree(h->blockFetchTable);
+	pci_free_consistent(h->pdev, h->reply_pool_size,
+		h->reply_pool, h->reply_pool_dhandle);
+	if (h->vaddr)
+		iounmap(h->vaddr);
+	if (h->transtable)
+		iounmap(h->transtable);
+	if (h->cfgtable)
+		iounmap(h->cfgtable);
+	pci_release_regions(h->pdev);
+	kfree(h);
+}
+
 static int __devinit hpsa_init_one(struct pci_dev *pdev,
 	const struct pci_device_id *ent)
 {
 	int dac, rc;
 	struct ctlr_info *h;
+	int try_soft_reset = 0;
+	unsigned long flags;
 
 	if (number_of_controllers == 0)
 		printk(KERN_INFO DRIVER_NAME "\n");
 
 	rc = hpsa_init_reset_devices(pdev);
-	if (rc)
-		return rc;
+	if (rc) {
+		if (rc != -ENOTSUPP)
+			return rc;
+		/* If the reset fails in a particular way (it has no way to do
+		 * a proper hard reset, so returns -ENOTSUPP) we can try to do
+		 * a soft reset once we get the controller configured up to the
+		 * point that it can accept a command.
+		 */
+		try_soft_reset = 1;
+		rc = 0;
+	}
+
+reinit_after_soft_reset:
 
 	/* Command structures must be aligned on a 32-byte boundary because
 	 * the 5 lower bits of the address are used by the hardware. and by
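hpsa_init_one() is restructured around the reinit_after_soft_reset label: when the hard reset was impossible (-ENOTSUPP), the driver brings the controller up just far enough to accept commands, soft resets it, tears everything back down, and runs the whole init sequence a second time. A condensed control-flow skeleton of the new probe path (summarizing the surrounding hunks, not new code):

	rc = hpsa_init_reset_devices(pdev);
	if (rc == -ENOTSUPP) {		/* hard reset unavailable */
		try_soft_reset = 1;
		rc = 0;
	}
reinit_after_soft_reset:
	/* allocate h, map BARs, request irq, allocate command pools ... */
	if (try_soft_reset) {
		/* swap in "discard" irq handlers, hpsa_kdump_soft_reset(),
		 * drain stale completions for 10 s, then undo everything */
		hpsa_undo_allocations_after_kdump_soft_reset(h);
		try_soft_reset = 0;
		goto reinit_after_soft_reset;
	}
	/* register with the SCSI midlayer as usual */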
@@ -3766,8 +4106,11 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
 
 	h->pdev = pdev;
 	h->busy_initializing = 1;
-	INIT_HLIST_HEAD(&h->cmpQ);
-	INIT_HLIST_HEAD(&h->reqQ);
+	h->intr_mode = hpsa_simple_mode ? SIMPLE_MODE_INT : PERF_MODE_INT;
+	INIT_LIST_HEAD(&h->cmpQ);
+	INIT_LIST_HEAD(&h->reqQ);
+	spin_lock_init(&h->lock);
+	spin_lock_init(&h->scan_lock);
 	rc = hpsa_pci_init(h);
 	if (rc != 0)
 		goto clean1;
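Two details in this hunk: the command queues switch from hlist to plain list_head (hlist heads save a pointer but offer no O(1) tail insertion, which a request queue wants), and the h->lock/h->scan_lock initialization moves up before hpsa_pci_init(), since hpsa_wait_for_mode_change_ack() now takes h->lock and is reached through hpsa_pci_init(). A sketch of the queue usage, assuming struct CommandList carries a struct list_head member as elsewhere in this series:

	INIT_LIST_HEAD(&h->reqQ);
	list_add_tail(&c->list, &h->reqQ);	/* enqueue (addQ) */
	list_del_init(&c->list);		/* dequeue (removeQ) */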
@@ -3793,56 +4136,82 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
 	/* make sure the board interrupts are off */
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
 
-	if (h->msix_vector || h->msi_vector)
-		rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_msi,
-				IRQF_DISABLED, h->devname, h);
-	else
-		rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr_intx,
-				IRQF_DISABLED, h->devname, h);
-	if (rc) {
-		dev_err(&pdev->dev, "unable to get irq %d for %s\n",
-			h->intr[PERF_MODE_INT], h->devname);
+	if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
 		goto clean2;
-	}
-
 	dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n",
 		h->devname, pdev->device,
-		h->intr[PERF_MODE_INT], dac ? "" : " not");
-
-	h->cmd_pool_bits =
-	    kmalloc(((h->nr_cmds + BITS_PER_LONG -
-		      1) / BITS_PER_LONG) * sizeof(unsigned long), GFP_KERNEL);
-	h->cmd_pool = pci_alloc_consistent(h->pdev,
-		    h->nr_cmds * sizeof(*h->cmd_pool),
-		    &(h->cmd_pool_dhandle));
-	h->errinfo_pool = pci_alloc_consistent(h->pdev,
-		    h->nr_cmds * sizeof(*h->errinfo_pool),
-		    &(h->errinfo_pool_dhandle));
-	if ((h->cmd_pool_bits == NULL)
-	    || (h->cmd_pool == NULL)
-	    || (h->errinfo_pool == NULL)) {
-		dev_err(&pdev->dev, "out of memory");
-		rc = -ENOMEM;
+		h->intr[h->intr_mode], dac ? "" : " not");
+	if (hpsa_allocate_cmd_pool(h))
 		goto clean4;
-	}
 	if (hpsa_allocate_sg_chain_blocks(h))
 		goto clean4;
-	spin_lock_init(&h->lock);
-	spin_lock_init(&h->scan_lock);
 	init_waitqueue_head(&h->scan_wait_queue);
 	h->scan_finished = 1; /* no scan currently in progress */
 
 	pci_set_drvdata(pdev, h);
-	memset(h->cmd_pool_bits, 0,
-	       ((h->nr_cmds + BITS_PER_LONG -
-		 1) / BITS_PER_LONG) * sizeof(unsigned long));
+	h->ndevices = 0;
+	h->scsi_host = NULL;
+	spin_lock_init(&h->devlock);
+	hpsa_put_ctlr_into_performant_mode(h);
 
-	hpsa_scsi_setup(h);
+	/* At this point, the controller is ready to take commands.
+	 * Now, if reset_devices and the hard reset didn't work, try
+	 * the soft reset and see if that works.
+	 */
+	if (try_soft_reset) {
+
+		/* This is kind of gross. We may or may not get a completion
+		 * from the soft reset command, and if we do, then the value
+		 * from the fifo may or may not be valid. So, we wait 10 secs
+		 * after the reset throwing away any completions we get during
+		 * that time. Unregister the interrupt handler and register
+		 * fake ones to scoop up any residual completions.
+		 */
+		spin_lock_irqsave(&h->lock, flags);
+		h->access.set_intr_mask(h, HPSA_INTR_OFF);
+		spin_unlock_irqrestore(&h->lock, flags);
+		free_irq(h->intr[h->intr_mode], h);
+		rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
+					hpsa_intx_discard_completions);
+		if (rc) {
+			dev_warn(&h->pdev->dev, "Failed to request_irq after "
+				"soft reset.\n");
+			goto clean4;
+		}
+
+		rc = hpsa_kdump_soft_reset(h);
+		if (rc)
+			/* Neither hard nor soft reset worked, we're hosed. */
+			goto clean4;
+
+		dev_info(&h->pdev->dev, "Board READY.\n");
+		dev_info(&h->pdev->dev,
+			"Waiting for stale completions to drain.\n");
+		h->access.set_intr_mask(h, HPSA_INTR_ON);
+		msleep(10000);
+		h->access.set_intr_mask(h, HPSA_INTR_OFF);
+
+		rc = controller_reset_failed(h->cfgtable);
+		if (rc)
+			dev_info(&h->pdev->dev,
+				"Soft reset appears to have failed.\n");
+
+		/* since the controller's reset, we have to go back and re-init
+		 * everything. Easiest to just forget what we've done and do it
+		 * all over again.
+		 */
+		hpsa_undo_allocations_after_kdump_soft_reset(h);
+		try_soft_reset = 0;
+		if (rc)
+			/* don't go to clean4, we already unallocated */
+			return -ENODEV;
+
+		goto reinit_after_soft_reset;
+	}
 
 	/* Turn the interrupts on so we can service requests */
 	h->access.set_intr_mask(h, HPSA_INTR_ON);
 
-	hpsa_put_ctlr_into_performant_mode(h);
 	hpsa_hba_inquiry(h);
 	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */
 	h->busy_initializing = 0;
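The 10-second drain exists because the soft reset may or may not produce a completion, and any tag read from the FIFO during that window may be garbage, so the real handlers are swapped for ones that simply empty the queue. A minimal sketch of such a discard handler, assuming completions are drained through the access-method table (the real hpsa_msix/intx_discard_completions in this patch are slightly more involved):

	static irqreturn_t discard_completions_sketch(int irq, void *dev_id)
	{
		struct ctlr_info *h = dev_id;
		unsigned long flags;

		spin_lock_irqsave(&h->lock, flags);
		/* throw away any tags left over from before the reset */
		while (h->access.command_completed(h) != FIFO_EMPTY)
			;
		spin_unlock_irqrestore(&h->lock, flags);
		return IRQ_HANDLED;
	}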
@@ -3850,17 +4219,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev,
 
 clean4:
 	hpsa_free_sg_chain_blocks(h);
-	kfree(h->cmd_pool_bits);
-	if (h->cmd_pool)
-		pci_free_consistent(h->pdev,
-			h->nr_cmds * sizeof(struct CommandList),
-			h->cmd_pool, h->cmd_pool_dhandle);
-	if (h->errinfo_pool)
-		pci_free_consistent(h->pdev,
-			h->nr_cmds * sizeof(struct ErrorInfo),
-			h->errinfo_pool,
-			h->errinfo_pool_dhandle);
-	free_irq(h->intr[PERF_MODE_INT], h);
+	hpsa_free_cmd_pool(h);
+	free_irq(h->intr[h->intr_mode], h);
 clean2:
 clean1:
 	h->busy_initializing = 0;
@@ -3904,7 +4264,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
 	 */
 	hpsa_flush_cache(h);
 	h->access.set_intr_mask(h, HPSA_INTR_OFF);
-	free_irq(h->intr[PERF_MODE_INT], h);
+	free_irq(h->intr[h->intr_mode], h);
 #ifdef CONFIG_PCI_MSI
 	if (h->msix_vector)
 		pci_disable_msix(h->pdev);
@@ -4006,7 +4366,8 @@ static void calc_bucket_map(int bucket[], int num_buckets,
 	}
 }
 
-static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
+static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h,
+	u32 use_short_tags)
 {
 	int i;
 	unsigned long register_value;
@@ -4054,7 +4415,7 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
 	writel(0, &h->transtable->RepQCtrAddrHigh32);
 	writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32);
 	writel(0, &h->transtable->RepQAddr0High32);
-	writel(CFGTBL_Trans_Performant,
+	writel(CFGTBL_Trans_Performant | use_short_tags,
 		&(h->cfgtable->HostWrite.TransportRequest));
 	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);
 	hpsa_wait_for_mode_change_ack(h);
@@ -4064,12 +4425,18 @@ static __devinit void hpsa_enter_performant_mode(struct ctlr_info *h)
 			" performant mode\n");
 		return;
 	}
+	/* Change the access methods to the performant access methods */
+	h->access = SA5_performant_access;
+	h->transMethod = CFGTBL_Trans_Performant;
 }
 
 static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 {
 	u32 trans_support;
 
+	if (hpsa_simple_mode)
+		return;
+
 	trans_support = readl(&(h->cfgtable->TransportSupport));
 	if (!(trans_support & PERFORMANT_MODE))
 		return;
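hpsa_enter_performant_mode() now absorbs the access-method switch, so the caller can no longer claim performant mode when the mode change actually failed, and it takes use_short_tags, OR'd into TransportRequest when the controller advertises CFGTBL_Trans_use_short_tags. Short 32-bit tags carry the command's pool index directly; a sketch of the encoding as used elsewhere in the driver (the DIRECT_LOOKUP_* constants are defined outside this hunk):

	/* the tag encodes the command's index in the pool ... */
	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT)
				| DIRECT_LOOKUP_BIT;
	/* ... so a completion maps straight back to its command */
	c = h->cmd_pool + (raw_tag >> DIRECT_LOOKUP_SHIFT);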
@@ -4089,11 +4456,8 @@ static __devinit void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)
 		|| (h->blockFetchTable == NULL))
 		goto clean_up;
 
-	hpsa_enter_performant_mode(h);
-
-	/* Change the access methods to the performant access methods */
-	h->access = SA5_performant_access;
-	h->transMethod = CFGTBL_Trans_Performant;
+	hpsa_enter_performant_mode(h,
+		trans_support & CFGTBL_Trans_use_short_tags);
 
 	return;
 