author      Linus Torvalds <torvalds@linux-foundation.org>   2010-02-26 19:55:27 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>   2010-02-26 19:55:27 -0500
commit      654451748b779b28077d9058442d0f354251870d (patch)
tree        ff889a2f6226e16b1121789f809927666a9ccf13 /drivers/scsi/hpsa.c
parent      64d497f55379b1e320a08ec2426468d96f5642ec (diff)
parent      77c9cfc51b0d732b2524799810fb30018074fd60 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6
* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (158 commits)
[SCSI] Fix printing of failed 32-byte commands
[SCSI] Fix printing of variable length commands
[SCSI] libsrp: fix bug in ADDITIONAL CDB LENGTH interpretation
[SCSI] scsi_dh_alua: Add IBM Power Virtual SCSI ALUA device to dev list
[SCSI] scsi_dh_alua: add netapp to dev list
[SCSI] qla2xxx: Update version number to 8.03.02-k1.
[SCSI] qla2xxx: EEH: Restore PCI saved state during pci slot reset.
[SCSI] qla2xxx: Add firmware ETS burst support.
[SCSI] qla2xxx: Correct loop-resync issues during SNS scans.
[SCSI] qla2xxx: Correct use-after-free issue in terminate_rport_io callback.
[SCSI] qla2xxx: Correct EH bus-reset handling.
[SCSI] qla2xxx: Proper clean-up of BSG requests when request times out.
[SCSI] qla2xxx: Initialize payload receive length in failure path of vendor commands
[SCSI] fix duplicate removal on error path in scsi_sysfs_add_sdev
[SCSI] fix refcounting bug in scsi_get_host_dev
[SCSI] fix memory leak in scsi_report_lun_scan
[SCSI] lpfc: correct PPC build failure
[SCSI] raid_class: add raid1e
[SCSI] mpt2sas: Do not call sas_is_tlr_enabled for RAID volumes.
[SCSI] zfcp: Introduce header file for qdio structs and inline functions
...
Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r-- | drivers/scsi/hpsa.c | 793
1 file changed, 571 insertions, 222 deletions
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index bb96fdd58e23..03697ba94251 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -52,7 +52,7 @@ | |||
52 | #include "hpsa.h" | 52 | #include "hpsa.h" |
53 | 53 | ||
54 | /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ | 54 | /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ |
55 | #define HPSA_DRIVER_VERSION "1.0.0" | 55 | #define HPSA_DRIVER_VERSION "2.0.1-3" |
56 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" | 56 | #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" |
57 | 57 | ||
58 | /* How long to wait (in milliseconds) for board to go into simple mode */ | 58 | /* How long to wait (in milliseconds) for board to go into simple mode */ |
@@ -77,9 +77,6 @@ MODULE_PARM_DESC(hpsa_allow_any, | |||
77 | 77 | ||
78 | /* define the PCI info for the cards we can control */ | 78 | /* define the PCI info for the cards we can control */ |
79 | static const struct pci_device_id hpsa_pci_device_id[] = { | 79 | static const struct pci_device_id hpsa_pci_device_id[] = { |
80 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223}, | ||
81 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234}, | ||
82 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D}, | ||
83 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, | 80 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, |
84 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, | 81 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, |
85 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, | 82 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, |
@@ -87,6 +84,9 @@ static const struct pci_device_id hpsa_pci_device_id[] = { | |||
87 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, | 84 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, |
88 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, | 85 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, |
89 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, | 86 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, |
87 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3233}, | ||
88 | #define PCI_DEVICE_ID_HP_CISSF 0x333f | ||
89 | {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSF, 0x103C, 0x333F}, | ||
90 | {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, | 90 | {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, |
91 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, | 91 | PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, |
92 | {0,} | 92 | {0,} |
@@ -99,9 +99,6 @@ MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); | |||
99 | * access = Address of the struct of function pointers | 99 | * access = Address of the struct of function pointers |
100 | */ | 100 | */ |
101 | static struct board_type products[] = { | 101 | static struct board_type products[] = { |
102 | {0x3223103C, "Smart Array P800", &SA5_access}, | ||
103 | {0x3234103C, "Smart Array P400", &SA5_access}, | ||
104 | {0x323d103c, "Smart Array P700M", &SA5_access}, | ||
105 | {0x3241103C, "Smart Array P212", &SA5_access}, | 102 | {0x3241103C, "Smart Array P212", &SA5_access}, |
106 | {0x3243103C, "Smart Array P410", &SA5_access}, | 103 | {0x3243103C, "Smart Array P410", &SA5_access}, |
107 | {0x3245103C, "Smart Array P410i", &SA5_access}, | 104 | {0x3245103C, "Smart Array P410i", &SA5_access}, |
@@ -109,6 +106,8 @@ static struct board_type products[] = { | |||
109 | {0x3249103C, "Smart Array P812", &SA5_access}, | 106 | {0x3249103C, "Smart Array P812", &SA5_access}, |
110 | {0x324a103C, "Smart Array P712m", &SA5_access}, | 107 | {0x324a103C, "Smart Array P712m", &SA5_access}, |
111 | {0x324b103C, "Smart Array P711m", &SA5_access}, | 108 | {0x324b103C, "Smart Array P711m", &SA5_access}, |
109 | {0x3233103C, "StorageWorks P1210m", &SA5_access}, | ||
110 | {0x333F103C, "StorageWorks P1210m", &SA5_access}, | ||
112 | {0xFFFF103C, "Unknown Smart Array", &SA5_access}, | 111 | {0xFFFF103C, "Unknown Smart Array", &SA5_access}, |
113 | }; | 112 | }; |
114 | 113 | ||
@@ -126,12 +125,15 @@ static void cmd_free(struct ctlr_info *h, struct CommandList *c); | |||
126 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); | 125 | static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); |
127 | static struct CommandList *cmd_alloc(struct ctlr_info *h); | 126 | static struct CommandList *cmd_alloc(struct ctlr_info *h); |
128 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h); | 127 | static struct CommandList *cmd_special_alloc(struct ctlr_info *h); |
129 | static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h, | 128 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
130 | void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, | 129 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
131 | int cmd_type); | 130 | int cmd_type); |
132 | 131 | ||
133 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | 132 | static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, |
134 | void (*done)(struct scsi_cmnd *)); | 133 | void (*done)(struct scsi_cmnd *)); |
134 | static void hpsa_scan_start(struct Scsi_Host *); | ||
135 | static int hpsa_scan_finished(struct Scsi_Host *sh, | ||
136 | unsigned long elapsed_time); | ||
135 | 137 | ||
136 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); | 138 | static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); |
137 | static int hpsa_slave_alloc(struct scsi_device *sdev); | 139 | static int hpsa_slave_alloc(struct scsi_device *sdev); |
@@ -150,6 +152,11 @@ static int check_for_unit_attention(struct ctlr_info *h, | |||
150 | struct CommandList *c); | 152 | struct CommandList *c); |
151 | static void check_ioctl_unit_attention(struct ctlr_info *h, | 153 | static void check_ioctl_unit_attention(struct ctlr_info *h, |
152 | struct CommandList *c); | 154 | struct CommandList *c); |
155 | /* performant mode helper functions */ | ||
156 | static void calc_bucket_map(int *bucket, int num_buckets, | ||
157 | int nsgs, int *bucket_map); | ||
158 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h); | ||
159 | static inline u32 next_command(struct ctlr_info *h); | ||
153 | 160 | ||
154 | static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); | 161 | static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); |
155 | static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); | 162 | static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); |
@@ -173,10 +180,10 @@ static struct scsi_host_template hpsa_driver_template = { | |||
173 | .name = "hpsa", | 180 | .name = "hpsa", |
174 | .proc_name = "hpsa", | 181 | .proc_name = "hpsa", |
175 | .queuecommand = hpsa_scsi_queue_command, | 182 | .queuecommand = hpsa_scsi_queue_command, |
176 | .can_queue = 512, | 183 | .scan_start = hpsa_scan_start, |
184 | .scan_finished = hpsa_scan_finished, | ||
177 | .this_id = -1, | 185 | .this_id = -1, |
178 | .sg_tablesize = MAXSGENTRIES, | 186 | .sg_tablesize = MAXSGENTRIES, |
179 | .cmd_per_lun = 512, | ||
180 | .use_clustering = ENABLE_CLUSTERING, | 187 | .use_clustering = ENABLE_CLUSTERING, |
181 | .eh_device_reset_handler = hpsa_eh_device_reset_handler, | 188 | .eh_device_reset_handler = hpsa_eh_device_reset_handler, |
182 | .ioctl = hpsa_ioctl, | 189 | .ioctl = hpsa_ioctl, |
@@ -195,6 +202,12 @@ static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) | |||
195 | return (struct ctlr_info *) *priv; | 202 | return (struct ctlr_info *) *priv; |
196 | } | 203 | } |
197 | 204 | ||
205 | static inline struct ctlr_info *shost_to_hba(struct Scsi_Host *sh) | ||
206 | { | ||
207 | unsigned long *priv = shost_priv(sh); | ||
208 | return (struct ctlr_info *) *priv; | ||
209 | } | ||
210 | |||
198 | static struct task_struct *hpsa_scan_thread; | 211 | static struct task_struct *hpsa_scan_thread; |
199 | static DEFINE_MUTEX(hpsa_scan_mutex); | 212 | static DEFINE_MUTEX(hpsa_scan_mutex); |
200 | static LIST_HEAD(hpsa_scan_q); | 213 | static LIST_HEAD(hpsa_scan_q); |
@@ -312,7 +325,7 @@ static int hpsa_scan_func(__attribute__((unused)) void *data) | |||
312 | h->busy_scanning = 1; | 325 | h->busy_scanning = 1; |
313 | mutex_unlock(&hpsa_scan_mutex); | 326 | mutex_unlock(&hpsa_scan_mutex); |
314 | host_no = h->scsi_host ? h->scsi_host->host_no : -1; | 327 | host_no = h->scsi_host ? h->scsi_host->host_no : -1; |
315 | hpsa_update_scsi_devices(h, host_no); | 328 | hpsa_scan_start(h->scsi_host); |
316 | complete_all(&h->scan_wait); | 329 | complete_all(&h->scan_wait); |
317 | mutex_lock(&hpsa_scan_mutex); | 330 | mutex_lock(&hpsa_scan_mutex); |
318 | h->busy_scanning = 0; | 331 | h->busy_scanning = 0; |
@@ -379,8 +392,7 @@ static ssize_t host_store_rescan(struct device *dev, | |||
379 | { | 392 | { |
380 | struct ctlr_info *h; | 393 | struct ctlr_info *h; |
381 | struct Scsi_Host *shost = class_to_shost(dev); | 394 | struct Scsi_Host *shost = class_to_shost(dev); |
382 | unsigned long *priv = shost_priv(shost); | 395 | h = shost_to_hba(shost); |
383 | h = (struct ctlr_info *) *priv; | ||
384 | if (add_to_scan_list(h)) { | 396 | if (add_to_scan_list(h)) { |
385 | wake_up_process(hpsa_scan_thread); | 397 | wake_up_process(hpsa_scan_thread); |
386 | wait_for_completion_interruptible(&h->scan_wait); | 398 | wait_for_completion_interruptible(&h->scan_wait); |
@@ -394,10 +406,44 @@ static inline void addQ(struct hlist_head *list, struct CommandList *c) | |||
394 | hlist_add_head(&c->list, list); | 406 | hlist_add_head(&c->list, list); |
395 | } | 407 | } |
396 | 408 | ||
409 | static inline u32 next_command(struct ctlr_info *h) | ||
410 | { | ||
411 | u32 a; | ||
412 | |||
413 | if (unlikely(h->transMethod != CFGTBL_Trans_Performant)) | ||
414 | return h->access.command_completed(h); | ||
415 | |||
416 | if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) { | ||
417 | a = *(h->reply_pool_head); /* Next cmd in ring buffer */ | ||
418 | (h->reply_pool_head)++; | ||
419 | h->commands_outstanding--; | ||
420 | } else { | ||
421 | a = FIFO_EMPTY; | ||
422 | } | ||
423 | /* Check for wraparound */ | ||
424 | if (h->reply_pool_head == (h->reply_pool + h->max_commands)) { | ||
425 | h->reply_pool_head = h->reply_pool; | ||
426 | h->reply_pool_wraparound ^= 1; | ||
427 | } | ||
428 | return a; | ||
429 | } | ||
430 | |||
431 | /* set_performant_mode: Modify the tag for cciss performant | ||
432 | * set bit 0 for pull model, bits 3-1 for block fetch | ||
433 | * register number | ||
434 | */ | ||
435 | static void set_performant_mode(struct ctlr_info *h, struct CommandList *c) | ||
436 | { | ||
437 | if (likely(h->transMethod == CFGTBL_Trans_Performant)) | ||
438 | c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); | ||
439 | } | ||
440 | |||
397 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, | 441 | static void enqueue_cmd_and_start_io(struct ctlr_info *h, |
398 | struct CommandList *c) | 442 | struct CommandList *c) |
399 | { | 443 | { |
400 | unsigned long flags; | 444 | unsigned long flags; |
445 | |||
446 | set_performant_mode(h, c); | ||
401 | spin_lock_irqsave(&h->lock, flags); | 447 | spin_lock_irqsave(&h->lock, flags); |
402 | addQ(&h->reqQ, c); | 448 | addQ(&h->reqQ, c); |
403 | h->Qdepth++; | 449 | h->Qdepth++; |
@@ -422,6 +468,15 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) | |||
422 | return (scsi3addr[3] & 0xC0) == 0x40; | 468 | return (scsi3addr[3] & 0xC0) == 0x40; |
423 | } | 469 | } |
424 | 470 | ||
471 | static inline int is_scsi_rev_5(struct ctlr_info *h) | ||
472 | { | ||
473 | if (!h->hba_inquiry_data) | ||
474 | return 0; | ||
475 | if ((h->hba_inquiry_data[2] & 0x07) == 5) | ||
476 | return 1; | ||
477 | return 0; | ||
478 | } | ||
479 | |||
425 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", | 480 | static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", |
426 | "UNKNOWN" | 481 | "UNKNOWN" |
427 | }; | 482 | }; |
@@ -431,7 +486,7 @@ static ssize_t raid_level_show(struct device *dev, | |||
431 | struct device_attribute *attr, char *buf) | 486 | struct device_attribute *attr, char *buf) |
432 | { | 487 | { |
433 | ssize_t l = 0; | 488 | ssize_t l = 0; |
434 | int rlevel; | 489 | unsigned char rlevel; |
435 | struct ctlr_info *h; | 490 | struct ctlr_info *h; |
436 | struct scsi_device *sdev; | 491 | struct scsi_device *sdev; |
437 | struct hpsa_scsi_dev_t *hdev; | 492 | struct hpsa_scsi_dev_t *hdev; |
@@ -455,7 +510,7 @@ static ssize_t raid_level_show(struct device *dev, | |||
455 | 510 | ||
456 | rlevel = hdev->raid_level; | 511 | rlevel = hdev->raid_level; |
457 | spin_unlock_irqrestore(&h->lock, flags); | 512 | spin_unlock_irqrestore(&h->lock, flags); |
458 | if (rlevel < 0 || rlevel > RAID_UNKNOWN) | 513 | if (rlevel > RAID_UNKNOWN) |
459 | rlevel = RAID_UNKNOWN; | 514 | rlevel = RAID_UNKNOWN; |
460 | l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); | 515 | l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); |
461 | return l; | 516 | return l; |
@@ -620,6 +675,24 @@ lun_assigned: | |||
620 | return 0; | 675 | return 0; |
621 | } | 676 | } |
622 | 677 | ||
678 | /* Replace an entry from h->dev[] array. */ | ||
679 | static void hpsa_scsi_replace_entry(struct ctlr_info *h, int hostno, | ||
680 | int entry, struct hpsa_scsi_dev_t *new_entry, | ||
681 | struct hpsa_scsi_dev_t *added[], int *nadded, | ||
682 | struct hpsa_scsi_dev_t *removed[], int *nremoved) | ||
683 | { | ||
684 | /* assumes h->devlock is held */ | ||
685 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); | ||
686 | removed[*nremoved] = h->dev[entry]; | ||
687 | (*nremoved)++; | ||
688 | h->dev[entry] = new_entry; | ||
689 | added[*nadded] = new_entry; | ||
690 | (*nadded)++; | ||
691 | dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d changed.\n", | ||
692 | scsi_device_type(new_entry->devtype), hostno, new_entry->bus, | ||
693 | new_entry->target, new_entry->lun); | ||
694 | } | ||
695 | |||
623 | /* Remove an entry from h->dev[] array. */ | 696 | /* Remove an entry from h->dev[] array. */ |
624 | static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, | 697 | static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, |
625 | struct hpsa_scsi_dev_t *removed[], int *nremoved) | 698 | struct hpsa_scsi_dev_t *removed[], int *nremoved) |
@@ -628,8 +701,7 @@ static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, | |||
628 | int i; | 701 | int i; |
629 | struct hpsa_scsi_dev_t *sd; | 702 | struct hpsa_scsi_dev_t *sd; |
630 | 703 | ||
631 | if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA) | 704 | BUG_ON(entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA); |
632 | BUG(); | ||
633 | 705 | ||
634 | sd = h->dev[entry]; | 706 | sd = h->dev[entry]; |
635 | removed[*nremoved] = h->dev[entry]; | 707 | removed[*nremoved] = h->dev[entry]; |
@@ -722,6 +794,8 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, | |||
722 | #define DEVICE_CHANGED 1 | 794 | #define DEVICE_CHANGED 1 |
723 | #define DEVICE_SAME 2 | 795 | #define DEVICE_SAME 2 |
724 | for (i = 0; i < haystack_size; i++) { | 796 | for (i = 0; i < haystack_size; i++) { |
797 | if (haystack[i] == NULL) /* previously removed. */ | ||
798 | continue; | ||
725 | if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { | 799 | if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { |
726 | *index = i; | 800 | *index = i; |
727 | if (device_is_the_same(needle, haystack[i])) | 801 | if (device_is_the_same(needle, haystack[i])) |
@@ -734,7 +808,7 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, | |||
734 | return DEVICE_NOT_FOUND; | 808 | return DEVICE_NOT_FOUND; |
735 | } | 809 | } |
736 | 810 | ||
737 | static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, | 811 | static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, |
738 | struct hpsa_scsi_dev_t *sd[], int nsds) | 812 | struct hpsa_scsi_dev_t *sd[], int nsds) |
739 | { | 813 | { |
740 | /* sd contains scsi3 addresses and devtypes, and inquiry | 814 | /* sd contains scsi3 addresses and devtypes, and inquiry |
@@ -779,12 +853,12 @@ static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, | |||
779 | continue; /* remove ^^^, hence i not incremented */ | 853 | continue; /* remove ^^^, hence i not incremented */ |
780 | } else if (device_change == DEVICE_CHANGED) { | 854 | } else if (device_change == DEVICE_CHANGED) { |
781 | changes++; | 855 | changes++; |
782 | hpsa_scsi_remove_entry(h, hostno, i, | 856 | hpsa_scsi_replace_entry(h, hostno, i, sd[entry], |
783 | removed, &nremoved); | 857 | added, &nadded, removed, &nremoved); |
784 | (void) hpsa_scsi_add_entry(h, hostno, sd[entry], | 858 | /* Set it to NULL to prevent it from being freed |
785 | added, &nadded); | 859 | * at the bottom of hpsa_update_scsi_devices() |
786 | /* add can't fail, we just removed one. */ | 860 | */ |
787 | sd[entry] = NULL; /* prevent it from being freed */ | 861 | sd[entry] = NULL; |
788 | } | 862 | } |
789 | i++; | 863 | i++; |
790 | } | 864 | } |
@@ -860,7 +934,6 @@ static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, | |||
860 | free_and_out: | 934 | free_and_out: |
861 | kfree(added); | 935 | kfree(added); |
862 | kfree(removed); | 936 | kfree(removed); |
863 | return 0; | ||
864 | } | 937 | } |
865 | 938 | ||
866 | /* | 939 | /* |
@@ -900,7 +973,7 @@ static int hpsa_slave_alloc(struct scsi_device *sdev) | |||
900 | 973 | ||
901 | static void hpsa_slave_destroy(struct scsi_device *sdev) | 974 | static void hpsa_slave_destroy(struct scsi_device *sdev) |
902 | { | 975 | { |
903 | return; /* nothing to do. */ | 976 | /* nothing to do. */ |
904 | } | 977 | } |
905 | 978 | ||
906 | static void hpsa_scsi_setup(struct ctlr_info *h) | 979 | static void hpsa_scsi_setup(struct ctlr_info *h) |
@@ -908,11 +981,10 @@ static void hpsa_scsi_setup(struct ctlr_info *h) | |||
908 | h->ndevices = 0; | 981 | h->ndevices = 0; |
909 | h->scsi_host = NULL; | 982 | h->scsi_host = NULL; |
910 | spin_lock_init(&h->devlock); | 983 | spin_lock_init(&h->devlock); |
911 | return; | ||
912 | } | 984 | } |
913 | 985 | ||
914 | static void complete_scsi_command(struct CommandList *cp, | 986 | static void complete_scsi_command(struct CommandList *cp, |
915 | int timeout, __u32 tag) | 987 | int timeout, u32 tag) |
916 | { | 988 | { |
917 | struct scsi_cmnd *cmd; | 989 | struct scsi_cmnd *cmd; |
918 | struct ctlr_info *h; | 990 | struct ctlr_info *h; |
@@ -987,7 +1059,6 @@ static void complete_scsi_command(struct CommandList *cp, | |||
987 | * required | 1059 | * required |
988 | */ | 1060 | */ |
989 | if ((asc == 0x04) && (ascq == 0x03)) { | 1061 | if ((asc == 0x04) && (ascq == 0x03)) { |
990 | cmd->result = DID_NO_CONNECT << 16; | ||
991 | dev_warn(&h->pdev->dev, "cp %p " | 1062 | dev_warn(&h->pdev->dev, "cp %p " |
992 | "has check condition: unit " | 1063 | "has check condition: unit " |
993 | "not ready, manual " | 1064 | "not ready, manual " |
@@ -995,14 +1066,22 @@ static void complete_scsi_command(struct CommandList *cp, | |||
995 | break; | 1066 | break; |
996 | } | 1067 | } |
997 | } | 1068 | } |
998 | 1069 | if (sense_key == ABORTED_COMMAND) { | |
999 | 1070 | /* Aborted command is retryable */ | |
1071 | dev_warn(&h->pdev->dev, "cp %p " | ||
1072 | "has check condition: aborted command: " | ||
1073 | "ASC: 0x%x, ASCQ: 0x%x\n", | ||
1074 | cp, asc, ascq); | ||
1075 | cmd->result = DID_SOFT_ERROR << 16; | ||
1076 | break; | ||
1077 | } | ||
1000 | /* Must be some other type of check condition */ | 1078 | /* Must be some other type of check condition */ |
1001 | dev_warn(&h->pdev->dev, "cp %p has check condition: " | 1079 | dev_warn(&h->pdev->dev, "cp %p has check condition: " |
1002 | "unknown type: " | 1080 | "unknown type: " |
1003 | "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " | 1081 | "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " |
1004 | "Returning result: 0x%x, " | 1082 | "Returning result: 0x%x, " |
1005 | "cmd=[%02x %02x %02x %02x %02x " | 1083 | "cmd=[%02x %02x %02x %02x %02x " |
1084 | "%02x %02x %02x %02x %02x %02x " | ||
1006 | "%02x %02x %02x %02x %02x]\n", | 1085 | "%02x %02x %02x %02x %02x]\n", |
1007 | cp, sense_key, asc, ascq, | 1086 | cp, sense_key, asc, ascq, |
1008 | cmd->result, | 1087 | cmd->result, |
@@ -1010,7 +1089,10 @@ static void complete_scsi_command(struct CommandList *cp, | |||
1010 | cmd->cmnd[2], cmd->cmnd[3], | 1089 | cmd->cmnd[2], cmd->cmnd[3], |
1011 | cmd->cmnd[4], cmd->cmnd[5], | 1090 | cmd->cmnd[4], cmd->cmnd[5], |
1012 | cmd->cmnd[6], cmd->cmnd[7], | 1091 | cmd->cmnd[6], cmd->cmnd[7], |
1013 | cmd->cmnd[8], cmd->cmnd[9]); | 1092 | cmd->cmnd[8], cmd->cmnd[9], |
1093 | cmd->cmnd[10], cmd->cmnd[11], | ||
1094 | cmd->cmnd[12], cmd->cmnd[13], | ||
1095 | cmd->cmnd[14], cmd->cmnd[15]); | ||
1014 | break; | 1096 | break; |
1015 | } | 1097 | } |
1016 | 1098 | ||
@@ -1086,7 +1168,7 @@ static void complete_scsi_command(struct CommandList *cp, | |||
1086 | dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); | 1168 | dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); |
1087 | break; | 1169 | break; |
1088 | case CMD_UNSOLICITED_ABORT: | 1170 | case CMD_UNSOLICITED_ABORT: |
1089 | cmd->result = DID_ABORT << 16; | 1171 | cmd->result = DID_RESET << 16; |
1090 | dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " | 1172 | dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " |
1091 | "abort\n", cp); | 1173 | "abort\n", cp); |
1092 | break; | 1174 | break; |
@@ -1119,9 +1201,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h) | |||
1119 | sh->max_cmd_len = MAX_COMMAND_SIZE; | 1201 | sh->max_cmd_len = MAX_COMMAND_SIZE; |
1120 | sh->max_lun = HPSA_MAX_LUN; | 1202 | sh->max_lun = HPSA_MAX_LUN; |
1121 | sh->max_id = HPSA_MAX_LUN; | 1203 | sh->max_id = HPSA_MAX_LUN; |
1204 | sh->can_queue = h->nr_cmds; | ||
1205 | sh->cmd_per_lun = h->nr_cmds; | ||
1122 | h->scsi_host = sh; | 1206 | h->scsi_host = sh; |
1123 | sh->hostdata[0] = (unsigned long) h; | 1207 | sh->hostdata[0] = (unsigned long) h; |
1124 | sh->irq = h->intr[SIMPLE_MODE_INT]; | 1208 | sh->irq = h->intr[PERF_MODE_INT]; |
1125 | sh->unique_id = sh->irq; | 1209 | sh->unique_id = sh->irq; |
1126 | error = scsi_add_host(sh, &h->pdev->dev); | 1210 | error = scsi_add_host(sh, &h->pdev->dev); |
1127 | if (error) | 1211 | if (error) |
@@ -1133,11 +1217,11 @@ static int hpsa_scsi_detect(struct ctlr_info *h) | |||
1133 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host" | 1217 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host" |
1134 | " failed for controller %d\n", h->ctlr); | 1218 | " failed for controller %d\n", h->ctlr); |
1135 | scsi_host_put(sh); | 1219 | scsi_host_put(sh); |
1136 | return -1; | 1220 | return error; |
1137 | fail: | 1221 | fail: |
1138 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc" | 1222 | dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc" |
1139 | " failed for controller %d\n", h->ctlr); | 1223 | " failed for controller %d\n", h->ctlr); |
1140 | return -1; | 1224 | return -ENOMEM; |
1141 | } | 1225 | } |
1142 | 1226 | ||
1143 | static void hpsa_pci_unmap(struct pci_dev *pdev, | 1227 | static void hpsa_pci_unmap(struct pci_dev *pdev, |
@@ -1160,7 +1244,7 @@ static void hpsa_map_one(struct pci_dev *pdev, | |||
1160 | size_t buflen, | 1244 | size_t buflen, |
1161 | int data_direction) | 1245 | int data_direction) |
1162 | { | 1246 | { |
1163 | __u64 addr64; | 1247 | u64 addr64; |
1164 | 1248 | ||
1165 | if (buflen == 0 || data_direction == PCI_DMA_NONE) { | 1249 | if (buflen == 0 || data_direction == PCI_DMA_NONE) { |
1166 | cp->Header.SGList = 0; | 1250 | cp->Header.SGList = 0; |
@@ -1168,14 +1252,14 @@ static void hpsa_map_one(struct pci_dev *pdev, | |||
1168 | return; | 1252 | return; |
1169 | } | 1253 | } |
1170 | 1254 | ||
1171 | addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); | 1255 | addr64 = (u64) pci_map_single(pdev, buf, buflen, data_direction); |
1172 | cp->SG[0].Addr.lower = | 1256 | cp->SG[0].Addr.lower = |
1173 | (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); | 1257 | (u32) (addr64 & (u64) 0x00000000FFFFFFFF); |
1174 | cp->SG[0].Addr.upper = | 1258 | cp->SG[0].Addr.upper = |
1175 | (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); | 1259 | (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); |
1176 | cp->SG[0].Len = buflen; | 1260 | cp->SG[0].Len = buflen; |
1177 | cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */ | 1261 | cp->Header.SGList = (u8) 1; /* no. SGs contig in this cmd */ |
1178 | cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ | 1262 | cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */ |
1179 | } | 1263 | } |
1180 | 1264 | ||
1181 | static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, | 1265 | static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, |
@@ -1274,7 +1358,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, | |||
1274 | 1358 | ||
1275 | if (c == NULL) { /* trouble... */ | 1359 | if (c == NULL) { /* trouble... */ |
1276 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 1360 | dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); |
1277 | return -1; | 1361 | return -ENOMEM; |
1278 | } | 1362 | } |
1279 | 1363 | ||
1280 | fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); | 1364 | fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); |
@@ -1366,9 +1450,8 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, | |||
1366 | dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); | 1450 | dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); |
1367 | return -1; | 1451 | return -1; |
1368 | } | 1452 | } |
1369 | 1453 | /* address the controller */ | |
1370 | memset(&scsi3addr[0], 0, 8); /* address the controller */ | 1454 | memset(scsi3addr, 0, sizeof(scsi3addr)); |
1371 | |||
1372 | fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, | 1455 | fill_cmd(c, logical ? HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, |
1373 | buf, bufsize, 0, scsi3addr, TYPE_CMD); | 1456 | buf, bufsize, 0, scsi3addr, TYPE_CMD); |
1374 | if (extended_response) | 1457 | if (extended_response) |
@@ -1409,13 +1492,12 @@ static int hpsa_update_device_info(struct ctlr_info *h, | |||
1409 | unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) | 1492 | unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) |
1410 | { | 1493 | { |
1411 | #define OBDR_TAPE_INQ_SIZE 49 | 1494 | #define OBDR_TAPE_INQ_SIZE 49 |
1412 | unsigned char *inq_buff = NULL; | 1495 | unsigned char *inq_buff; |
1413 | 1496 | ||
1414 | inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); | 1497 | inq_buff = kzalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); |
1415 | if (!inq_buff) | 1498 | if (!inq_buff) |
1416 | goto bail_out; | 1499 | goto bail_out; |
1417 | 1500 | ||
1418 | memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); | ||
1419 | /* Do an inquiry to the device to see what it is. */ | 1501 | /* Do an inquiry to the device to see what it is. */ |
1420 | if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, | 1502 | if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, |
1421 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { | 1503 | (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { |
@@ -1485,32 +1567,51 @@ static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) | |||
1485 | * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) | 1567 | * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) |
1486 | */ | 1568 | */ |
1487 | static void figure_bus_target_lun(struct ctlr_info *h, | 1569 | static void figure_bus_target_lun(struct ctlr_info *h, |
1488 | __u8 *lunaddrbytes, int *bus, int *target, int *lun, | 1570 | u8 *lunaddrbytes, int *bus, int *target, int *lun, |
1489 | struct hpsa_scsi_dev_t *device) | 1571 | struct hpsa_scsi_dev_t *device) |
1490 | { | 1572 | { |
1491 | 1573 | u32 lunid; | |
1492 | __u32 lunid; | ||
1493 | 1574 | ||
1494 | if (is_logical_dev_addr_mode(lunaddrbytes)) { | 1575 | if (is_logical_dev_addr_mode(lunaddrbytes)) { |
1495 | /* logical device */ | 1576 | /* logical device */ |
1496 | memcpy(&lunid, lunaddrbytes, sizeof(lunid)); | 1577 | if (unlikely(is_scsi_rev_5(h))) { |
1497 | lunid = le32_to_cpu(lunid); | 1578 | /* p1210m, logical drives lun assignments |
1498 | 1579 | * match SCSI REPORT LUNS data. | |
1499 | if (is_msa2xxx(h, device)) { | 1580 | */ |
1500 | *bus = 1; | 1581 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); |
1501 | *target = (lunid >> 16) & 0x3fff; | ||
1502 | *lun = lunid & 0x00ff; | ||
1503 | } else { | ||
1504 | *bus = 0; | 1582 | *bus = 0; |
1505 | *lun = 0; | 1583 | *target = 0; |
1506 | *target = lunid & 0x3fff; | 1584 | *lun = (lunid & 0x3fff) + 1; |
1585 | } else { | ||
1586 | /* not p1210m... */ | ||
1587 | lunid = le32_to_cpu(*((__le32 *) lunaddrbytes)); | ||
1588 | if (is_msa2xxx(h, device)) { | ||
1589 | /* msa2xxx way, put logicals on bus 1 | ||
1590 | * and match target/lun numbers box | ||
1591 | * reports. | ||
1592 | */ | ||
1593 | *bus = 1; | ||
1594 | *target = (lunid >> 16) & 0x3fff; | ||
1595 | *lun = lunid & 0x00ff; | ||
1596 | } else { | ||
1597 | /* Traditional smart array way. */ | ||
1598 | *bus = 0; | ||
1599 | *lun = 0; | ||
1600 | *target = lunid & 0x3fff; | ||
1601 | } | ||
1507 | } | 1602 | } |
1508 | } else { | 1603 | } else { |
1509 | /* physical device */ | 1604 | /* physical device */ |
1510 | if (is_hba_lunid(lunaddrbytes)) | 1605 | if (is_hba_lunid(lunaddrbytes)) |
1511 | *bus = 3; | 1606 | if (unlikely(is_scsi_rev_5(h))) { |
1607 | *bus = 0; /* put p1210m ctlr at 0,0,0 */ | ||
1608 | *target = 0; | ||
1609 | *lun = 0; | ||
1610 | return; | ||
1611 | } else | ||
1612 | *bus = 3; /* traditional smartarray */ | ||
1512 | else | 1613 | else |
1513 | *bus = 2; | 1614 | *bus = 2; /* physical disk */ |
1514 | *target = -1; | 1615 | *target = -1; |
1515 | *lun = -1; /* we will fill these in later. */ | 1616 | *lun = -1; /* we will fill these in later. */ |
1516 | } | 1617 | } |
@@ -1529,7 +1630,7 @@ static void figure_bus_target_lun(struct ctlr_info *h, | |||
1529 | */ | 1630 | */ |
1530 | static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | 1631 | static int add_msa2xxx_enclosure_device(struct ctlr_info *h, |
1531 | struct hpsa_scsi_dev_t *tmpdevice, | 1632 | struct hpsa_scsi_dev_t *tmpdevice, |
1532 | struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes, | 1633 | struct hpsa_scsi_dev_t *this_device, u8 *lunaddrbytes, |
1533 | int bus, int target, int lun, unsigned long lunzerobits[], | 1634 | int bus, int target, int lun, unsigned long lunzerobits[], |
1534 | int *nmsa2xxx_enclosures) | 1635 | int *nmsa2xxx_enclosures) |
1535 | { | 1636 | { |
@@ -1550,6 +1651,9 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | |||
1550 | if (is_hba_lunid(scsi3addr)) | 1651 | if (is_hba_lunid(scsi3addr)) |
1551 | return 0; /* Don't add the RAID controller here. */ | 1652 | return 0; /* Don't add the RAID controller here. */ |
1552 | 1653 | ||
1654 | if (is_scsi_rev_5(h)) | ||
1655 | return 0; /* p1210m doesn't need to do this. */ | ||
1656 | |||
1553 | #define MAX_MSA2XXX_ENCLOSURES 32 | 1657 | #define MAX_MSA2XXX_ENCLOSURES 32 |
1554 | if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { | 1658 | if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { |
1555 | dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " | 1659 | dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " |
@@ -1576,18 +1680,14 @@ static int add_msa2xxx_enclosure_device(struct ctlr_info *h, | |||
1576 | */ | 1680 | */ |
1577 | static int hpsa_gather_lun_info(struct ctlr_info *h, | 1681 | static int hpsa_gather_lun_info(struct ctlr_info *h, |
1578 | int reportlunsize, | 1682 | int reportlunsize, |
1579 | struct ReportLUNdata *physdev, __u32 *nphysicals, | 1683 | struct ReportLUNdata *physdev, u32 *nphysicals, |
1580 | struct ReportLUNdata *logdev, __u32 *nlogicals) | 1684 | struct ReportLUNdata *logdev, u32 *nlogicals) |
1581 | { | 1685 | { |
1582 | if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { | 1686 | if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { |
1583 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); | 1687 | dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); |
1584 | return -1; | 1688 | return -1; |
1585 | } | 1689 | } |
1586 | memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals)); | 1690 | *nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; |
1587 | *nphysicals = be32_to_cpu(*nphysicals) / 8; | ||
1588 | #ifdef DEBUG | ||
1589 | dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals); | ||
1590 | #endif | ||
1591 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { | 1691 | if (*nphysicals > HPSA_MAX_PHYS_LUN) { |
1592 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." | 1692 | dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." |
1593 | " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, | 1693 | " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, |
@@ -1598,11 +1698,7 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, | |||
1598 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); | 1698 | dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); |
1599 | return -1; | 1699 | return -1; |
1600 | } | 1700 | } |
1601 | memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals)); | 1701 | *nlogicals = be32_to_cpu(*((__be32 *) logdev->LUNListLength)) / 8; |
1602 | *nlogicals = be32_to_cpu(*nlogicals) / 8; | ||
1603 | #ifdef DEBUG | ||
1604 | dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals); | ||
1605 | #endif | ||
1606 | /* Reject Logicals in excess of our max capability. */ | 1702 | /* Reject Logicals in excess of our max capability. */ |
1607 | if (*nlogicals > HPSA_MAX_LUN) { | 1703 | if (*nlogicals > HPSA_MAX_LUN) { |
1608 | dev_warn(&h->pdev->dev, | 1704 | dev_warn(&h->pdev->dev, |
@@ -1621,6 +1717,31 @@ static int hpsa_gather_lun_info(struct ctlr_info *h, | |||
1621 | return 0; | 1717 | return 0; |
1622 | } | 1718 | } |
1623 | 1719 | ||
1720 | u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, | ||
1721 | int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, | ||
1722 | struct ReportLUNdata *logdev_list) | ||
1723 | { | ||
1724 | /* Helper function, figure out where the LUN ID info is coming from | ||
1725 | * given index i, lists of physical and logical devices, where in | ||
1726 | * the list the raid controller is supposed to appear (first or last) | ||
1727 | */ | ||
1728 | |||
1729 | int logicals_start = nphysicals + (raid_ctlr_position == 0); | ||
1730 | int last_device = nphysicals + nlogicals + (raid_ctlr_position == 0); | ||
1731 | |||
1732 | if (i == raid_ctlr_position) | ||
1733 | return RAID_CTLR_LUNID; | ||
1734 | |||
1735 | if (i < logicals_start) | ||
1736 | return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; | ||
1737 | |||
1738 | if (i < last_device) | ||
1739 | return &logdev_list->LUN[i - nphysicals - | ||
1740 | (raid_ctlr_position == 0)][0]; | ||
1741 | BUG(); | ||
1742 | return NULL; | ||
1743 | } | ||
1744 | |||
1624 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | 1745 | static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) |
1625 | { | 1746 | { |
1626 | /* the idea here is we could get notified | 1747 | /* the idea here is we could get notified |
@@ -1636,14 +1757,15 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1636 | struct ReportLUNdata *physdev_list = NULL; | 1757 | struct ReportLUNdata *physdev_list = NULL; |
1637 | struct ReportLUNdata *logdev_list = NULL; | 1758 | struct ReportLUNdata *logdev_list = NULL; |
1638 | unsigned char *inq_buff = NULL; | 1759 | unsigned char *inq_buff = NULL; |
1639 | __u32 nphysicals = 0; | 1760 | u32 nphysicals = 0; |
1640 | __u32 nlogicals = 0; | 1761 | u32 nlogicals = 0; |
1641 | __u32 ndev_allocated = 0; | 1762 | u32 ndev_allocated = 0; |
1642 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; | 1763 | struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; |
1643 | int ncurrent = 0; | 1764 | int ncurrent = 0; |
1644 | int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; | 1765 | int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; |
1645 | int i, nmsa2xxx_enclosures, ndevs_to_allocate; | 1766 | int i, nmsa2xxx_enclosures, ndevs_to_allocate; |
1646 | int bus, target, lun; | 1767 | int bus, target, lun; |
1768 | int raid_ctlr_position; | ||
1647 | DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); | 1769 | DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); |
1648 | 1770 | ||
1649 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, | 1771 | currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, |
@@ -1681,23 +1803,22 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) | |||
1681 | ndev_allocated++; | 1803 | ndev_allocated++; |
1682 | } | 1804 | } |
1683 | 1805 | ||
1806 | if (unlikely(is_scsi_rev_5(h))) | ||
1807 | raid_ctlr_position = 0; | ||
1808 | else | ||
1809 | raid_ctlr_position = nphysicals + nlogicals; | ||
1810 | |||
1684 | /* adjust our table of devices */ | 1811 | /* adjust our table of devices */ |
1685 | nmsa2xxx_enclosures = 0; | 1812 | nmsa2xxx_enclosures = 0; |
1686 | for (i = 0; i < nphysicals + nlogicals + 1; i++) { | 1813 | for (i = 0; i < nphysicals + nlogicals + 1; i++) { |
1687 | __u8 *lunaddrbytes; | 1814 | u8 *lunaddrbytes; |
1688 | 1815 | ||
1689 | /* Figure out where the LUN ID info is coming from */ | 1816 | /* Figure out where the LUN ID info is coming from */ |
1690 | if (i < nphysicals) | 1817 | lunaddrbytes = figure_lunaddrbytes(h, raid_ctlr_position, |
1691 | lunaddrbytes = &physdev_list->LUN[i][0]; | 1818 | i, nphysicals, nlogicals, physdev_list, logdev_list); |
1692 | else | ||
1693 | if (i < nphysicals + nlogicals) | ||
1694 | lunaddrbytes = | ||
1695 | &logdev_list->LUN[i-nphysicals][0]; | ||
1696 | else /* jam in the RAID controller at the end */ | ||
1697 | lunaddrbytes = RAID_CTLR_LUNID; | ||
1698 | |||
1699 | /* skip masked physical devices. */ | 1819 | /* skip masked physical devices. */ |
1700 | if (lunaddrbytes[3] & 0xC0 && i < nphysicals) | 1820 | if (lunaddrbytes[3] & 0xC0 && |
1821 | i < nphysicals + (raid_ctlr_position == 0)) | ||
1701 | continue; | 1822 | continue; |
1702 | 1823 | ||
1703 | /* Get device type, vendor, model, device id */ | 1824 | /* Get device type, vendor, model, device id */ |
@@ -1777,7 +1898,6 @@ out: | |||
1777 | kfree(inq_buff); | 1898 | kfree(inq_buff); |
1778 | kfree(physdev_list); | 1899 | kfree(physdev_list); |
1779 | kfree(logdev_list); | 1900 | kfree(logdev_list); |
1780 | return; | ||
1781 | } | 1901 | } |
1782 | 1902 | ||
1783 | /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci | 1903 | /* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci |
@@ -1790,7 +1910,7 @@ static int hpsa_scatter_gather(struct pci_dev *pdev, | |||
1790 | { | 1910 | { |
1791 | unsigned int len; | 1911 | unsigned int len; |
1792 | struct scatterlist *sg; | 1912 | struct scatterlist *sg; |
1793 | __u64 addr64; | 1913 | u64 addr64; |
1794 | int use_sg, i; | 1914 | int use_sg, i; |
1795 | 1915 | ||
1796 | BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); | 1916 | BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); |
@@ -1803,20 +1923,20 @@ static int hpsa_scatter_gather(struct pci_dev *pdev, | |||
1803 | goto sglist_finished; | 1923 | goto sglist_finished; |
1804 | 1924 | ||
1805 | scsi_for_each_sg(cmd, sg, use_sg, i) { | 1925 | scsi_for_each_sg(cmd, sg, use_sg, i) { |
1806 | addr64 = (__u64) sg_dma_address(sg); | 1926 | addr64 = (u64) sg_dma_address(sg); |
1807 | len = sg_dma_len(sg); | 1927 | len = sg_dma_len(sg); |
1808 | cp->SG[i].Addr.lower = | 1928 | cp->SG[i].Addr.lower = |
1809 | (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); | 1929 | (u32) (addr64 & (u64) 0x00000000FFFFFFFF); |
1810 | cp->SG[i].Addr.upper = | 1930 | cp->SG[i].Addr.upper = |
1811 | (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); | 1931 | (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF); |
1812 | cp->SG[i].Len = len; | 1932 | cp->SG[i].Len = len; |
1813 | cp->SG[i].Ext = 0; /* we are not chaining */ | 1933 | cp->SG[i].Ext = 0; /* we are not chaining */ |
1814 | } | 1934 | } |
1815 | 1935 | ||
1816 | sglist_finished: | 1936 | sglist_finished: |
1817 | 1937 | ||
1818 | cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */ | 1938 | cp->Header.SGList = (u8) use_sg; /* no. SGs contig in this cmd */ |
1819 | cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */ | 1939 | cp->Header.SGTotal = (u16) use_sg; /* total sgs in this cmd list */ |
1820 | return 0; | 1940 | return 0; |
1821 | } | 1941 | } |
1822 | 1942 | ||
@@ -1860,7 +1980,8 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | |||
1860 | c->scsi_cmd = cmd; | 1980 | c->scsi_cmd = cmd; |
1861 | c->Header.ReplyQueue = 0; /* unused in simple mode */ | 1981 | c->Header.ReplyQueue = 0; /* unused in simple mode */ |
1862 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); | 1982 | memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); |
1863 | c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ | 1983 | c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); |
1984 | c->Header.Tag.lower |= DIRECT_LOOKUP_BIT; | ||
1864 | 1985 | ||
1865 | /* Fill in the request block... */ | 1986 | /* Fill in the request block... */ |
1866 | 1987 | ||
@@ -1914,6 +2035,48 @@ static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, | |||
1914 | return 0; | 2035 | return 0; |
1915 | } | 2036 | } |
1916 | 2037 | ||
2038 | static void hpsa_scan_start(struct Scsi_Host *sh) | ||
2039 | { | ||
2040 | struct ctlr_info *h = shost_to_hba(sh); | ||
2041 | unsigned long flags; | ||
2042 | |||
2043 | /* wait until any scan already in progress is finished. */ | ||
2044 | while (1) { | ||
2045 | spin_lock_irqsave(&h->scan_lock, flags); | ||
2046 | if (h->scan_finished) | ||
2047 | break; | ||
2048 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
2049 | wait_event(h->scan_wait_queue, h->scan_finished); | ||
2050 | /* Note: We don't need to worry about a race between this | ||
2051 | * thread and driver unload because the midlayer will | ||
2052 | * have incremented the reference count, so unload won't | ||
2053 | * happen if we're in here. | ||
2054 | */ | ||
2055 | } | ||
2056 | h->scan_finished = 0; /* mark scan as in progress */ | ||
2057 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
2058 | |||
2059 | hpsa_update_scsi_devices(h, h->scsi_host->host_no); | ||
2060 | |||
2061 | spin_lock_irqsave(&h->scan_lock, flags); | ||
2062 | h->scan_finished = 1; /* mark scan as finished. */ | ||
2063 | wake_up_all(&h->scan_wait_queue); | ||
2064 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
2065 | } | ||
2066 | |||
2067 | static int hpsa_scan_finished(struct Scsi_Host *sh, | ||
2068 | unsigned long elapsed_time) | ||
2069 | { | ||
2070 | struct ctlr_info *h = shost_to_hba(sh); | ||
2071 | unsigned long flags; | ||
2072 | int finished; | ||
2073 | |||
2074 | spin_lock_irqsave(&h->scan_lock, flags); | ||
2075 | finished = h->scan_finished; | ||
2076 | spin_unlock_irqrestore(&h->scan_lock, flags); | ||
2077 | return finished; | ||
2078 | } | ||
2079 | |||
1917 | static void hpsa_unregister_scsi(struct ctlr_info *h) | 2080 | static void hpsa_unregister_scsi(struct ctlr_info *h) |
1918 | { | 2081 | { |
1919 | /* we are being forcibly unloaded, and may not refuse. */ | 2082 | /* we are being forcibly unloaded, and may not refuse. */ |
@@ -1926,7 +2089,6 @@ static int hpsa_register_scsi(struct ctlr_info *h) | |||
1926 | { | 2089 | { |
1927 | int rc; | 2090 | int rc; |
1928 | 2091 | ||
1929 | hpsa_update_scsi_devices(h, -1); | ||
1930 | rc = hpsa_scsi_detect(h); | 2092 | rc = hpsa_scsi_detect(h); |
1931 | if (rc != 0) | 2093 | if (rc != 0) |
1932 | dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" | 2094 | dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" |
@@ -2003,14 +2165,14 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) | |||
2003 | h = sdev_to_hba(scsicmd->device); | 2165 | h = sdev_to_hba(scsicmd->device); |
2004 | if (h == NULL) /* paranoia */ | 2166 | if (h == NULL) /* paranoia */ |
2005 | return FAILED; | 2167 | return FAILED; |
2006 | dev_warn(&h->pdev->dev, "resetting drive\n"); | ||
2007 | |||
2008 | dev = scsicmd->device->hostdata; | 2168 | dev = scsicmd->device->hostdata; |
2009 | if (!dev) { | 2169 | if (!dev) { |
2010 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " | 2170 | dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " |
2011 | "device lookup failed.\n"); | 2171 | "device lookup failed.\n"); |
2012 | return FAILED; | 2172 | return FAILED; |
2013 | } | 2173 | } |
2174 | dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n", | ||
2175 | h->scsi_host->host_no, dev->bus, dev->target, dev->lun); | ||
2014 | /* send a reset to the SCSI LUN which the command was sent to */ | 2176 | /* send a reset to the SCSI LUN which the command was sent to */ |
2015 | rc = hpsa_send_reset(h, dev->scsi3addr); | 2177 | rc = hpsa_send_reset(h, dev->scsi3addr); |
2016 | if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) | 2178 | if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) |
@@ -2053,8 +2215,8 @@ static struct CommandList *cmd_alloc(struct ctlr_info *h) | |||
2053 | c->cmdindex = i; | 2215 | c->cmdindex = i; |
2054 | 2216 | ||
2055 | INIT_HLIST_NODE(&c->list); | 2217 | INIT_HLIST_NODE(&c->list); |
2056 | c->busaddr = (__u32) cmd_dma_handle; | 2218 | c->busaddr = (u32) cmd_dma_handle; |
2057 | temp64.val = (__u64) err_dma_handle; | 2219 | temp64.val = (u64) err_dma_handle; |
2058 | c->ErrDesc.Addr.lower = temp64.val32.lower; | 2220 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2059 | c->ErrDesc.Addr.upper = temp64.val32.upper; | 2221 | c->ErrDesc.Addr.upper = temp64.val32.upper; |
2060 | c->ErrDesc.Len = sizeof(*c->err_info); | 2222 | c->ErrDesc.Len = sizeof(*c->err_info); |
@@ -2091,8 +2253,8 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h) | |||
2091 | memset(c->err_info, 0, sizeof(*c->err_info)); | 2253 | memset(c->err_info, 0, sizeof(*c->err_info)); |
2092 | 2254 | ||
2093 | INIT_HLIST_NODE(&c->list); | 2255 | INIT_HLIST_NODE(&c->list); |
2094 | c->busaddr = (__u32) cmd_dma_handle; | 2256 | c->busaddr = (u32) cmd_dma_handle; |
2095 | temp64.val = (__u64) err_dma_handle; | 2257 | temp64.val = (u64) err_dma_handle; |
2096 | c->ErrDesc.Addr.lower = temp64.val32.lower; | 2258 | c->ErrDesc.Addr.lower = temp64.val32.lower; |
2097 | c->ErrDesc.Addr.upper = temp64.val32.upper; | 2259 | c->ErrDesc.Addr.upper = temp64.val32.upper; |
2098 | c->ErrDesc.Len = sizeof(*c->err_info); | 2260 | c->ErrDesc.Len = sizeof(*c->err_info); |
@@ -2125,50 +2287,6 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) | |||
2125 | 2287 | ||
2126 | #ifdef CONFIG_COMPAT | 2288 | #ifdef CONFIG_COMPAT |
2127 | 2289 | ||
2128 | static int do_ioctl(struct scsi_device *dev, int cmd, void *arg) | ||
2129 | { | ||
2130 | int ret; | ||
2131 | |||
2132 | lock_kernel(); | ||
2133 | ret = hpsa_ioctl(dev, cmd, arg); | ||
2134 | unlock_kernel(); | ||
2135 | return ret; | ||
2136 | } | ||
2137 | |||
2138 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg); | ||
2139 | static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, | ||
2140 | int cmd, void *arg); | ||
2141 | |||
2142 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) | ||
2143 | { | ||
2144 | switch (cmd) { | ||
2145 | case CCISS_GETPCIINFO: | ||
2146 | case CCISS_GETINTINFO: | ||
2147 | case CCISS_SETINTINFO: | ||
2148 | case CCISS_GETNODENAME: | ||
2149 | case CCISS_SETNODENAME: | ||
2150 | case CCISS_GETHEARTBEAT: | ||
2151 | case CCISS_GETBUSTYPES: | ||
2152 | case CCISS_GETFIRMVER: | ||
2153 | case CCISS_GETDRIVVER: | ||
2154 | case CCISS_REVALIDVOLS: | ||
2155 | case CCISS_DEREGDISK: | ||
2156 | case CCISS_REGNEWDISK: | ||
2157 | case CCISS_REGNEWD: | ||
2158 | case CCISS_RESCANDISK: | ||
2159 | case CCISS_GETLUNINFO: | ||
2160 | return do_ioctl(dev, cmd, arg); | ||
2161 | |||
2162 | case CCISS_PASSTHRU32: | ||
2163 | return hpsa_ioctl32_passthru(dev, cmd, arg); | ||
2164 | case CCISS_BIG_PASSTHRU32: | ||
2165 | return hpsa_ioctl32_big_passthru(dev, cmd, arg); | ||
2166 | |||
2167 | default: | ||
2168 | return -ENOIOCTLCMD; | ||
2169 | } | ||
2170 | } | ||
2171 | |||
2172 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) | 2290 | static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) |
2173 | { | 2291 | { |
2174 | IOCTL32_Command_struct __user *arg32 = | 2292 | IOCTL32_Command_struct __user *arg32 = |
@@ -2193,7 +2311,7 @@ static int hpsa_ioctl32_passthru(struct scsi_device *dev, int cmd, void *arg) | |||
2193 | if (err) | 2311 | if (err) |
2194 | return -EFAULT; | 2312 | return -EFAULT; |
2195 | 2313 | ||
2196 | err = do_ioctl(dev, CCISS_PASSTHRU, (void *)p); | 2314 | err = hpsa_ioctl(dev, CCISS_PASSTHRU, (void *)p); |
2197 | if (err) | 2315 | if (err) |
2198 | return err; | 2316 | return err; |
2199 | err |= copy_in_user(&arg32->error_info, &p->error_info, | 2317 | err |= copy_in_user(&arg32->error_info, &p->error_info, |
@@ -2230,7 +2348,7 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, | |||
2230 | if (err) | 2348 | if (err) |
2231 | return -EFAULT; | 2349 | return -EFAULT; |
2232 | 2350 | ||
2233 | err = do_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); | 2351 | err = hpsa_ioctl(dev, CCISS_BIG_PASSTHRU, (void *)p); |
2234 | if (err) | 2352 | if (err) |
2235 | return err; | 2353 | return err; |
2236 | err |= copy_in_user(&arg32->error_info, &p->error_info, | 2354 | err |= copy_in_user(&arg32->error_info, &p->error_info, |
@@ -2239,6 +2357,36 @@ static int hpsa_ioctl32_big_passthru(struct scsi_device *dev, | |||
2239 | return -EFAULT; | 2357 | return -EFAULT; |
2240 | return err; | 2358 | return err; |
2241 | } | 2359 | } |
2360 | |||
2361 | static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg) | ||
2362 | { | ||
2363 | switch (cmd) { | ||
2364 | case CCISS_GETPCIINFO: | ||
2365 | case CCISS_GETINTINFO: | ||
2366 | case CCISS_SETINTINFO: | ||
2367 | case CCISS_GETNODENAME: | ||
2368 | case CCISS_SETNODENAME: | ||
2369 | case CCISS_GETHEARTBEAT: | ||
2370 | case CCISS_GETBUSTYPES: | ||
2371 | case CCISS_GETFIRMVER: | ||
2372 | case CCISS_GETDRIVVER: | ||
2373 | case CCISS_REVALIDVOLS: | ||
2374 | case CCISS_DEREGDISK: | ||
2375 | case CCISS_REGNEWDISK: | ||
2376 | case CCISS_REGNEWD: | ||
2377 | case CCISS_RESCANDISK: | ||
2378 | case CCISS_GETLUNINFO: | ||
2379 | return hpsa_ioctl(dev, cmd, arg); | ||
2380 | |||
2381 | case CCISS_PASSTHRU32: | ||
2382 | return hpsa_ioctl32_passthru(dev, cmd, arg); | ||
2383 | case CCISS_BIG_PASSTHRU32: | ||
2384 | return hpsa_ioctl32_big_passthru(dev, cmd, arg); | ||
2385 | |||
2386 | default: | ||
2387 | return -ENOIOCTLCMD; | ||
2388 | } | ||
2389 | } | ||
2242 | #endif | 2390 | #endif |
2243 | 2391 | ||
2244 | static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) | 2392 | static int hpsa_getpciinfo_ioctl(struct ctlr_info *h, void __user *argp) |
@@ -2378,8 +2526,8 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp) | |||
2378 | BYTE sg_used = 0; | 2526 | BYTE sg_used = 0; |
2379 | int status = 0; | 2527 | int status = 0; |
2380 | int i; | 2528 | int i; |
2381 | __u32 left; | 2529 | u32 left; |
2382 | __u32 sz; | 2530 | u32 sz; |
2383 | BYTE __user *data_ptr; | 2531 | BYTE __user *data_ptr; |
2384 | 2532 | ||
2385 | if (!argp) | 2533 | if (!argp) |
@@ -2527,7 +2675,7 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) | |||
2527 | case CCISS_DEREGDISK: | 2675 | case CCISS_DEREGDISK: |
2528 | case CCISS_REGNEWDISK: | 2676 | case CCISS_REGNEWDISK: |
2529 | case CCISS_REGNEWD: | 2677 | case CCISS_REGNEWD: |
2530 | hpsa_update_scsi_devices(h, dev->host->host_no); | 2678 | hpsa_scan_start(h->scsi_host); |
2531 | return 0; | 2679 | return 0; |
2532 | case CCISS_GETPCIINFO: | 2680 | case CCISS_GETPCIINFO: |
2533 | return hpsa_getpciinfo_ioctl(h, argp); | 2681 | return hpsa_getpciinfo_ioctl(h, argp); |
@@ -2542,8 +2690,8 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg) | |||
2542 | } | 2690 | } |
2543 | } | 2691 | } |
2544 | 2692 | ||
2545 | static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h, | 2693 | static void fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, |
2546 | void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, | 2694 | void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, |
2547 | int cmd_type) | 2695 | int cmd_type) |
2548 | { | 2696 | { |
2549 | int pci_dir = XFER_NONE; | 2697 | int pci_dir = XFER_NONE; |
@@ -2710,19 +2858,20 @@ static inline unsigned long get_next_completion(struct ctlr_info *h) | |||
2710 | return h->access.command_completed(h); | 2858 | return h->access.command_completed(h); |
2711 | } | 2859 | } |
2712 | 2860 | ||
2713 | static inline int interrupt_pending(struct ctlr_info *h) | 2861 | static inline bool interrupt_pending(struct ctlr_info *h) |
2714 | { | 2862 | { |
2715 | return h->access.intr_pending(h); | 2863 | return h->access.intr_pending(h); |
2716 | } | 2864 | } |
2717 | 2865 | ||
2718 | static inline long interrupt_not_for_us(struct ctlr_info *h) | 2866 | static inline long interrupt_not_for_us(struct ctlr_info *h) |
2719 | { | 2867 | { |
2720 | return ((h->access.intr_pending(h) == 0) || | 2868 | return !(h->msi_vector || h->msix_vector) && |
2721 | (h->interrupts_enabled == 0)); | 2869 | ((h->access.intr_pending(h) == 0) || |
2870 | (h->interrupts_enabled == 0)); | ||
2722 | } | 2871 | } |
2723 | 2872 | ||
2724 | static inline int bad_tag(struct ctlr_info *h, __u32 tag_index, | 2873 | static inline int bad_tag(struct ctlr_info *h, u32 tag_index, |
2725 | __u32 raw_tag) | 2874 | u32 raw_tag) |
2726 | { | 2875 | { |
2727 | if (unlikely(tag_index >= h->nr_cmds)) { | 2876 | if (unlikely(tag_index >= h->nr_cmds)) { |
2728 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); | 2877 | dev_warn(&h->pdev->dev, "bad tag 0x%08x ignored.\n", raw_tag); |
@@ -2731,7 +2880,7 @@ static inline int bad_tag(struct ctlr_info *h, __u32 tag_index, | |||
2731 | return 0; | 2880 | return 0; |
2732 | } | 2881 | } |
2733 | 2882 | ||
2734 | static inline void finish_cmd(struct CommandList *c, __u32 raw_tag) | 2883 | static inline void finish_cmd(struct CommandList *c, u32 raw_tag) |
2735 | { | 2884 | { |
2736 | removeQ(c); | 2885 | removeQ(c); |
2737 | if (likely(c->cmd_type == CMD_SCSI)) | 2886 | if (likely(c->cmd_type == CMD_SCSI)) |
@@ -2740,42 +2889,79 @@ static inline void finish_cmd(struct CommandList *c, __u32 raw_tag) | |||
2740 | complete(c->waiting); | 2889 | complete(c->waiting); |
2741 | } | 2890 | } |
2742 | 2891 | ||
2892 | static inline u32 hpsa_tag_contains_index(u32 tag) | ||
2893 | { | ||
2894 | #define DIRECT_LOOKUP_BIT 0x10 | ||
2895 | return tag & DIRECT_LOOKUP_BIT; | ||
2896 | } | ||
2897 | |||
2898 | static inline u32 hpsa_tag_to_index(u32 tag) | ||
2899 | { | ||
2900 | #define DIRECT_LOOKUP_SHIFT 5 | ||
2901 | return tag >> DIRECT_LOOKUP_SHIFT; | ||
2902 | } | ||
2903 | |||
2904 | static inline u32 hpsa_tag_discard_error_bits(u32 tag) | ||
2905 | { | ||
2906 | #define HPSA_ERROR_BITS 0x03 | ||
2907 | return tag & ~HPSA_ERROR_BITS; | ||
2908 | } | ||
2909 | |||
2910 | /* process completion of an indexed ("direct lookup") command */ | ||
2911 | static inline u32 process_indexed_cmd(struct ctlr_info *h, | ||
2912 | u32 raw_tag) | ||
2913 | { | ||
2914 | u32 tag_index; | ||
2915 | struct CommandList *c; | ||
2916 | |||
2917 | tag_index = hpsa_tag_to_index(raw_tag); | ||
2918 | if (bad_tag(h, tag_index, raw_tag)) | ||
2919 | return next_command(h); | ||
2920 | c = h->cmd_pool + tag_index; | ||
2921 | finish_cmd(c, raw_tag); | ||
2922 | return next_command(h); | ||
2923 | } | ||
2924 | |||
2925 | /* process completion of a non-indexed command */ | ||
2926 | static inline u32 process_nonindexed_cmd(struct ctlr_info *h, | ||
2927 | u32 raw_tag) | ||
2928 | { | ||
2929 | u32 tag; | ||
2930 | struct CommandList *c = NULL; | ||
2931 | struct hlist_node *tmp; | ||
2932 | |||
2933 | tag = hpsa_tag_discard_error_bits(raw_tag); | ||
2934 | hlist_for_each_entry(c, tmp, &h->cmpQ, list) { | ||
2935 | if ((c->busaddr & 0xFFFFFFE0) == (tag & 0xFFFFFFE0)) { | ||
2936 | finish_cmd(c, raw_tag); | ||
2937 | return next_command(h); | ||
2938 | } | ||
2939 | } | ||
2940 | bad_tag(h, h->nr_cmds + 1, raw_tag); | ||
2941 | return next_command(h); | ||
2942 | } | ||
2943 | |||
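The three helpers above define the tag layout the interrupt path relies on: bit 4 marks a direct-lookup tag, bits 5 and up carry the index into h->cmd_pool, and bits 0-1 carry controller error status; the 32-byte alignment of struct CommandList is what leaves those low five bits free, and why process_nonindexed_cmd() matches busaddr with the low 5 bits masked (0xFFFFFFE0). A user-space sketch (illustrative only, not driver code) of decoding a hypothetical raw tag under that layout:

    #include <stdio.h>

    #define DIRECT_LOOKUP_BIT   0x10   /* bit 4: tag carries a command-pool index */
    #define DIRECT_LOOKUP_SHIFT 5      /* index lives in bits 5 and up */
    #define HPSA_ERROR_BITS     0x03   /* bits 0-1: error status from the controller */

    int main(void)
    {
            /* hypothetical completed tag: index 7, direct lookup, error bit 0 set */
            unsigned int raw_tag = (7u << DIRECT_LOOKUP_SHIFT) | DIRECT_LOOKUP_BIT | 0x1;

            if (raw_tag & DIRECT_LOOKUP_BIT)   /* hpsa_tag_contains_index() */
                    printf("cmd_pool index = %u\n", raw_tag >> DIRECT_LOOKUP_SHIFT); /* 7 */
            printf("error bits     = %u\n", raw_tag & HPSA_ERROR_BITS);              /* 1 */
            /* non-indexed path: match against busaddr with the low 5 bits ignored */
            printf("busaddr key    = 0x%08x\n", raw_tag & 0xFFFFFFE0);
            return 0;
    }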
2743 | static irqreturn_t do_hpsa_intr(int irq, void *dev_id) | 2944 | static irqreturn_t do_hpsa_intr(int irq, void *dev_id) |
2744 | { | 2945 | { |
2745 | struct ctlr_info *h = dev_id; | 2946 | struct ctlr_info *h = dev_id; |
2746 | struct CommandList *c; | ||
2747 | unsigned long flags; | 2947 | unsigned long flags; |
2748 | __u32 raw_tag, tag, tag_index; | 2948 | u32 raw_tag; |
2749 | struct hlist_node *tmp; | ||
2750 | 2949 | ||
2751 | if (interrupt_not_for_us(h)) | 2950 | if (interrupt_not_for_us(h)) |
2752 | return IRQ_NONE; | 2951 | return IRQ_NONE; |
2753 | spin_lock_irqsave(&h->lock, flags); | 2952 | spin_lock_irqsave(&h->lock, flags); |
2754 | while (interrupt_pending(h)) { | 2953 | raw_tag = get_next_completion(h); |
2755 | while ((raw_tag = get_next_completion(h)) != FIFO_EMPTY) { | 2954 | while (raw_tag != FIFO_EMPTY) { |
2756 | if (likely(HPSA_TAG_CONTAINS_INDEX(raw_tag))) { | 2955 | if (hpsa_tag_contains_index(raw_tag)) |
2757 | tag_index = HPSA_TAG_TO_INDEX(raw_tag); | 2956 | raw_tag = process_indexed_cmd(h, raw_tag); |
2758 | if (bad_tag(h, tag_index, raw_tag)) | 2957 | else |
2759 | return IRQ_HANDLED; | 2958 | raw_tag = process_nonindexed_cmd(h, raw_tag); |
2760 | c = h->cmd_pool + tag_index; | ||
2761 | finish_cmd(c, raw_tag); | ||
2762 | continue; | ||
2763 | } | ||
2764 | tag = HPSA_TAG_DISCARD_ERROR_BITS(raw_tag); | ||
2765 | c = NULL; | ||
2766 | hlist_for_each_entry(c, tmp, &h->cmpQ, list) { | ||
2767 | if (c->busaddr == tag) { | ||
2768 | finish_cmd(c, raw_tag); | ||
2769 | break; | ||
2770 | } | ||
2771 | } | ||
2772 | } | ||
2773 | } | 2959 | } |
2774 | spin_unlock_irqrestore(&h->lock, flags); | 2960 | spin_unlock_irqrestore(&h->lock, flags); |
2775 | return IRQ_HANDLED; | 2961 | return IRQ_HANDLED; |
2776 | } | 2962 | } |
2777 | 2963 | ||
2778 | /* Send a message CDB to the firmware. */ | 2964 | /* Send a message CDB to the firmware. */ |
2779 | static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | 2965 | static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, |
2780 | unsigned char type) | 2966 | unsigned char type) |
2781 | { | 2967 | { |
@@ -2841,7 +3027,7 @@ static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode, | |||
2841 | 3027 | ||
2842 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { | 3028 | for (i = 0; i < HPSA_MSG_SEND_RETRY_LIMIT; i++) { |
2843 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); | 3029 | tag = readl(vaddr + SA5_REPLY_PORT_OFFSET); |
2844 | if (HPSA_TAG_DISCARD_ERROR_BITS(tag) == paddr32) | 3030 | if (hpsa_tag_discard_error_bits(tag) == paddr32) |
2845 | break; | 3031 | break; |
2846 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); | 3032 | msleep(HPSA_MSG_SEND_RETRY_INTERVAL_MSECS); |
2847 | } | 3033 | } |
@@ -3063,7 +3249,7 @@ static int find_PCI_BAR_index(struct pci_dev *pdev, unsigned long pci_bar_addr) | |||
3063 | */ | 3249 | */ |
3064 | 3250 | ||
3065 | static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, | 3251 | static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, |
3066 | struct pci_dev *pdev, __u32 board_id) | 3252 | struct pci_dev *pdev, u32 board_id) |
3067 | { | 3253 | { |
3068 | #ifdef CONFIG_PCI_MSI | 3254 | #ifdef CONFIG_PCI_MSI |
3069 | int err; | 3255 | int err; |
@@ -3107,22 +3293,22 @@ static void __devinit hpsa_interrupt_mode(struct ctlr_info *h, | |||
3107 | default_int_mode: | 3293 | default_int_mode: |
3108 | #endif /* CONFIG_PCI_MSI */ | 3294 | #endif /* CONFIG_PCI_MSI */ |
3109 | /* if we get here we're going to use the default interrupt mode */ | 3295 | /* if we get here we're going to use the default interrupt mode */ |
3110 | h->intr[SIMPLE_MODE_INT] = pdev->irq; | 3296 | h->intr[PERF_MODE_INT] = pdev->irq; |
3111 | return; | ||
3112 | } | 3297 | } |
3113 | 3298 | ||
3114 | static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | 3299 | static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) |
3115 | { | 3300 | { |
3116 | ushort subsystem_vendor_id, subsystem_device_id, command; | 3301 | ushort subsystem_vendor_id, subsystem_device_id, command; |
3117 | __u32 board_id, scratchpad = 0; | 3302 | u32 board_id, scratchpad = 0; |
3118 | __u64 cfg_offset; | 3303 | u64 cfg_offset; |
3119 | __u32 cfg_base_addr; | 3304 | u32 cfg_base_addr; |
3120 | __u64 cfg_base_addr_index; | 3305 | u64 cfg_base_addr_index; |
3306 | u32 trans_offset; | ||
3121 | int i, prod_index, err; | 3307 | int i, prod_index, err; |
3122 | 3308 | ||
3123 | subsystem_vendor_id = pdev->subsystem_vendor; | 3309 | subsystem_vendor_id = pdev->subsystem_vendor; |
3124 | subsystem_device_id = pdev->subsystem_device; | 3310 | subsystem_device_id = pdev->subsystem_device; |
3125 | board_id = (((__u32) (subsystem_device_id << 16) & 0xffff0000) | | 3311 | board_id = (((u32) (subsystem_device_id << 16) & 0xffff0000) | |
3126 | subsystem_vendor_id); | 3312 | subsystem_vendor_id); |
3127 | 3313 | ||
3128 | for (i = 0; i < ARRAY_SIZE(products); i++) | 3314 | for (i = 0; i < ARRAY_SIZE(products); i++) |
@@ -3199,7 +3385,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | |||
3199 | 3385 | ||
3200 | /* get the address index number */ | 3386 | /* get the address index number */ |
3201 | cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET); | 3387 | cfg_base_addr = readl(h->vaddr + SA5_CTCFG_OFFSET); |
3202 | cfg_base_addr &= (__u32) 0x0000ffff; | 3388 | cfg_base_addr &= (u32) 0x0000ffff; |
3203 | cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); | 3389 | cfg_base_addr_index = find_PCI_BAR_index(pdev, cfg_base_addr); |
3204 | if (cfg_base_addr_index == -1) { | 3390 | if (cfg_base_addr_index == -1) { |
3205 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); | 3391 | dev_warn(&pdev->dev, "cannot find cfg_base_addr_index\n"); |
@@ -3211,11 +3397,14 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | |||
3211 | h->cfgtable = remap_pci_mem(pci_resource_start(pdev, | 3397 | h->cfgtable = remap_pci_mem(pci_resource_start(pdev, |
3212 | cfg_base_addr_index) + cfg_offset, | 3398 | cfg_base_addr_index) + cfg_offset, |
3213 | sizeof(h->cfgtable)); | 3399 | sizeof(h->cfgtable)); |
3214 | h->board_id = board_id; | 3400 | /* Find performant mode table. */ |
3215 | 3401 | trans_offset = readl(&(h->cfgtable->TransMethodOffset)); | |
3216 | /* Query controller for max supported commands: */ | 3402 | h->transtable = remap_pci_mem(pci_resource_start(pdev, |
3217 | h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); | 3403 | cfg_base_addr_index)+cfg_offset+trans_offset, |
3404 | sizeof(*h->transtable)); | ||
3218 | 3405 | ||
3406 | h->board_id = board_id; | ||
3407 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); | ||
3219 | h->product_name = products[prod_index].product_name; | 3408 | h->product_name = products[prod_index].product_name; |
3220 | h->access = *(products[prod_index].access); | 3409 | h->access = *(products[prod_index].access); |
3221 | /* Allow room for some ioctls */ | 3410 | /* Allow room for some ioctls */ |
@@ -3232,7 +3421,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | |||
3232 | #ifdef CONFIG_X86 | 3421 | #ifdef CONFIG_X86 |
3233 | { | 3422 | { |
3234 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ | 3423 | /* Need to enable prefetch in the SCSI core for 6400 in x86 */ |
3235 | __u32 prefetch; | 3424 | u32 prefetch; |
3236 | prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); | 3425 | prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); |
3237 | prefetch |= 0x100; | 3426 | prefetch |= 0x100; |
3238 | writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); | 3427 | writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); |
@@ -3244,7 +3433,7 @@ static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev) | |||
3244 | * physical memory. | 3433 | * physical memory. |
3245 | */ | 3434 | */ |
3246 | if (board_id == 0x3225103C) { | 3435 | if (board_id == 0x3225103C) { |
3247 | __u32 dma_prefetch; | 3436 | u32 dma_prefetch; |
3248 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); | 3437 | dma_prefetch = readl(h->vaddr + I2O_DMA1_CFG); |
3249 | dma_prefetch |= 0x8000; | 3438 | dma_prefetch |= 0x8000; |
3250 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); | 3439 | writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG); |
@@ -3286,10 +3475,26 @@ err_out_free_res: | |||
3286 | return err; | 3475 | return err; |
3287 | } | 3476 | } |
3288 | 3477 | ||
3478 | static void __devinit hpsa_hba_inquiry(struct ctlr_info *h) | ||
3479 | { | ||
3480 | int rc; | ||
3481 | |||
3482 | #define HBA_INQUIRY_BYTE_COUNT 64 | ||
3483 | h->hba_inquiry_data = kmalloc(HBA_INQUIRY_BYTE_COUNT, GFP_KERNEL); | ||
3484 | if (!h->hba_inquiry_data) | ||
3485 | return; | ||
3486 | rc = hpsa_scsi_do_inquiry(h, RAID_CTLR_LUNID, 0, | ||
3487 | h->hba_inquiry_data, HBA_INQUIRY_BYTE_COUNT); | ||
3488 | if (rc != 0) { | ||
3489 | kfree(h->hba_inquiry_data); | ||
3490 | h->hba_inquiry_data = NULL; | ||
3491 | } | ||
3492 | } | ||
3493 | |||
3289 | static int __devinit hpsa_init_one(struct pci_dev *pdev, | 3494 | static int __devinit hpsa_init_one(struct pci_dev *pdev, |
3290 | const struct pci_device_id *ent) | 3495 | const struct pci_device_id *ent) |
3291 | { | 3496 | { |
3292 | int i; | 3497 | int i, rc; |
3293 | int dac; | 3498 | int dac; |
3294 | struct ctlr_info *h; | 3499 | struct ctlr_info *h; |
3295 | 3500 | ||
@@ -3314,17 +3519,23 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, | |||
3314 | } | 3519 | } |
3315 | } | 3520 | } |
3316 | 3521 | ||
3317 | BUILD_BUG_ON(sizeof(struct CommandList) % 8); | 3522 | /* Command structures must be aligned on a 32-byte boundary because |
3523 | * the 5 lower bits of the address are used by the hardware and by | ||
3524 | * the driver. See comments in hpsa.h for more info. | ||
3525 | */ | ||
3526 | #define COMMANDLIST_ALIGNMENT 32 | ||
3527 | BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT); | ||
3318 | h = kzalloc(sizeof(*h), GFP_KERNEL); | 3528 | h = kzalloc(sizeof(*h), GFP_KERNEL); |
3319 | if (!h) | 3529 | if (!h) |
3320 | return -1; | 3530 | return -ENOMEM; |
3321 | 3531 | ||
3322 | h->busy_initializing = 1; | 3532 | h->busy_initializing = 1; |
3323 | INIT_HLIST_HEAD(&h->cmpQ); | 3533 | INIT_HLIST_HEAD(&h->cmpQ); |
3324 | INIT_HLIST_HEAD(&h->reqQ); | 3534 | INIT_HLIST_HEAD(&h->reqQ); |
3325 | mutex_init(&h->busy_shutting_down); | 3535 | mutex_init(&h->busy_shutting_down); |
3326 | init_completion(&h->scan_wait); | 3536 | init_completion(&h->scan_wait); |
3327 | if (hpsa_pci_init(h, pdev) != 0) | 3537 | rc = hpsa_pci_init(h, pdev); |
3538 | if (rc != 0) | ||
3328 | goto clean1; | 3539 | goto clean1; |
3329 | 3540 | ||
3330 | sprintf(h->devname, "hpsa%d", number_of_controllers); | 3541 | sprintf(h->devname, "hpsa%d", number_of_controllers); |
@@ -3333,27 +3544,32 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, | |||
3333 | h->pdev = pdev; | 3544 | h->pdev = pdev; |
3334 | 3545 | ||
3335 | /* configure PCI DMA stuff */ | 3546 | /* configure PCI DMA stuff */ |
3336 | if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) | 3547 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64)); |
3548 | if (rc == 0) { | ||
3337 | dac = 1; | 3549 | dac = 1; |
3338 | else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) | 3550 | } else { |
3339 | dac = 0; | 3551 | rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); |
3340 | else { | 3552 | if (rc == 0) { |
3341 | dev_err(&pdev->dev, "no suitable DMA available\n"); | 3553 | dac = 0; |
3342 | goto clean1; | 3554 | } else { |
3555 | dev_err(&pdev->dev, "no suitable DMA available\n"); | ||
3556 | goto clean1; | ||
3557 | } | ||
3343 | } | 3558 | } |
3344 | 3559 | ||
3345 | /* make sure the board interrupts are off */ | 3560 | /* make sure the board interrupts are off */ |
3346 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | 3561 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
3347 | if (request_irq(h->intr[SIMPLE_MODE_INT], do_hpsa_intr, | 3562 | rc = request_irq(h->intr[PERF_MODE_INT], do_hpsa_intr, |
3348 | IRQF_DISABLED | IRQF_SHARED, h->devname, h)) { | 3563 | IRQF_DISABLED, h->devname, h); |
3564 | if (rc) { | ||
3349 | dev_err(&pdev->dev, "unable to get irq %d for %s\n", | 3565 | dev_err(&pdev->dev, "unable to get irq %d for %s\n", |
3350 | h->intr[SIMPLE_MODE_INT], h->devname); | 3566 | h->intr[PERF_MODE_INT], h->devname); |
3351 | goto clean2; | 3567 | goto clean2; |
3352 | } | 3568 | } |
3353 | 3569 | ||
3354 | dev_info(&pdev->dev, "%s: <0x%x> at PCI %s IRQ %d%s using DAC\n", | 3570 | dev_info(&pdev->dev, "%s: <0x%x> at IRQ %d%s using DAC\n", |
3355 | h->devname, pdev->device, pci_name(pdev), | 3571 | h->devname, pdev->device, |
3356 | h->intr[SIMPLE_MODE_INT], dac ? "" : " not"); | 3572 | h->intr[PERF_MODE_INT], dac ? "" : " not"); |
3357 | 3573 | ||
3358 | h->cmd_pool_bits = | 3574 | h->cmd_pool_bits = |
3359 | kmalloc(((h->nr_cmds + BITS_PER_LONG - | 3575 | kmalloc(((h->nr_cmds + BITS_PER_LONG - |
@@ -3368,9 +3584,13 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, | |||
3368 | || (h->cmd_pool == NULL) | 3584 | || (h->cmd_pool == NULL) |
3369 | || (h->errinfo_pool == NULL)) { | 3585 | || (h->errinfo_pool == NULL)) { |
3370 | dev_err(&pdev->dev, "out of memory"); | 3586 | dev_err(&pdev->dev, "out of memory"); |
3587 | rc = -ENOMEM; | ||
3371 | goto clean4; | 3588 | goto clean4; |
3372 | } | 3589 | } |
3373 | spin_lock_init(&h->lock); | 3590 | spin_lock_init(&h->lock); |
3591 | spin_lock_init(&h->scan_lock); | ||
3592 | init_waitqueue_head(&h->scan_wait_queue); | ||
3593 | h->scan_finished = 1; /* no scan currently in progress */ | ||
3374 | 3594 | ||
3375 | pci_set_drvdata(pdev, h); | 3595 | pci_set_drvdata(pdev, h); |
3376 | memset(h->cmd_pool_bits, 0, | 3596 | memset(h->cmd_pool_bits, 0, |
@@ -3382,6 +3602,8 @@ static int __devinit hpsa_init_one(struct pci_dev *pdev, | |||
3382 | /* Turn the interrupts on so we can service requests */ | 3602 | /* Turn the interrupts on so we can service requests */ |
3383 | h->access.set_intr_mask(h, HPSA_INTR_ON); | 3603 | h->access.set_intr_mask(h, HPSA_INTR_ON); |
3384 | 3604 | ||
3605 | hpsa_put_ctlr_into_performant_mode(h); | ||
3606 | hpsa_hba_inquiry(h); | ||
3385 | hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ | 3607 | hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */ |
3386 | h->busy_initializing = 0; | 3608 | h->busy_initializing = 0; |
3387 | return 1; | 3609 | return 1; |
@@ -3397,12 +3619,12 @@ clean4: | |||
3397 | h->nr_cmds * sizeof(struct ErrorInfo), | 3619 | h->nr_cmds * sizeof(struct ErrorInfo), |
3398 | h->errinfo_pool, | 3620 | h->errinfo_pool, |
3399 | h->errinfo_pool_dhandle); | 3621 | h->errinfo_pool_dhandle); |
3400 | free_irq(h->intr[SIMPLE_MODE_INT], h); | 3622 | free_irq(h->intr[PERF_MODE_INT], h); |
3401 | clean2: | 3623 | clean2: |
3402 | clean1: | 3624 | clean1: |
3403 | h->busy_initializing = 0; | 3625 | h->busy_initializing = 0; |
3404 | kfree(h); | 3626 | kfree(h); |
3405 | return -1; | 3627 | return rc; |
3406 | } | 3628 | } |
3407 | 3629 | ||
3408 | static void hpsa_flush_cache(struct ctlr_info *h) | 3630 | static void hpsa_flush_cache(struct ctlr_info *h) |
@@ -3441,7 +3663,7 @@ static void hpsa_shutdown(struct pci_dev *pdev) | |||
3441 | */ | 3663 | */ |
3442 | hpsa_flush_cache(h); | 3664 | hpsa_flush_cache(h); |
3443 | h->access.set_intr_mask(h, HPSA_INTR_OFF); | 3665 | h->access.set_intr_mask(h, HPSA_INTR_OFF); |
3444 | free_irq(h->intr[2], h); | 3666 | free_irq(h->intr[PERF_MODE_INT], h); |
3445 | #ifdef CONFIG_PCI_MSI | 3667 | #ifdef CONFIG_PCI_MSI |
3446 | if (h->msix_vector) | 3668 | if (h->msix_vector) |
3447 | pci_disable_msix(h->pdev); | 3669 | pci_disable_msix(h->pdev); |
@@ -3470,7 +3692,11 @@ static void __devexit hpsa_remove_one(struct pci_dev *pdev) | |||
3470 | pci_free_consistent(h->pdev, | 3692 | pci_free_consistent(h->pdev, |
3471 | h->nr_cmds * sizeof(struct ErrorInfo), | 3693 | h->nr_cmds * sizeof(struct ErrorInfo), |
3472 | h->errinfo_pool, h->errinfo_pool_dhandle); | 3694 | h->errinfo_pool, h->errinfo_pool_dhandle); |
3695 | pci_free_consistent(h->pdev, h->reply_pool_size, | ||
3696 | h->reply_pool, h->reply_pool_dhandle); | ||
3473 | kfree(h->cmd_pool_bits); | 3697 | kfree(h->cmd_pool_bits); |
3698 | kfree(h->blockFetchTable); | ||
3699 | kfree(h->hba_inquiry_data); | ||
3474 | /* | 3700 | /* |
3475 | * Deliberately omit pci_disable_device(): it does something nasty to | 3701 | * Deliberately omit pci_disable_device(): it does something nasty to |
3476 | * Smart Array controllers that pci_enable_device does not undo | 3702 | * Smart Array controllers that pci_enable_device does not undo |
@@ -3502,6 +3728,129 @@ static struct pci_driver hpsa_pci_driver = { | |||
3502 | .resume = hpsa_resume, | 3728 | .resume = hpsa_resume, |
3503 | }; | 3729 | }; |
3504 | 3730 | ||
3731 | /* Fill in bucket_map[], given nsgs (the max number of | ||
3732 | * scatter gather elements supported) and bucket[], | ||
3733 | * which is an array of 8 integers. The bucket[] array | ||
3734 | * contains 8 different DMA transfer sizes (in 16 | ||
3735 | * byte increments) which the controller uses to fetch | ||
3736 | * commands. This function fills in bucket_map[], which | ||
3737 | * maps a given number of scatter gather elements to one of | ||
3738 | * the 8 DMA transfer sizes. The point of it is to allow the | ||
3739 | * controller to only do as much DMA as needed to fetch the | ||
3740 | * command, with the DMA transfer size encoded in the lower | ||
3741 | * bits of the command address. | ||
3742 | */ | ||
3743 | static void calc_bucket_map(int bucket[], int num_buckets, | ||
3744 | int nsgs, int *bucket_map) | ||
3745 | { | ||
3746 | int i, j, b, size; | ||
3747 | |||
3748 | /* even a command with 0 SGs requires 4 blocks */ | ||
3749 | #define MINIMUM_TRANSFER_BLOCKS 4 | ||
3750 | #define NUM_BUCKETS 8 | ||
3751 | /* Note, bucket_map must have nsgs+1 entries. */ | ||
3752 | for (i = 0; i <= nsgs; i++) { | ||
3753 | /* Compute size of a command with i SG entries */ | ||
3754 | size = i + MINIMUM_TRANSFER_BLOCKS; | ||
3755 | b = num_buckets; /* Assume the biggest bucket */ | ||
3756 | /* Find the bucket that is just big enough */ | ||
3757 | for (j = 0; j < 8; j++) { | ||
3758 | if (bucket[j] >= size) { | ||
3759 | b = j; | ||
3760 | break; | ||
3761 | } | ||
3762 | } | ||
3763 | /* for a command with i SG entries, use bucket b. */ | ||
3764 | bucket_map[i] = b; | ||
3765 | } | ||
3766 | } | ||
3767 | |||
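A worked illustration of the mapping above (user-space sketch, not driver code, reusing the bft[] table that hpsa_put_ctlr_into_performant_mode() programs below): a command with i scatter-gather entries occupies i + MINIMUM_TRANSFER_BLOCKS sixteen-byte blocks and is assigned the smallest bucket that can hold it.

    #include <stdio.h>

    int main(void)
    {
            /* same fetch-size table the driver writes into BlockFetch[] below,
             * with bft[7] already bumped to max_sg_entries + 4 = 36 */
            int bft[8] = {5, 6, 8, 10, 12, 20, 28, 36};
            int bucket_map[33];                 /* nsgs = 32, so nsgs + 1 entries */
            int i, j;

            for (i = 0; i <= 32; i++) {
                    int size = i + 4;           /* MINIMUM_TRANSFER_BLOCKS */
                    bucket_map[i] = 8;          /* default: the biggest bucket */
                    for (j = 0; j < 8; j++)
                            if (bft[j] >= size) {
                                    bucket_map[i] = j;   /* first bucket that fits */
                                    break;
                            }
            }
            /* e.g. 0 SGs -> bucket 0 (5 blocks), 10 SGs -> bucket 5 (20 blocks),
             * 32 SGs -> bucket 7 (36 blocks) */
            printf("%d %d %d\n", bucket_map[0], bucket_map[10], bucket_map[32]);
            return 0;
    }

The controller then only fetches bft[bucket] * 16 bytes for a given command instead of the maximum command size, which is the point of encoding the fetch size in the low bits of the command address.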
3768 | static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h) | ||
3769 | { | ||
3770 | u32 trans_support; | ||
3771 | u64 trans_offset; | ||
3772 | /* 5 = 1 s/g entry or 4k | ||
3773 | * 6 = 2 s/g entry or 8k | ||
3774 | * 8 = 4 s/g entry or 16k | ||
3775 | * 10 = 6 s/g entry or 24k | ||
3776 | */ | ||
3777 | int bft[8] = {5, 6, 8, 10, 12, 20, 28, 35}; /* for scatter/gathers */ | ||
3778 | int i = 0; | ||
3779 | int l = 0; | ||
3780 | unsigned long register_value; | ||
3781 | |||
3782 | trans_support = readl(&(h->cfgtable->TransportSupport)); | ||
3783 | if (!(trans_support & PERFORMANT_MODE)) | ||
3784 | return; | ||
3785 | |||
3786 | h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands)); | ||
3787 | h->max_sg_entries = 32; | ||
3788 | /* Performant mode ring buffer and supporting data structures */ | ||
3789 | h->reply_pool_size = h->max_commands * sizeof(u64); | ||
3790 | h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, | ||
3791 | &(h->reply_pool_dhandle)); | ||
3792 | |||
3793 | /* Need a block fetch table for performant mode */ | ||
3794 | h->blockFetchTable = kmalloc(((h->max_sg_entries+1) * | ||
3795 | sizeof(u32)), GFP_KERNEL); | ||
3796 | |||
3797 | if ((h->reply_pool == NULL) | ||
3798 | || (h->blockFetchTable == NULL)) | ||
3799 | goto clean_up; | ||
3800 | |||
3801 | h->reply_pool_wraparound = 1; /* spec: init to 1 */ | ||
3802 | |||
3803 | /* Controller spec: zero out this buffer. */ | ||
3804 | memset(h->reply_pool, 0, h->reply_pool_size); | ||
3805 | h->reply_pool_head = h->reply_pool; | ||
3806 | |||
3807 | trans_offset = readl(&(h->cfgtable->TransMethodOffset)); | ||
3808 | bft[7] = h->max_sg_entries + 4; | ||
3809 | calc_bucket_map(bft, ARRAY_SIZE(bft), 32, h->blockFetchTable); | ||
3810 | for (i = 0; i < 8; i++) | ||
3811 | writel(bft[i], &h->transtable->BlockFetch[i]); | ||
3812 | |||
3813 | /* size of controller ring buffer */ | ||
3814 | writel(h->max_commands, &h->transtable->RepQSize); | ||
3815 | writel(1, &h->transtable->RepQCount); | ||
3816 | writel(0, &h->transtable->RepQCtrAddrLow32); | ||
3817 | writel(0, &h->transtable->RepQCtrAddrHigh32); | ||
3818 | writel(h->reply_pool_dhandle, &h->transtable->RepQAddr0Low32); | ||
3819 | writel(0, &h->transtable->RepQAddr0High32); | ||
3820 | writel(CFGTBL_Trans_Performant, | ||
3821 | &(h->cfgtable->HostWrite.TransportRequest)); | ||
3822 | writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); | ||
3823 | /* under certain very rare conditions, this can take a while. | ||
3824 | * (e.g.: hot replace a failed 144GB drive in a RAID 5 set right | ||
3825 | * as we enter this code.) */ | ||
3826 | for (l = 0; l < MAX_CONFIG_WAIT; l++) { | ||
3827 | register_value = readl(h->vaddr + SA5_DOORBELL); | ||
3828 | if (!(register_value & CFGTBL_ChangeReq)) | ||
3829 | break; | ||
3830 | /* delay and try again */ | ||
3831 | set_current_state(TASK_INTERRUPTIBLE); | ||
3832 | schedule_timeout(10); | ||
3833 | } | ||
3834 | register_value = readl(&(h->cfgtable->TransportActive)); | ||
3835 | if (!(register_value & CFGTBL_Trans_Performant)) { | ||
3836 | dev_warn(&h->pdev->dev, "unable to get board into" | ||
3837 | " performant mode\n"); | ||
3838 | return; | ||
3839 | } | ||
3840 | |||
3841 | /* Change the access methods to the performant access methods */ | ||
3842 | h->access = SA5_performant_access; | ||
3843 | h->transMethod = CFGTBL_Trans_Performant; | ||
3844 | |||
3845 | return; | ||
3846 | |||
3847 | clean_up: | ||
3848 | if (h->reply_pool) | ||
3849 | pci_free_consistent(h->pdev, h->reply_pool_size, | ||
3850 | h->reply_pool, h->reply_pool_dhandle); | ||
3851 | kfree(h->blockFetchTable); | ||
3852 | } | ||
3853 | |||
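The function above only programs the ring: reply_pool is zeroed, reply_pool_wraparound starts at 1, and the controller posts 64-bit tags into the ring as commands complete. How the driver drains that ring is defined by the SA5_performant_access methods in hpsa.h; the following is a rough, hypothetical sketch of the usual convention (bit 0 of each posted entry matches the current wraparound parity), assuming the field names introduced by this patch, not the driver's actual code:

    /* illustrative sketch only -- not the driver's completion routine */
    struct reply_ring {
            unsigned long long *base;     /* h->reply_pool            */
            unsigned long long *head;     /* h->reply_pool_head       */
            unsigned int entries;         /* h->max_commands          */
            unsigned int wraparound;      /* h->reply_pool_wraparound */
    };

    #define RING_EMPTY (~0ULL)            /* stands in for FIFO_EMPTY */

    static unsigned long long pop_reply(struct reply_ring *r)
    {
            unsigned long long tag = *r->head;

            if ((tag & 1) != r->wraparound)   /* slot not (re)written yet */
                    return RING_EMPTY;
            if (++r->head == r->base + r->entries) {
                    r->head = r->base;        /* wrap to the start ...            */
                    r->wraparound ^= 1;       /* ... and flip the expected parity */
            }
            return tag;
    }

    int main(void)
    {
            unsigned long long pool[4] = {0};         /* zeroed, per controller spec */
            struct reply_ring r = {pool, pool, 4, 1}; /* wraparound starts at 1 */

            return pop_reply(&r) == RING_EMPTY ? 0 : 1; /* empty until HW posts a tag */
    }

Starting the parity at 1 against a zeroed pool means an unwritten slot can never satisfy the test, which is presumably why the spec requires initializing reply_pool_wraparound to 1 and clearing the buffer.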
3505 | /* | 3854 | /* |
3506 | * This is it. Register the PCI driver information for the cards we control; | 3855 | * This is it. Register the PCI driver information for the cards we control; |
3507 | * the OS will call our registered routines when it finds one of our cards. | 3856 | * the OS will call our registered routines when it finds one of our cards. |