author		Ed Lin <ed.lin@promise.com>	2009-03-31 21:30:36 -0400
committer	James Bottomley <James.Bottomley@HansenPartnership.com>	2009-04-03 10:23:09 -0400
commit		0f3f6ee68f771b61dc6bc5ae7c2e355cfc5884d1 (patch)
tree		8b7420d8d5d368e309d7b98cf2fb3f7839403873 /drivers/scsi
parent		591a3a5f604c2bee0ba6a614469575918238f4f9 (diff)
[SCSI] stex: add new 6G controller support
This adds support for a new SAS 6G controller (st_yel).
Signed-off-by: Ed Lin <ed.lin@promise.com>
Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
Diffstat (limited to 'drivers/scsi')
-rw-r--r--	drivers/scsi/stex.c	382
1 file changed, 351 insertions(+), 31 deletions(-)
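The patch routes the three operations that differ between the two message-unit generations — request-slot allocation, scatter/gather table building, and doorbell submission — through per-card hooks (alloc_rq, map_sg, send), so stex_queuecommand() and stex_hba_stop() remain common code. A minimal sketch of that dispatch shape, with stub types invented for the example (illustrative only, not the driver source):

    /* Stub types; the real definitions live in drivers/scsi/stex.c. */
    struct req_msg;
    struct st_ccb { int sg_count; };

    struct st_hba {
        struct req_msg *(*alloc_rq)(struct st_hba *);
        int  (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
        void (*send)(struct st_hba *, struct req_msg *, unsigned short);
    };

    /* Common submission path: callers never branch on the card type. */
    static void submit_one(struct st_hba *hba, struct st_ccb *ccb, unsigned short tag)
    {
        struct req_msg *req = hba->alloc_rq(hba); /* stex_alloc_req or stex_ss_alloc_req */

        if (!hba->map_sg(hba, req, ccb))          /* stex_map_sg or stex_ss_map_sg */
            ccb->sg_count = 0;                    /* no data phase */
        hba->send(hba, req, tag);                 /* stex_send_cmd or stex_ss_send_cmd */
    }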
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 75f5079e569..4e2b2afc13f 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -55,6 +55,13 @@ enum {
 	OIS = 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
 	OIM = 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */
 
+	YH2I_INT = 0x20,
+	YINT_EN = 0x34,
+	YI2H_INT = 0x9c,
+	YI2H_INT_C = 0xa0,
+	YH2I_REQ = 0xc0,
+	YH2I_REQ_HI = 0xc4,
+
 	/* MU register value */
 	MU_INBOUND_DOORBELL_HANDSHAKE = 1,
 	MU_INBOUND_DOORBELL_REQHEADCHANGED = 2,
@@ -95,9 +102,14 @@ enum {
 	TASK_ATTRIBUTE_ORDERED = 0x2,
 	TASK_ATTRIBUTE_ACA = 0x4,
 
+	SS_STS_NORMAL = 0x80000000,
+	SS_STS_DONE = 0x40000000,
+	SS_STS_HANDSHAKE = 0x20000000,
+
+	SS_HEAD_HANDSHAKE = 0x80,
+
 	STEX_CDB_LENGTH = 16,
 	STATUS_VAR_LEN = 128,
-	ST_MAX_SG = 32,
 
 	/* sg flags */
 	SG_CF_EOT = 0x80,	/* end of table */
@@ -111,6 +123,7 @@ enum {
 	st_vsc = 1,
 	st_yosemite = 2,
 	st_seq = 3,
+	st_yel = 4,
 
 	PASSTHRU_REQ_TYPE = 0x00000001,
 	PASSTHRU_REQ_NO_WAKEUP = 0x00000100,
@@ -151,12 +164,26 @@ struct st_sgitem {
 	__le64 addr;
 };
 
+struct st_ss_sgitem {
+	__le32 addr;
+	__le32 addr_hi;
+	__le32 count;
+};
+
 struct st_sgtable {
 	__le16 sg_count;
 	__le16 max_sg_count;
 	__le32 sz_in_byte;
 };
 
+struct st_msg_header {
+	__le64 handle;
+	u8 flag;
+	u8 channel;
+	__le16 timeout;
+	u32 reserved;
+};
+
 struct handshake_frame {
 	__le64 rb_phy;		/* request payload queue physical address */
 	__le16 req_sz;		/* size of each request payload */
@@ -172,7 +199,8 @@ struct handshake_frame {
 	__le32 partner_ver_build;
 	__le32 extra_offset;	/* NEW */
 	__le32 extra_size;	/* NEW */
-	u32 reserved1[2];
+	__le32 scratch_size;
+	u32 reserved1;
 };
 
 struct req_msg {
@@ -263,6 +291,10 @@ struct st_hba {
 	struct Scsi_Host *host;
 	struct pci_dev *pdev;
 
+	struct req_msg * (*alloc_rq) (struct st_hba *);
+	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
+	void (*send) (struct st_hba *, struct req_msg *, u16);
+
 	u32 req_head;
 	u32 req_tail;
 	u32 status_head;
@@ -272,6 +304,7 @@ struct st_hba {
 	void *copy_buffer;	/* temp buffer for driver-handled commands */
 	struct st_ccb *ccb;
 	struct st_ccb *wait_ccb;
+	__le32 *scratch;
 
 	unsigned int mu_status;
 	unsigned int cardtype;
@@ -284,6 +317,9 @@ struct st_hba {
 };
 
 struct st_card_info {
+	struct req_msg * (*alloc_rq) (struct st_hba *);
+	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
+	void (*send) (struct st_hba *, struct req_msg *, u16);
 	unsigned int max_id;
 	unsigned int max_lun;
 	unsigned int max_channel;
@@ -352,6 +388,12 @@ static struct req_msg *stex_alloc_req(struct st_hba *hba)
 	return req;
 }
 
+static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
+{
+	return (struct req_msg *)(hba->dma_mem +
+		hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
+}
+
 static int stex_map_sg(struct st_hba *hba,
 	struct req_msg *req, struct st_ccb *ccb)
 {
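Each st_yel request slot in the coherent DMA area is a struct st_msg_header immediately followed by the request frame, which is why stex_ss_alloc_req() skips sizeof(struct st_msg_header) and stex_ss_send_cmd() can step back to the header with (struct st_msg_header *)req - 1. A sketch of that slot layout, using helper names invented for the example (not part of the patch):

    /*
     * One request slot of hba->rq_size bytes (512 for st_yel):
     *
     *   +----------------------+--------------------------------------+
     *   | struct st_msg_header | struct req_msg (cdb, variable[] ...) |
     *   +----------------------+--------------------------------------+
     *
     * Slot i starts at hba->dma_mem + i * hba->rq_size.
     */
    static struct req_msg *slot_to_req(struct st_hba *hba, unsigned int i)
    {
        return (struct req_msg *)(hba->dma_mem + i * hba->rq_size +
            sizeof(struct st_msg_header));
    }

    static struct st_msg_header *req_to_msg_h(struct req_msg *req)
    {
        return (struct st_msg_header *)req - 1;  /* header precedes the frame */
    }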
@@ -384,6 +426,39 @@ static int stex_map_sg(struct st_hba *hba,
 	return nseg;
 }
 
+static int stex_ss_map_sg(struct st_hba *hba,
+	struct req_msg *req, struct st_ccb *ccb)
+{
+	struct scsi_cmnd *cmd;
+	struct scatterlist *sg;
+	struct st_sgtable *dst;
+	struct st_ss_sgitem *table;
+	int i, nseg;
+
+	cmd = ccb->cmd;
+	nseg = scsi_dma_map(cmd);
+	BUG_ON(nseg < 0);
+	if (nseg) {
+		dst = (struct st_sgtable *)req->variable;
+
+		ccb->sg_count = nseg;
+		dst->sg_count = cpu_to_le16((u16)nseg);
+		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
+		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
+
+		table = (struct st_ss_sgitem *)(dst + 1);
+		scsi_for_each_sg(cmd, sg, nseg, i) {
+			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
+			table[i].addr =
+				cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
+			table[i].addr_hi =
+				cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
+		}
+	}
+
+	return nseg;
+}
+
 static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
 {
 	struct st_frame *p;
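The double shift (sg_dma_address(sg) >> 16) >> 16 in stex_ss_map_sg() extracts the upper 32 bits of the DMA address without ever shifting by 32, which would be undefined and draws a compiler warning on configurations where dma_addr_t is only 32 bits wide. The same idiom in isolation (the helper name is invented for the example):

    #include <linux/types.h>

    /* Split a possibly 64-bit DMA address into two 32-bit register halves. */
    static void split_dma_addr(dma_addr_t addr, u32 *lo, u32 *hi)
    {
        *lo = addr & 0xffffffff;
        *hi = (addr >> 16) >> 16;   /* safe even when dma_addr_t is 32-bit */
    }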
@@ -424,6 +499,37 @@ stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
 	readl(hba->mmio_base + IDBL); /* flush */
 }
 
+static void
+stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
+{
+	struct scsi_cmnd *cmd;
+	struct st_msg_header *msg_h;
+	dma_addr_t addr;
+
+	req->tag = cpu_to_le16(tag);
+
+	hba->ccb[tag].req = req;
+	hba->out_req_cnt++;
+
+	cmd = hba->ccb[tag].cmd;
+	msg_h = (struct st_msg_header *)req - 1;
+	if (likely(cmd)) {
+		msg_h->channel = (u8)cmd->device->channel;
+		msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
+	}
+	addr = hba->dma_handle + hba->req_head * hba->rq_size;
+	addr += (hba->ccb[tag].sg_count+4)/11;
+	msg_h->handle = cpu_to_le64(addr);
+
+	++hba->req_head;
+	hba->req_head %= hba->rq_count+1;
+
+	writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
+	readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
+	writel(addr, hba->mmio_base + YH2I_REQ);
+	readl(hba->mmio_base + YH2I_REQ); /* flush */
+}
+
 static int
 stex_slave_alloc(struct scsi_device *sdev)
 {
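Each doorbell writel() above is followed by a readl() of the same register: MMIO writes over PCI may be posted, and the read-back forces them out to the adapter before the submission path returns. The pattern reduced to a helper (register offset and name are placeholders, not driver API):

    #include <linux/io.h>
    #include <linux/types.h>

    /* Write a doorbell register and flush the posted PCI write. */
    static void post_doorbell(void __iomem *base, unsigned int reg, u32 val)
    {
        writel(val, base + reg);
        readl(base + reg);      /* read-back flushes the posted write */
    }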
@@ -504,7 +610,8 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 	case INQUIRY:
 		if (id != host->max_id - 1)
 			break;
-		if (lun == 0 && (cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
+		if (!lun && !cmd->device->channel &&
+			(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
 			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
 				sizeof(console_inq_page));
 			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
@@ -542,7 +649,7 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 	if (unlikely(tag >= host->can_queue))
 		return SCSI_MLQUEUE_HOST_BUSY;
 
-	req = stex_alloc_req(hba);
+	req = hba->alloc_rq(hba);
 
 	req->lun = lun;
 	req->target = id;
@@ -561,10 +668,12 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
 	hba->ccb[tag].sense_buffer = cmd->sense_buffer;
 
-	if (cmd->sc_data_direction != DMA_NONE)
-		stex_map_sg(hba, req, &hba->ccb[tag]);
+	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
+		hba->ccb[tag].sg_count = 0;
+		memset(&req->variable[0], 0, 8);
+	}
 
-	stex_send_cmd(hba, req, tag);
+	hba->send(hba, req, tag);
 	return 0;
 }
 
@@ -746,7 +855,103 @@ static irqreturn_t stex_intr(int irq, void *__hba)
 	return IRQ_RETVAL(handled);
 }
 
-static int stex_handshake(struct st_hba *hba)
+static void stex_ss_mu_intr(struct st_hba *hba)
+{
+	struct status_msg *resp;
+	struct st_ccb *ccb;
+	__le32 *scratch;
+	unsigned int size;
+	int count = 0;
+	u32 value;
+	u16 tag;
+
+	if (unlikely(hba->out_req_cnt <= 0 ||
+			hba->mu_status == MU_STATE_RESETTING))
+		return;
+
+	while (count < hba->sts_count) {
+		scratch = hba->scratch + hba->status_tail;
+		value = le32_to_cpu(*scratch);
+		if (unlikely(!(value & SS_STS_NORMAL)))
+			return;
+
+		resp = hba->status_buffer + hba->status_tail;
+		*scratch = 0;
+		++count;
+		++hba->status_tail;
+		hba->status_tail %= hba->sts_count+1;
+
+		tag = (u16)value;
+		if (unlikely(tag >= hba->host->can_queue)) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): invalid tag\n", pci_name(hba->pdev));
+			continue;
+		}
+
+		hba->out_req_cnt--;
+		ccb = &hba->ccb[tag];
+		if (unlikely(hba->wait_ccb == ccb))
+			hba->wait_ccb = NULL;
+		if (unlikely(ccb->req == NULL)) {
+			printk(KERN_WARNING DRV_NAME
+				"(%s): lagging req\n", pci_name(hba->pdev));
+			continue;
+		}
+
+		ccb->req = NULL;
+		if (likely(value & SS_STS_DONE)) { /* normal case */
+			ccb->srb_status = SRB_STATUS_SUCCESS;
+			ccb->scsi_status = SAM_STAT_GOOD;
+		} else {
+			ccb->srb_status = resp->srb_status;
+			ccb->scsi_status = resp->scsi_status;
+			size = resp->payload_sz * sizeof(u32);
+			if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
+				size > sizeof(*resp))) {
+				printk(KERN_WARNING DRV_NAME
+					"(%s): bad status size\n",
+					pci_name(hba->pdev));
+			} else {
+				size -= sizeof(*resp) - STATUS_VAR_LEN;
+				if (size)
+					stex_copy_data(ccb, resp, size);
+			}
+			if (likely(ccb->cmd != NULL))
+				stex_check_cmd(hba, ccb, resp);
+		}
+
+		if (likely(ccb->cmd != NULL)) {
+			scsi_dma_unmap(ccb->cmd);
+			stex_scsi_done(ccb);
+		} else
+			ccb->req_type = 0;
+	}
+}
+
+static irqreturn_t stex_ss_intr(int irq, void *__hba)
+{
+	struct st_hba *hba = __hba;
+	void __iomem *base = hba->mmio_base;
+	u32 data;
+	unsigned long flags;
+	int handled = 0;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+
+	data = readl(base + YI2H_INT);
+	if (data && data != 0xffffffff) {
+		/* clear the interrupt */
+		writel(data, base + YI2H_INT_C);
+		stex_ss_mu_intr(hba);
+		handled = 1;
+	}
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static int stex_common_handshake(struct st_hba *hba)
 {
 	void __iomem *base = hba->mmio_base;
 	struct handshake_frame *h;
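In the new completion path the firmware reports each finished request as one 32-bit word in the scratch ring: SS_STS_NORMAL (bit 31) marks the entry as valid, SS_STS_DONE (bit 30) means the command finished cleanly so no status frame has to be read, and the low 16 bits carry the request tag; only when SS_STS_DONE is clear does stex_ss_mu_intr() consult the matching status_msg. A stand-alone decoder for that word (type and function names invented for the example):

    #include <stdbool.h>
    #include <stdint.h>

    #define SS_STS_NORMAL 0x80000000u   /* entry is valid           */
    #define SS_STS_DONE   0x40000000u   /* done, no extended status */

    struct ss_completion {
        bool     valid;
        bool     done_ok;
        uint16_t tag;
    };

    static struct ss_completion decode_ss_word(uint32_t value)
    {
        struct ss_completion c = {
            .valid   = (value & SS_STS_NORMAL) != 0,
            .done_ok = (value & SS_STS_DONE) != 0,
            .tag     = (uint16_t)value,     /* low 16 bits are the tag */
        };
        return c;
    }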
@@ -827,10 +1032,79 @@ static int stex_handshake(struct st_hba *hba)
 	readl(base + IMR1);
 	writel(0, base + OMR1);
 	readl(base + OMR1); /* flush */
-	hba->mu_status = MU_STATE_STARTED;
 	return 0;
 }
 
+static int stex_ss_handshake(struct st_hba *hba)
+{
+	void __iomem *base = hba->mmio_base;
+	struct st_msg_header *msg_h;
+	struct handshake_frame *h;
+	__le32 *scratch = hba->scratch;
+	u32 data;
+	unsigned long before;
+	int ret = 0;
+
+	h = (struct handshake_frame *)(hba->alloc_rq(hba));
+	msg_h = (struct st_msg_header *)h - 1;
+	msg_h->handle = cpu_to_le64(hba->dma_handle);
+	msg_h->flag = SS_HEAD_HANDSHAKE;
+
+	h->rb_phy = cpu_to_le64(hba->dma_handle);
+	h->req_sz = cpu_to_le16(hba->rq_size);
+	h->req_cnt = cpu_to_le16(hba->rq_count+1);
+	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
+	h->status_cnt = cpu_to_le16(hba->sts_count+1);
+	stex_gettime(&h->hosttime);
+	h->partner_type = HMU_PARTNER_TYPE;
+	h->extra_offset = h->extra_size = 0;
+	h->scratch_size = cpu_to_le32((hba->sts_count+1)*sizeof(u32));
+
+	data = readl(base + YINT_EN);
+	data &= ~4;
+	writel(data, base + YINT_EN);
+	writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
+	writel(hba->dma_handle, base + YH2I_REQ);
+
+	scratch = hba->scratch;
+	before = jiffies;
+	while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
+		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+			printk(KERN_ERR DRV_NAME
+				"(%s): no signature after handshake frame\n",
+				pci_name(hba->pdev));
+			ret = -1;
+			break;
+		}
+		rmb();
+		msleep(1);
+	}
+
+	*scratch = 0;
+	msg_h->flag = 0;
+	return ret;
+}
+
+static int stex_handshake(struct st_hba *hba)
+{
+	int err;
+	unsigned long flags;
+
+	err = (hba->cardtype == st_yel) ?
+		stex_ss_handshake(hba) : stex_common_handshake(hba);
+	if (err == 0) {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->req_head = 0;
+		hba->req_tail = 0;
+		hba->status_head = 0;
+		hba->status_tail = 0;
+		hba->out_req_cnt = 0;
+		hba->mu_status = MU_STATE_STARTED;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+	return err;
+}
+
 static int stex_abort(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host = cmd->device->host;
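stex_ss_handshake() hands the firmware a single handshake frame describing the request ring, the status ring and the scratch area, then polls the first scratch dword until the firmware sets SS_STS_HANDSHAKE or MU_MAX_DELAY seconds elapse. The polling shape reduced to a generic helper (a sketch only; the predicate and its argument are placeholders):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* Poll a condition with a coarse timeout, sleeping 1 ms between checks. */
    static int poll_until(bool (*cond)(void *arg), void *arg, unsigned int secs)
    {
        unsigned long before = jiffies;

        while (!cond(arg)) {
            if (time_after(jiffies, before + secs * HZ))
                return -ETIMEDOUT;
            rmb();      /* re-read memory the device writes via DMA */
            msleep(1);
        }
        return 0;
    }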
@@ -859,15 +1133,23 @@ static int stex_abort(struct scsi_cmnd *cmd)
 		goto out;
 	}
 
-	data = readl(base + ODBL);
-	if (data == 0 || data == 0xffffffff)
-		goto fail_out;
+	if (hba->cardtype == st_yel) {
+		data = readl(base + YI2H_INT);
+		if (data == 0 || data == 0xffffffff)
+			goto fail_out;
 
-	writel(data, base + ODBL);
-	readl(base + ODBL); /* flush */
+		writel(data, base + YI2H_INT_C);
+		stex_ss_mu_intr(hba);
+	} else {
+		data = readl(base + ODBL);
+		if (data == 0 || data == 0xffffffff)
+			goto fail_out;
 
-	stex_mu_intr(hba, data);
+		writel(data, base + ODBL);
+		readl(base + ODBL); /* flush */
 
+		stex_mu_intr(hba, data);
+	}
 	if (hba->wait_ccb == NULL) {
 		printk(KERN_WARNING DRV_NAME
 			"(%s): lost interrupt\n", pci_name(hba->pdev));
@@ -947,13 +1229,6 @@ static int stex_reset(struct scsi_cmnd *cmd)
 			pci_name(hba->pdev));
 		return FAILED;
 	}
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->req_head = 0;
-	hba->req_tail = 0;
-	hba->status_head = 0;
-	hba->status_tail = 0;
-	hba->out_req_cnt = 0;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 	return SUCCESS;
 }
 
@@ -1021,7 +1296,6 @@ static struct scsi_host_template driver_template = {
 	.eh_abort_handler = stex_abort,
 	.eh_host_reset_handler = stex_reset,
 	.this_id = -1,
-	.sg_tablesize = ST_MAX_SG,
 };
 
 static struct pci_device_id stex_pci_tbl[] = {
@@ -1039,10 +1313,14 @@ static struct pci_device_id stex_pci_tbl[] = {
 	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
 
 	/* st_yosemite */
-	{ 0x105a, 0x8650, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yosemite },
+	{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },
 
 	/* st_seq */
 	{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },
+
+	/* st_yel */
+	{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
+	{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
 	{ }	/* terminate list */
 };
 
@@ -1055,6 +1333,9 @@ static struct st_card_info stex_card_info[] = {
 		.rq_count = 32,
 		.rq_size = 1048,
 		.sts_count = 32,
+		.alloc_rq = stex_alloc_req,
+		.map_sg = stex_map_sg,
+		.send = stex_send_cmd,
 	},
 
 	/* st_vsc */
@@ -1065,6 +1346,9 @@ static struct st_card_info stex_card_info[] = {
 		.rq_count = 32,
 		.rq_size = 1048,
 		.sts_count = 32,
+		.alloc_rq = stex_alloc_req,
+		.map_sg = stex_map_sg,
+		.send = stex_send_cmd,
 	},
 
 	/* st_yosemite */
@@ -1075,6 +1359,9 @@ static struct st_card_info stex_card_info[] = {
 		.rq_count = 256,
 		.rq_size = 1048,
 		.sts_count = 256,
+		.alloc_rq = stex_alloc_req,
+		.map_sg = stex_map_sg,
+		.send = stex_send_cmd,
 	},
 
 	/* st_seq */
@@ -1085,6 +1372,22 @@ static struct st_card_info stex_card_info[] = {
 		.rq_count = 32,
 		.rq_size = 1048,
 		.sts_count = 32,
+		.alloc_rq = stex_alloc_req,
+		.map_sg = stex_map_sg,
+		.send = stex_send_cmd,
+	},
+
+	/* st_yel */
+	{
+		.max_id = 129,
+		.max_lun = 256,
+		.max_channel = 3,
+		.rq_count = 801,
+		.rq_size = 512,
+		.sts_count = 801,
+		.alloc_rq = stex_ss_alloc_req,
+		.map_sg = stex_ss_map_sg,
+		.send = stex_ss_send_cmd,
 	},
 };
 
@@ -1117,7 +1420,8 @@ static int stex_request_irq(struct st_hba *hba)
 	} else
 		hba->msi_enabled = 0;
 
-	status = request_irq(pdev->irq, stex_intr, IRQF_SHARED, DRV_NAME, hba);
+	status = request_irq(pdev->irq, hba->cardtype == st_yel ?
+		stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
 
 	if (status != 0) {
 		if (hba->msi_enabled)
@@ -1141,7 +1445,7 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	struct st_hba *hba;
 	struct Scsi_Host *host;
 	const struct st_card_info *ci = NULL;
-	u32 sts_offset, cp_offset;
+	u32 sts_offset, cp_offset, scratch_offset;
 	int err;
 
 	err = pci_enable_device(pdev);
@@ -1186,7 +1490,9 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 	hba->cardtype = (unsigned int) id->driver_data;
 	ci = &stex_card_info[hba->cardtype];
-	sts_offset = (ci->rq_count+1) * ci->rq_size;
+	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
+	if (hba->cardtype == st_yel)
+		sts_offset += (ci->sts_count+1) * sizeof(u32);
 	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
 	hba->dma_size = cp_offset + sizeof(struct st_frame);
 	if (hba->cardtype == st_seq ||
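For st_yel the single coherent allocation is therefore laid out as request slots, then the scratch ring, then the status ring, then the copy buffer. The offset arithmetic with st_yel's numbers plugged in (a sketch; status_msg_sz and st_frame_sz stand in for sizeof() of driver-internal structs):

    /* Reproduces the probe-time layout math for st_yel (rq_count = sts_count = 801). */
    static unsigned int st_yel_dma_size(unsigned int status_msg_sz,
                                        unsigned int st_frame_sz)
    {
        const unsigned int rq_count = 801, rq_size = 512, sts_count = 801;

        unsigned int scratch_offset = (rq_count + 1) * rq_size;    /* 410624: 802 request slots */
        unsigned int sts_offset = scratch_offset +
                                  (sts_count + 1) * 4;             /* + 3208-byte scratch ring  */
        unsigned int cp_offset = sts_offset +
                                 (sts_count + 1) * status_msg_sz;  /* status ring               */

        return cp_offset + st_frame_sz;     /* copy buffer ends the coherent area */
    }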
@@ -1211,13 +1517,22 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 		goto out_pci_free;
 	}
 
+	if (hba->cardtype == st_yel)
+		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
 	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
 	hba->copy_buffer = hba->dma_mem + cp_offset;
 	hba->rq_count = ci->rq_count;
 	hba->rq_size = ci->rq_size;
 	hba->sts_count = ci->sts_count;
+	hba->alloc_rq = ci->alloc_rq;
+	hba->map_sg = ci->map_sg;
+	hba->send = ci->send;
 	hba->mu_status = MU_STATE_STARTING;
 
+	if (hba->cardtype == st_yel)
+		host->sg_tablesize = 38;
+	else
+		host->sg_tablesize = 32;
 	host->can_queue = ci->rq_count;
 	host->cmd_per_lun = ci->rq_count;
 	host->max_id = ci->max_id;
@@ -1282,15 +1597,20 @@ out_disable:
 static void stex_hba_stop(struct st_hba *hba)
 {
 	struct req_msg *req;
+	struct st_msg_header *msg_h;
 	unsigned long flags;
 	unsigned long before;
 	u16 tag = 0;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	req = stex_alloc_req(hba);
-	memset(req->cdb, 0, STEX_CDB_LENGTH);
+	req = hba->alloc_rq(hba);
+	if (hba->cardtype == st_yel) {
+		msg_h = (struct st_msg_header *)req - 1;
+		memset(msg_h, 0, hba->rq_size);
+	} else
+		memset(req, 0, hba->rq_size);
 
-	if (hba->cardtype == st_yosemite) {
+	if (hba->cardtype == st_yosemite || hba->cardtype == st_yel) {
 		req->cdb[0] = MGT_CMD;
 		req->cdb[1] = MGT_CMD_SIGNATURE;
 		req->cdb[2] = CTLR_CONFIG_CMD;
@@ -1307,7 +1627,7 @@ static void stex_hba_stop(struct st_hba *hba)
 	hba->ccb[tag].sense_buffer = NULL;
 	hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
 
-	stex_send_cmd(hba, req, tag);
+	hba->send(hba, req, tag);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
 	before = jiffies;