author	Ed Lin <ed.lin@promise.com>	2006-09-01 02:31:51 -0400
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-09-02 14:57:02 -0400
commit	cf355883f506051a8ce3ac4539752829320b6c8c
tree	2835cac08b85aa9778482938cf4fdf2fbd7d1ebd /drivers/scsi/stex.c
parent	5a25ba1677ab8d63890016a8c1bca68a3e0fbc7d
[SCSI] stex: add shared tags from block
Use the block layer's shared tags entirely within the driver. In the case of shutdown, assume that there are no other outstanding commands, so tag 0 is fine.

Signed-off-by: Ed Lin <ed.lin@promise.com>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
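For context, the pattern the patch switches to can be sketched roughly as follows, assuming the 2.6.x-era scsi_tcq / shared tag map interfaces used in the diff below. The function names (my_setup_tags, my_slave_alloc, my_queuecommand) are hypothetical and only illustrate how a driver consumes block-layer tags instead of maintaining its own bitmap:

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

/* Probe time: create one tag map shared by every device on the host,
 * so each queued request arrives with a unique tag in [0, depth). */
static int my_setup_tags(struct Scsi_Host *host, int depth)
{
	scsi_init_shared_tag_map(host, depth);
	/* scsi_init_shared_tag_map() sets host->bqt; NULL means the
	 * allocation failed (the same check the patch adds in probe). */
	return host->bqt ? 0 : -ENOMEM;
}

/* slave_alloc: advertise TCQ so the block layer assigns tags. */
static int my_slave_alloc(struct scsi_device *sdev)
{
	sdev->tagged_supported = 1;
	scsi_activate_tcq(sdev, sdev->host->can_queue);
	return 0;
}

/* queuecommand: the tag comes with the request; no private bitmap,
 * no locking around allocation, no search for a free slot. */
static int my_queuecommand(struct scsi_cmnd *cmd,
		void (*done)(struct scsi_cmnd *))
{
	u16 tag = cmd->request->tag;

	if (unlikely(tag >= cmd->device->host->can_queue))
		return SCSI_MLQUEUE_HOST_BUSY;

	cmd->scsi_done = done;
	/* ... index per-command state by 'tag' and hand it to hardware ... */
	return 0;
}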
Diffstat (limited to 'drivers/scsi/stex.c')
-rw-r--r--	drivers/scsi/stex.c	177
1 file changed, 57 insertions(+), 120 deletions(-)
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index fd093302bf1a..15fb99f224ee 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -34,6 +34,7 @@
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
 
 #define DRV_NAME "stex"
 #define ST_DRIVER_VERSION "2.9.0.13"
@@ -95,7 +96,6 @@ enum {
 
 	/* request count, etc. */
 	MU_MAX_REQUEST = 32,
-	TAG_BITMAP_LENGTH = MU_MAX_REQUEST,
 
 	/* one message wasted, use MU_MAX_REQUEST+1
 	   to handle MU_MAX_REQUEST messages */
@@ -265,7 +265,6 @@ struct st_hba {
 	struct Scsi_Host *host;
 	struct pci_dev *pdev;
 
-	u32 tag;
 	u32 req_head;
 	u32 req_tail;
 	u32 status_head;
@@ -309,40 +308,6 @@ static void stex_gettime(__le32 *time)
 	*(time + 1) = cpu_to_le32((tv.tv_sec >> 16) >> 16);
 }
 
-static u16 __stex_alloc_tag(unsigned long *bitmap)
-{
-	int i;
-	i = find_first_zero_bit(bitmap, TAG_BITMAP_LENGTH);
-	if (i < TAG_BITMAP_LENGTH)
-		__set_bit(i, bitmap);
-	return (u16)i;
-}
-
-static u16 stex_alloc_tag(struct st_hba *hba, unsigned long *bitmap)
-{
-	unsigned long flags;
-	u16 tag;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	tag = __stex_alloc_tag(bitmap);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	return tag;
-}
-
-static void __stex_free_tag(unsigned long *bitmap, u16 tag)
-{
-	__clear_bit((int)tag, bitmap);
-}
-
-static void stex_free_tag(struct st_hba *hba, unsigned long *bitmap, u16 tag)
-{
-	unsigned long flags;
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	__stex_free_tag(bitmap, tag);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-}
-
 static struct status_msg *stex_get_status(struct st_hba *hba)
 {
 	struct status_msg *status =
@@ -535,57 +500,31 @@ stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
 }
 
 static int
+stex_slave_alloc(struct scsi_device *sdev)
+{
+	/* Cheat: usually extracted from Inquiry data */
+	sdev->tagged_supported = 1;
+
+	scsi_activate_tcq(sdev, sdev->host->can_queue);
+
+	return 0;
+}
+
+static int
 stex_slave_config(struct scsi_device *sdev)
 {
 	sdev->use_10_for_rw = 1;
 	sdev->use_10_for_ms = 1;
 	sdev->timeout = 60 * HZ;
+	sdev->tagged_supported = 1;
+
 	return 0;
 }
 
 static void
 stex_slave_destroy(struct scsi_device *sdev)
 {
-	struct st_hba *hba = (struct st_hba *) sdev->host->hostdata;
-	struct req_msg *req;
-	unsigned long flags;
-	unsigned long before;
-	u16 tag;
-
-	if (sdev->type != TYPE_DISK)
-		return;
-
-	before = jiffies;
-	while ((tag = stex_alloc_tag(hba, (unsigned long *)&hba->tag))
-		== TAG_BITMAP_LENGTH) {
-		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ))
-			return;
-		msleep(10);
-	}
-
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	req = stex_alloc_req(hba);
-	memset(req->cdb, 0, STEX_CDB_LENGTH);
-
-	req->target = sdev->id;
-	req->lun = sdev->channel; /* firmware lun issue work around */
-	req->cdb[0] = SYNCHRONIZE_CACHE;
-
-	hba->ccb[tag].cmd = NULL;
-	hba->ccb[tag].sg_count = 0;
-	hba->ccb[tag].sense_bufflen = 0;
-	hba->ccb[tag].sense_buffer = NULL;
-	hba->ccb[tag].req_type |= PASSTHRU_REQ_TYPE;
-
-	stex_send_cmd(hba, req, tag);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-
-	wait_event_timeout(hba->waitq,
-		!(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT * HZ);
-	if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE)
-		return;
-
-	stex_free_tag(hba, (unsigned long *)&hba->tag, tag);
+	scsi_deactivate_tcq(sdev, 1);
 }
 
 static int
@@ -650,8 +589,9 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
 
 	cmd->scsi_done = done;
 
-	if (unlikely((tag = __stex_alloc_tag((unsigned long *)&hba->tag))
-		== TAG_BITMAP_LENGTH))
+	tag = cmd->request->tag;
+
+	if (unlikely(tag >= host->can_queue))
 		return SCSI_MLQUEUE_HOST_BUSY;
 
 	req = stex_alloc_req(hba);
@@ -771,26 +711,18 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
 	while (hba->status_tail != hba->status_head) {
 		resp = stex_get_status(hba);
 		tag = le16_to_cpu(resp->tag);
-		if (unlikely(tag >= TAG_BITMAP_LENGTH)) {
+		if (unlikely(tag >= hba->host->can_queue)) {
 			printk(KERN_WARNING DRV_NAME
 				"(%s): invalid tag\n", pci_name(hba->pdev));
 			continue;
 		}
-		if (unlikely((hba->tag & (1 << tag)) == 0)) {
-			printk(KERN_WARNING DRV_NAME
-				"(%s): null tag\n", pci_name(hba->pdev));
-			continue;
-		}
 
-		hba->out_req_cnt--;
 		ccb = &hba->ccb[tag];
 		if (hba->wait_ccb == ccb)
 			hba->wait_ccb = NULL;
 		if (unlikely(ccb->req == NULL)) {
 			printk(KERN_WARNING DRV_NAME
 				"(%s): lagging req\n", pci_name(hba->pdev));
-			__stex_free_tag((unsigned long *)&hba->tag, tag);
-			stex_unmap_sg(hba, ccb->cmd); /* ??? */
 			continue;
 		}
 
@@ -808,7 +740,15 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
 		ccb->srb_status = resp->srb_status;
 		ccb->scsi_status = resp->scsi_status;
 
-		if (ccb->req_type & PASSTHRU_REQ_TYPE) {
+		if (likely(ccb->cmd != NULL)) {
+			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
+				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
+				stex_controller_info(hba, ccb);
+			stex_unmap_sg(hba, ccb->cmd);
+			stex_scsi_done(ccb);
+			hba->out_req_cnt--;
+		} else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
+			hba->out_req_cnt--;
 			if (ccb->req_type & PASSTHRU_REQ_NO_WAKEUP) {
 				ccb->req_type = 0;
 				continue;
@@ -816,14 +756,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
 			ccb->req_type = 0;
 			if (waitqueue_active(&hba->waitq))
 				wake_up(&hba->waitq);
-			continue;
 		}
-		if (ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
-			ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)
-			stex_controller_info(hba, ccb);
-		__stex_free_tag((unsigned long *)&hba->tag, tag);
-		stex_unmap_sg(hba, ccb->cmd);
-		stex_scsi_done(ccb);
 	}
 
 update_status:
@@ -933,21 +866,24 @@ static int stex_abort(struct scsi_cmnd *cmd)
 {
 	struct Scsi_Host *host = cmd->device->host;
 	struct st_hba *hba = (struct st_hba *)host->hostdata;
-	u16 tag;
+	u16 tag = cmd->request->tag;
 	void __iomem *base;
 	u32 data;
 	int result = SUCCESS;
 	unsigned long flags;
 	base = hba->mmio_base;
 	spin_lock_irqsave(host->host_lock, flags);
-
-	for (tag = 0; tag < MU_MAX_REQUEST; tag++)
-		if (hba->ccb[tag].cmd == cmd && (hba->tag & (1 << tag))) {
-			hba->wait_ccb = &(hba->ccb[tag]);
-			break;
-		}
-	if (tag >= MU_MAX_REQUEST)
-		goto out;
+	if (tag < host->can_queue && hba->ccb[tag].cmd == cmd)
+		hba->wait_ccb = &hba->ccb[tag];
+	else {
+		for (tag = 0; tag < host->can_queue; tag++)
+			if (hba->ccb[tag].cmd == cmd) {
+				hba->wait_ccb = &hba->ccb[tag];
+				break;
+			}
+		if (tag >= host->can_queue)
+			goto out;
+	}
 
 	data = readl(base + ODBL);
 	if (data == 0 || data == 0xffffffff)
@@ -965,6 +901,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
 	}
 
 fail_out:
+	stex_unmap_sg(hba, cmd);
 	hba->wait_ccb->req = NULL; /* nullify the req's future return */
 	hba->wait_ccb = NULL;
 	result = FAILED;
@@ -1025,7 +962,6 @@ static int stex_reset(struct scsi_cmnd *cmd)
 		return FAILED;
 	}
 	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->tag = 0;
 	hba->req_head = 0;
 	hba->req_tail = 0;
 	hba->status_head = 0;
@@ -1061,6 +997,7 @@ static struct scsi_host_template driver_template = {
 	.proc_name = DRV_NAME,
 	.bios_param = stex_biosparam,
 	.queuecommand = stex_queuecommand,
+	.slave_alloc = stex_slave_alloc,
 	.slave_configure = stex_slave_config,
 	.slave_destroy = stex_slave_destroy,
 	.eh_abort_handler = stex_abort,
@@ -1171,6 +1108,14 @@ stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 	if (err)
 		goto out_free_irq;
 
+	scsi_init_shared_tag_map(host, ST_CAN_QUEUE);
+	if (host->bqt == NULL) {
+		err = -ENOMEM;
+		printk(KERN_ERR DRV_NAME "(%s): init shared queue failed\n",
+			pci_name(pdev));
+		goto out_free_irq;
+	}
+
 	pci_set_drvdata(pdev, hba);
 
 	err = scsi_add_host(host, &pdev->dev);
@@ -1206,15 +1151,7 @@ static void stex_hba_stop(struct st_hba *hba)
 	struct req_msg *req;
 	unsigned long flags;
 	unsigned long before;
-	u16 tag;
-
-	before = jiffies;
-	while ((tag = stex_alloc_tag(hba, (unsigned long *)&hba->tag))
-		== TAG_BITMAP_LENGTH) {
-		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ))
-			return;
-		msleep(10);
-	}
+	u16 tag = 0;
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	req = stex_alloc_req(hba);
@@ -1233,12 +1170,12 @@ static void stex_hba_stop(struct st_hba *hba)
 	stex_send_cmd(hba, req, tag);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	wait_event_timeout(hba->waitq,
-		!(hba->ccb[tag].req_type), ST_INTERNAL_TIMEOUT * HZ);
-	if (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE)
-		return;
-
-	stex_free_tag(hba, (unsigned long *)&hba->tag, tag);
+	before = jiffies;
+	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
+		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ))
+			return;
+		msleep(10);
+	}
 }
 
 static void stex_hba_free(struct st_hba *hba)