aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/scsi/aic7xxx/aic7xxx_osm.c
diff options
context:
space:
mode:
authorJames Bottomley <James.Bottomley@steeleye.com>2005-05-16 17:39:38 -0400
committerJames Bottomley <jejb@mulgrave.(none)>2005-05-20 16:54:34 -0400
commite4e360c325c90f7830baaa2a27cd7a1f2bdeb6b0 (patch)
tree238462ee734bd13cb6b7036b4dc207ecf57f3a48 /drivers/scsi/aic7xxx/aic7xxx_osm.c
parentfad01ef88d2a27303757924c1fc013b31fe9a76b (diff)
[SCSI] remove aic7xxx busyq
The aic7xxx driver has two spurious queues in its linux glue code: the busyq which queues incoming commands to the driver and the completeq which queues finished commands before sending them back to the mid-layer. This patch just removes the busyq and makes the aic finally return the correct status to get the mid-layer to manage its queueing, so a command is either committed to the sequencer or returned to the midlayer for requeue. Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
Diffstat (limited to 'drivers/scsi/aic7xxx/aic7xxx_osm.c')
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c625
1 files changed, 190 insertions, 435 deletions
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 0ed9ccc3091e..9017942407d8 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -458,26 +458,20 @@ static struct ahc_linux_device* ahc_linux_alloc_device(struct ahc_softc*,
458 u_int); 458 u_int);
459static void ahc_linux_free_device(struct ahc_softc*, 459static void ahc_linux_free_device(struct ahc_softc*,
460 struct ahc_linux_device*); 460 struct ahc_linux_device*);
461static void ahc_linux_run_device_queue(struct ahc_softc*, 461static int ahc_linux_run_command(struct ahc_softc*,
462 struct ahc_linux_device*); 462 struct ahc_linux_device *,
463 struct scsi_cmnd *);
463static void ahc_linux_setup_tag_info_global(char *p); 464static void ahc_linux_setup_tag_info_global(char *p);
464static aic_option_callback_t ahc_linux_setup_tag_info; 465static aic_option_callback_t ahc_linux_setup_tag_info;
465static int aic7xxx_setup(char *s); 466static int aic7xxx_setup(char *s);
466static int ahc_linux_next_unit(void); 467static int ahc_linux_next_unit(void);
467static void ahc_runq_tasklet(unsigned long data);
468static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc); 468static struct ahc_cmd *ahc_linux_run_complete_queue(struct ahc_softc *ahc);
469 469
470/********************************* Inlines ************************************/ 470/********************************* Inlines ************************************/
471static __inline void ahc_schedule_runq(struct ahc_softc *ahc);
472static __inline struct ahc_linux_device* 471static __inline struct ahc_linux_device*
473 ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, 472 ahc_linux_get_device(struct ahc_softc *ahc, u_int channel,
474 u_int target, u_int lun, int alloc); 473 u_int target, u_int lun, int alloc);
475static __inline void ahc_schedule_completeq(struct ahc_softc *ahc); 474static __inline void ahc_schedule_completeq(struct ahc_softc *ahc);
476static __inline void ahc_linux_check_device_queue(struct ahc_softc *ahc,
477 struct ahc_linux_device *dev);
478static __inline struct ahc_linux_device *
479 ahc_linux_next_device_to_run(struct ahc_softc *ahc);
480static __inline void ahc_linux_run_device_queues(struct ahc_softc *ahc);
481static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*); 475static __inline void ahc_linux_unmap_scb(struct ahc_softc*, struct scb*);
482 476
483static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb, 477static __inline int ahc_linux_map_seg(struct ahc_softc *ahc, struct scb *scb,
@@ -494,15 +488,6 @@ ahc_schedule_completeq(struct ahc_softc *ahc)
494 } 488 }
495} 489}
496 490
497/*
498 * Must be called with our lock held.
499 */
500static __inline void
501ahc_schedule_runq(struct ahc_softc *ahc)
502{
503 tasklet_schedule(&ahc->platform_data->runq_tasklet);
504}
505
506static __inline struct ahc_linux_device* 491static __inline struct ahc_linux_device*
507ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target, 492ahc_linux_get_device(struct ahc_softc *ahc, u_int channel, u_int target,
508 u_int lun, int alloc) 493 u_int lun, int alloc)
@@ -569,45 +554,6 @@ ahc_linux_run_complete_queue(struct ahc_softc *ahc)
569} 554}
570 555
571static __inline void 556static __inline void
572ahc_linux_check_device_queue(struct ahc_softc *ahc,
573 struct ahc_linux_device *dev)
574{
575 if ((dev->flags & AHC_DEV_FREEZE_TIL_EMPTY) != 0
576 && dev->active == 0) {
577 dev->flags &= ~AHC_DEV_FREEZE_TIL_EMPTY;
578 dev->qfrozen--;
579 }
580
581 if (TAILQ_FIRST(&dev->busyq) == NULL
582 || dev->openings == 0 || dev->qfrozen != 0)
583 return;
584
585 ahc_linux_run_device_queue(ahc, dev);
586}
587
588static __inline struct ahc_linux_device *
589ahc_linux_next_device_to_run(struct ahc_softc *ahc)
590{
591
592 if ((ahc->flags & AHC_RESOURCE_SHORTAGE) != 0
593 || (ahc->platform_data->qfrozen != 0))
594 return (NULL);
595 return (TAILQ_FIRST(&ahc->platform_data->device_runq));
596}
597
598static __inline void
599ahc_linux_run_device_queues(struct ahc_softc *ahc)
600{
601 struct ahc_linux_device *dev;
602
603 while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
604 TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
605 dev->flags &= ~AHC_DEV_ON_RUN_LIST;
606 ahc_linux_check_device_queue(ahc, dev);
607 }
608}
609
610static __inline void
611ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb) 557ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
612{ 558{
613 Scsi_Cmnd *cmd; 559 Scsi_Cmnd *cmd;
@@ -871,7 +817,6 @@ ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
871{ 817{
872 struct ahc_softc *ahc; 818 struct ahc_softc *ahc;
873 struct ahc_linux_device *dev; 819 struct ahc_linux_device *dev;
874 u_long flags;
875 820
876 ahc = *(struct ahc_softc **)cmd->device->host->hostdata; 821 ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
877 822
@@ -880,42 +825,22 @@ ahc_linux_queue(Scsi_Cmnd * cmd, void (*scsi_done) (Scsi_Cmnd *))
880 */ 825 */
881 cmd->scsi_done = scsi_done; 826 cmd->scsi_done = scsi_done;
882 827
883 ahc_midlayer_entrypoint_lock(ahc, &flags);
884
885 /* 828 /*
886 * Close the race of a command that was in the process of 829 * Close the race of a command that was in the process of
887 * being queued to us just as our simq was frozen. Let 830 * being queued to us just as our simq was frozen. Let
888 * DV commands through so long as we are only frozen to 831 * DV commands through so long as we are only frozen to
889 * perform DV. 832 * perform DV.
890 */ 833 */
891 if (ahc->platform_data->qfrozen != 0) { 834 if (ahc->platform_data->qfrozen != 0)
835 return SCSI_MLQUEUE_HOST_BUSY;
892 836
893 ahc_cmd_set_transaction_status(cmd, CAM_REQUEUE_REQ);
894 ahc_linux_queue_cmd_complete(ahc, cmd);
895 ahc_schedule_completeq(ahc);
896 ahc_midlayer_entrypoint_unlock(ahc, &flags);
897 return (0);
898 }
899 dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id, 837 dev = ahc_linux_get_device(ahc, cmd->device->channel, cmd->device->id,
900 cmd->device->lun, /*alloc*/TRUE); 838 cmd->device->lun, /*alloc*/TRUE);
901 if (dev == NULL) { 839 BUG_ON(dev == NULL);
902 ahc_cmd_set_transaction_status(cmd, CAM_RESRC_UNAVAIL); 840
903 ahc_linux_queue_cmd_complete(ahc, cmd);
904 ahc_schedule_completeq(ahc);
905 ahc_midlayer_entrypoint_unlock(ahc, &flags);
906 printf("%s: aic7xxx_linux_queue - Unable to allocate device!\n",
907 ahc_name(ahc));
908 return (0);
909 }
910 cmd->result = CAM_REQ_INPROG << 16; 841 cmd->result = CAM_REQ_INPROG << 16;
911 TAILQ_INSERT_TAIL(&dev->busyq, (struct ahc_cmd *)cmd, acmd_links.tqe); 842
912 if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) { 843 return ahc_linux_run_command(ahc, dev, cmd);
913 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
914 dev->flags |= AHC_DEV_ON_RUN_LIST;
915 ahc_linux_run_device_queues(ahc);
916 }
917 ahc_midlayer_entrypoint_unlock(ahc, &flags);
918 return (0);
919} 844}
920 845
921#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 846#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
@@ -987,8 +912,7 @@ ahc_linux_slave_destroy(Scsi_Device *device)
987 if (dev != NULL 912 if (dev != NULL
988 && (dev->flags & AHC_DEV_SLAVE_CONFIGURED) != 0) { 913 && (dev->flags & AHC_DEV_SLAVE_CONFIGURED) != 0) {
989 dev->flags |= AHC_DEV_UNCONFIGURED; 914 dev->flags |= AHC_DEV_UNCONFIGURED;
990 if (TAILQ_EMPTY(&dev->busyq) 915 if (dev->active == 0
991 && dev->active == 0
992 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0) 916 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
993 ahc_linux_free_device(ahc, dev); 917 ahc_linux_free_device(ahc, dev);
994 } 918 }
@@ -1206,33 +1130,6 @@ Scsi_Host_Template aic7xxx_driver_template = {
1206 1130
1207/**************************** Tasklet Handler *********************************/ 1131/**************************** Tasklet Handler *********************************/
1208 1132
1209/*
1210 * In 2.4.X and above, this routine is called from a tasklet,
1211 * so we must re-acquire our lock prior to executing this code.
1212 * In all prior kernels, ahc_schedule_runq() calls this routine
1213 * directly and ahc_schedule_runq() is called with our lock held.
1214 */
1215static void
1216ahc_runq_tasklet(unsigned long data)
1217{
1218 struct ahc_softc* ahc;
1219 struct ahc_linux_device *dev;
1220 u_long flags;
1221
1222 ahc = (struct ahc_softc *)data;
1223 ahc_lock(ahc, &flags);
1224 while ((dev = ahc_linux_next_device_to_run(ahc)) != NULL) {
1225
1226 TAILQ_REMOVE(&ahc->platform_data->device_runq, dev, links);
1227 dev->flags &= ~AHC_DEV_ON_RUN_LIST;
1228 ahc_linux_check_device_queue(ahc, dev);
1229 /* Yeild to our interrupt handler */
1230 ahc_unlock(ahc, &flags);
1231 ahc_lock(ahc, &flags);
1232 }
1233 ahc_unlock(ahc, &flags);
1234}
1235
1236/******************************** Macros **************************************/ 1133/******************************** Macros **************************************/
1237#define BUILD_SCSIID(ahc, cmd) \ 1134#define BUILD_SCSIID(ahc, cmd) \
1238 ((((cmd)->device->id << TID_SHIFT) & TID) \ 1135 ((((cmd)->device->id << TID_SHIFT) & TID) \
@@ -1728,8 +1625,6 @@ ahc_platform_alloc(struct ahc_softc *ahc, void *platform_arg)
1728 ahc->platform_data->completeq_timer.function = 1625 ahc->platform_data->completeq_timer.function =
1729 (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue; 1626 (ahc_linux_callback_t *)ahc_linux_thread_run_complete_queue;
1730 init_MUTEX_LOCKED(&ahc->platform_data->eh_sem); 1627 init_MUTEX_LOCKED(&ahc->platform_data->eh_sem);
1731 tasklet_init(&ahc->platform_data->runq_tasklet, ahc_runq_tasklet,
1732 (unsigned long)ahc);
1733 ahc->seltime = (aic7xxx_seltime & 0x3) << 4; 1628 ahc->seltime = (aic7xxx_seltime & 0x3) << 4;
1734 ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4; 1629 ahc->seltime_b = (aic7xxx_seltime & 0x3) << 4;
1735 if (aic7xxx_pci_parity == 0) 1630 if (aic7xxx_pci_parity == 0)
@@ -1747,7 +1642,6 @@ ahc_platform_free(struct ahc_softc *ahc)
1747 1642
1748 if (ahc->platform_data != NULL) { 1643 if (ahc->platform_data != NULL) {
1749 del_timer_sync(&ahc->platform_data->completeq_timer); 1644 del_timer_sync(&ahc->platform_data->completeq_timer);
1750 tasklet_kill(&ahc->platform_data->runq_tasklet);
1751 if (ahc->platform_data->host != NULL) { 1645 if (ahc->platform_data->host != NULL) {
1752#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 1646#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
1753 scsi_remove_host(ahc->platform_data->host); 1647 scsi_remove_host(ahc->platform_data->host);
@@ -1906,71 +1800,7 @@ int
1906ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel, 1800ahc_platform_abort_scbs(struct ahc_softc *ahc, int target, char channel,
1907 int lun, u_int tag, role_t role, uint32_t status) 1801 int lun, u_int tag, role_t role, uint32_t status)
1908{ 1802{
1909 int chan; 1803 return 0;
1910 int maxchan;
1911 int targ;
1912 int maxtarg;
1913 int clun;
1914 int maxlun;
1915 int count;
1916
1917 if (tag != SCB_LIST_NULL)
1918 return (0);
1919
1920 chan = 0;
1921 if (channel != ALL_CHANNELS) {
1922 chan = channel - 'A';
1923 maxchan = chan + 1;
1924 } else {
1925 maxchan = (ahc->features & AHC_TWIN) ? 2 : 1;
1926 }
1927 targ = 0;
1928 if (target != CAM_TARGET_WILDCARD) {
1929 targ = target;
1930 maxtarg = targ + 1;
1931 } else {
1932 maxtarg = (ahc->features & AHC_WIDE) ? 16 : 8;
1933 }
1934 clun = 0;
1935 if (lun != CAM_LUN_WILDCARD) {
1936 clun = lun;
1937 maxlun = clun + 1;
1938 } else {
1939 maxlun = AHC_NUM_LUNS;
1940 }
1941
1942 count = 0;
1943 for (; chan < maxchan; chan++) {
1944
1945 for (; targ < maxtarg; targ++) {
1946
1947 for (; clun < maxlun; clun++) {
1948 struct ahc_linux_device *dev;
1949 struct ahc_busyq *busyq;
1950 struct ahc_cmd *acmd;
1951
1952 dev = ahc_linux_get_device(ahc, chan,
1953 targ, clun,
1954 /*alloc*/FALSE);
1955 if (dev == NULL)
1956 continue;
1957
1958 busyq = &dev->busyq;
1959 while ((acmd = TAILQ_FIRST(busyq)) != NULL) {
1960 Scsi_Cmnd *cmd;
1961
1962 cmd = &acmd_scsi_cmd(acmd);
1963 TAILQ_REMOVE(busyq, acmd,
1964 acmd_links.tqe);
1965 count++;
1966 cmd->result = status << 16;
1967 ahc_linux_queue_cmd_complete(ahc, cmd);
1968 }
1969 }
1970 }
1971 }
1972
1973 return (count);
1974} 1804}
1975 1805
1976static void 1806static void
@@ -2045,213 +1875,203 @@ ahc_linux_device_queue_depth(struct ahc_softc *ahc,
2045 } 1875 }
2046} 1876}
2047 1877
2048static void 1878static int
2049ahc_linux_run_device_queue(struct ahc_softc *ahc, struct ahc_linux_device *dev) 1879ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1880 struct scsi_cmnd *cmd)
2050{ 1881{
2051 struct ahc_cmd *acmd;
2052 struct scsi_cmnd *cmd;
2053 struct scb *scb; 1882 struct scb *scb;
2054 struct hardware_scb *hscb; 1883 struct hardware_scb *hscb;
2055 struct ahc_initiator_tinfo *tinfo; 1884 struct ahc_initiator_tinfo *tinfo;
2056 struct ahc_tmode_tstate *tstate; 1885 struct ahc_tmode_tstate *tstate;
2057 uint16_t mask; 1886 uint16_t mask;
1887 struct scb_tailq *untagged_q = NULL;
2058 1888
2059 if ((dev->flags & AHC_DEV_ON_RUN_LIST) != 0) 1889 /*
2060 panic("running device on run list"); 1890 * Schedule us to run later. The only reason we are not
1891 * running is because the whole controller Q is frozen.
1892 */
1893 if (ahc->platform_data->qfrozen != 0)
1894 return SCSI_MLQUEUE_HOST_BUSY;
2061 1895
2062 while ((acmd = TAILQ_FIRST(&dev->busyq)) != NULL 1896 /*
2063 && dev->openings > 0 && dev->qfrozen == 0) { 1897 * We only allow one untagged transaction
1898 * per target in the initiator role unless
1899 * we are storing a full busy target *lun*
1900 * table in SCB space.
1901 */
1902 if (!blk_rq_tagged(cmd->request)
1903 && (ahc->features & AHC_SCB_BTT) == 0) {
1904 int target_offset;
2064 1905
2065 /* 1906 target_offset = cmd->device->id + cmd->device->channel * 8;
2066 * Schedule us to run later. The only reason we are not 1907 untagged_q = &(ahc->untagged_queues[target_offset]);
2067 * running is because the whole controller Q is frozen. 1908 if (!TAILQ_EMPTY(untagged_q))
2068 */ 1909 /* if we're already executing an untagged command
2069 if (ahc->platform_data->qfrozen != 0) { 1910 * we're busy to another */
2070 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, 1911 return SCSI_MLQUEUE_DEVICE_BUSY;
2071 dev, links); 1912 }
2072 dev->flags |= AHC_DEV_ON_RUN_LIST; 1913
2073 return; 1914 /*
2074 } 1915 * Get an scb to use.
2075 /* 1916 */
2076 * Get an scb to use. 1917 if ((scb = ahc_get_scb(ahc)) == NULL) {
2077 */
2078 if ((scb = ahc_get_scb(ahc)) == NULL) {
2079 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq,
2080 dev, links);
2081 dev->flags |= AHC_DEV_ON_RUN_LIST;
2082 ahc->flags |= AHC_RESOURCE_SHORTAGE; 1918 ahc->flags |= AHC_RESOURCE_SHORTAGE;
2083 return; 1919 return SCSI_MLQUEUE_HOST_BUSY;
2084 } 1920 }
2085 TAILQ_REMOVE(&dev->busyq, acmd, acmd_links.tqe);
2086 cmd = &acmd_scsi_cmd(acmd);
2087 scb->io_ctx = cmd;
2088 scb->platform_data->dev = dev;
2089 hscb = scb->hscb;
2090 cmd->host_scribble = (char *)scb;
2091 1921
2092 /* 1922 scb->io_ctx = cmd;
2093 * Fill out basics of the HSCB. 1923 scb->platform_data->dev = dev;
2094 */ 1924 hscb = scb->hscb;
2095 hscb->control = 0; 1925 cmd->host_scribble = (char *)scb;
2096 hscb->scsiid = BUILD_SCSIID(ahc, cmd);
2097 hscb->lun = cmd->device->lun;
2098 mask = SCB_GET_TARGET_MASK(ahc, scb);
2099 tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
2100 SCB_GET_OUR_ID(scb),
2101 SCB_GET_TARGET(ahc, scb), &tstate);
2102 hscb->scsirate = tinfo->scsirate;
2103 hscb->scsioffset = tinfo->curr.offset;
2104 if ((tstate->ultraenb & mask) != 0)
2105 hscb->control |= ULTRAENB;
2106
2107 if ((ahc->user_discenable & mask) != 0)
2108 hscb->control |= DISCENB;
2109
2110 if ((tstate->auto_negotiate & mask) != 0) {
2111 scb->flags |= SCB_AUTO_NEGOTIATE;
2112 scb->hscb->control |= MK_MESSAGE;
2113 }
2114 1926
2115 if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) { 1927 /*
1928 * Fill out basics of the HSCB.
1929 */
1930 hscb->control = 0;
1931 hscb->scsiid = BUILD_SCSIID(ahc, cmd);
1932 hscb->lun = cmd->device->lun;
1933 mask = SCB_GET_TARGET_MASK(ahc, scb);
1934 tinfo = ahc_fetch_transinfo(ahc, SCB_GET_CHANNEL(ahc, scb),
1935 SCB_GET_OUR_ID(scb),
1936 SCB_GET_TARGET(ahc, scb), &tstate);
1937 hscb->scsirate = tinfo->scsirate;
1938 hscb->scsioffset = tinfo->curr.offset;
1939 if ((tstate->ultraenb & mask) != 0)
1940 hscb->control |= ULTRAENB;
1941
1942 if ((ahc->user_discenable & mask) != 0)
1943 hscb->control |= DISCENB;
1944
1945 if ((tstate->auto_negotiate & mask) != 0) {
1946 scb->flags |= SCB_AUTO_NEGOTIATE;
1947 scb->hscb->control |= MK_MESSAGE;
1948 }
1949
1950 if ((dev->flags & (AHC_DEV_Q_TAGGED|AHC_DEV_Q_BASIC)) != 0) {
2116#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) 1951#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0)
2117 int msg_bytes; 1952 int msg_bytes;
2118 uint8_t tag_msgs[2]; 1953 uint8_t tag_msgs[2];
2119 1954
2120 msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs); 1955 msg_bytes = scsi_populate_tag_msg(cmd, tag_msgs);
2121 if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) { 1956 if (msg_bytes && tag_msgs[0] != MSG_SIMPLE_TASK) {
2122 hscb->control |= tag_msgs[0]; 1957 hscb->control |= tag_msgs[0];
2123 if (tag_msgs[0] == MSG_ORDERED_TASK) 1958 if (tag_msgs[0] == MSG_ORDERED_TASK)
2124 dev->commands_since_idle_or_otag = 0;
2125 } else
2126#endif
2127 if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
2128 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
2129 hscb->control |= MSG_ORDERED_TASK;
2130 dev->commands_since_idle_or_otag = 0; 1959 dev->commands_since_idle_or_otag = 0;
2131 } else { 1960 } else
2132 hscb->control |= MSG_SIMPLE_TASK; 1961#endif
2133 } 1962 if (dev->commands_since_idle_or_otag == AHC_OTAG_THRESH
2134 } 1963 && (dev->flags & AHC_DEV_Q_TAGGED) != 0) {
2135 1964 hscb->control |= MSG_ORDERED_TASK;
2136 hscb->cdb_len = cmd->cmd_len; 1965 dev->commands_since_idle_or_otag = 0;
2137 if (hscb->cdb_len <= 12) {
2138 memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
2139 } else { 1966 } else {
2140 memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len); 1967 hscb->control |= MSG_SIMPLE_TASK;
2141 scb->flags |= SCB_CDB32_PTR;
2142 } 1968 }
1969 }
2143 1970
2144 scb->platform_data->xfer_len = 0; 1971 hscb->cdb_len = cmd->cmd_len;
2145 ahc_set_residual(scb, 0); 1972 if (hscb->cdb_len <= 12) {
2146 ahc_set_sense_residual(scb, 0); 1973 memcpy(hscb->shared_data.cdb, cmd->cmnd, hscb->cdb_len);
2147 scb->sg_count = 0; 1974 } else {
2148 if (cmd->use_sg != 0) { 1975 memcpy(hscb->cdb32, cmd->cmnd, hscb->cdb_len);
2149 struct ahc_dma_seg *sg; 1976 scb->flags |= SCB_CDB32_PTR;
2150 struct scatterlist *cur_seg; 1977 }
2151 struct scatterlist *end_seg;
2152 int nseg;
2153
2154 cur_seg = (struct scatterlist *)cmd->request_buffer;
2155 nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
2156 cmd->sc_data_direction);
2157 end_seg = cur_seg + nseg;
2158 /* Copy the segments into the SG list. */
2159 sg = scb->sg_list;
2160 /*
2161 * The sg_count may be larger than nseg if
2162 * a transfer crosses a 32bit page.
2163 */
2164 while (cur_seg < end_seg) {
2165 dma_addr_t addr;
2166 bus_size_t len;
2167 int consumed;
2168
2169 addr = sg_dma_address(cur_seg);
2170 len = sg_dma_len(cur_seg);
2171 consumed = ahc_linux_map_seg(ahc, scb,
2172 sg, addr, len);
2173 sg += consumed;
2174 scb->sg_count += consumed;
2175 cur_seg++;
2176 }
2177 sg--;
2178 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
2179
2180 /*
2181 * Reset the sg list pointer.
2182 */
2183 scb->hscb->sgptr =
2184 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
2185 1978
2186 /* 1979 scb->platform_data->xfer_len = 0;
2187 * Copy the first SG into the "current" 1980 ahc_set_residual(scb, 0);
2188 * data pointer area. 1981 ahc_set_sense_residual(scb, 0);
2189 */ 1982 scb->sg_count = 0;
2190 scb->hscb->dataptr = scb->sg_list->addr; 1983 if (cmd->use_sg != 0) {
2191 scb->hscb->datacnt = scb->sg_list->len; 1984 struct ahc_dma_seg *sg;
2192 } else if (cmd->request_bufflen != 0) { 1985 struct scatterlist *cur_seg;
2193 struct ahc_dma_seg *sg; 1986 struct scatterlist *end_seg;
1987 int nseg;
1988
1989 cur_seg = (struct scatterlist *)cmd->request_buffer;
1990 nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
1991 cmd->sc_data_direction);
1992 end_seg = cur_seg + nseg;
1993 /* Copy the segments into the SG list. */
1994 sg = scb->sg_list;
1995 /*
1996 * The sg_count may be larger than nseg if
1997 * a transfer crosses a 32bit page.
1998 */
1999 while (cur_seg < end_seg) {
2194 dma_addr_t addr; 2000 dma_addr_t addr;
2195 2001 bus_size_t len;
2196 sg = scb->sg_list; 2002 int consumed;
2197 addr = pci_map_single(ahc->dev_softc, 2003
2198 cmd->request_buffer, 2004 addr = sg_dma_address(cur_seg);
2199 cmd->request_bufflen, 2005 len = sg_dma_len(cur_seg);
2200 cmd->sc_data_direction); 2006 consumed = ahc_linux_map_seg(ahc, scb,
2201 scb->platform_data->buf_busaddr = addr; 2007 sg, addr, len);
2202 scb->sg_count = ahc_linux_map_seg(ahc, scb, 2008 sg += consumed;
2203 sg, addr, 2009 scb->sg_count += consumed;
2204 cmd->request_bufflen); 2010 cur_seg++;
2205 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
2206
2207 /*
2208 * Reset the sg list pointer.
2209 */
2210 scb->hscb->sgptr =
2211 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
2212
2213 /*
2214 * Copy the first SG into the "current"
2215 * data pointer area.
2216 */
2217 scb->hscb->dataptr = sg->addr;
2218 scb->hscb->datacnt = sg->len;
2219 } else {
2220 scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
2221 scb->hscb->dataptr = 0;
2222 scb->hscb->datacnt = 0;
2223 scb->sg_count = 0;
2224 } 2011 }
2012 sg--;
2013 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
2225 2014
2226 ahc_sync_sglist(ahc, scb, BUS_DMASYNC_PREWRITE); 2015 /*
2227 LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links); 2016 * Reset the sg list pointer.
2228 dev->openings--; 2017 */
2229 dev->active++; 2018 scb->hscb->sgptr =
2230 dev->commands_issued++; 2019 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
2231 if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0) 2020
2232 dev->commands_since_idle_or_otag++; 2021 /*
2022 * Copy the first SG into the "current"
2023 * data pointer area.
2024 */
2025 scb->hscb->dataptr = scb->sg_list->addr;
2026 scb->hscb->datacnt = scb->sg_list->len;
2027 } else if (cmd->request_bufflen != 0) {
2028 struct ahc_dma_seg *sg;
2029 dma_addr_t addr;
2030
2031 sg = scb->sg_list;
2032 addr = pci_map_single(ahc->dev_softc,
2033 cmd->request_buffer,
2034 cmd->request_bufflen,
2035 cmd->sc_data_direction);
2036 scb->platform_data->buf_busaddr = addr;
2037 scb->sg_count = ahc_linux_map_seg(ahc, scb,
2038 sg, addr,
2039 cmd->request_bufflen);
2040 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
2233 2041
2234 /* 2042 /*
2235 * We only allow one untagged transaction 2043 * Reset the sg list pointer.
2236 * per target in the initiator role unless
2237 * we are storing a full busy target *lun*
2238 * table in SCB space.
2239 */ 2044 */
2240 if ((scb->hscb->control & (TARGET_SCB|TAG_ENB)) == 0 2045 scb->hscb->sgptr =
2241 && (ahc->features & AHC_SCB_BTT) == 0) { 2046 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
2242 struct scb_tailq *untagged_q; 2047
2243 int target_offset; 2048 /*
2244 2049 * Copy the first SG into the "current"
2245 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); 2050 * data pointer area.
2246 untagged_q = &(ahc->untagged_queues[target_offset]); 2051 */
2247 TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe); 2052 scb->hscb->dataptr = sg->addr;
2248 scb->flags |= SCB_UNTAGGEDQ; 2053 scb->hscb->datacnt = sg->len;
2249 if (TAILQ_FIRST(untagged_q) != scb) 2054 } else {
2250 continue; 2055 scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
2251 } 2056 scb->hscb->dataptr = 0;
2252 scb->flags |= SCB_ACTIVE; 2057 scb->hscb->datacnt = 0;
2253 ahc_queue_scb(ahc, scb); 2058 scb->sg_count = 0;
2059 }
2060
2061 LIST_INSERT_HEAD(&ahc->pending_scbs, scb, pending_links);
2062 dev->openings--;
2063 dev->active++;
2064 dev->commands_issued++;
2065 if ((dev->flags & AHC_DEV_PERIODIC_OTAG) != 0)
2066 dev->commands_since_idle_or_otag++;
2067
2068 scb->flags |= SCB_ACTIVE;
2069 if (untagged_q) {
2070 TAILQ_INSERT_TAIL(untagged_q, scb, links.tqe);
2071 scb->flags |= SCB_UNTAGGEDQ;
2254 } 2072 }
2073 ahc_queue_scb(ahc, scb);
2074 return 0;
2255} 2075}
2256 2076
2257/* 2077/*
@@ -2267,8 +2087,6 @@ ahc_linux_isr(int irq, void *dev_id, struct pt_regs * regs)
2267 ahc = (struct ahc_softc *) dev_id; 2087 ahc = (struct ahc_softc *) dev_id;
2268 ahc_lock(ahc, &flags); 2088 ahc_lock(ahc, &flags);
2269 ours = ahc_intr(ahc); 2089 ours = ahc_intr(ahc);
2270 if (ahc_linux_next_device_to_run(ahc) != NULL)
2271 ahc_schedule_runq(ahc);
2272 ahc_linux_run_complete_queue(ahc); 2090 ahc_linux_run_complete_queue(ahc);
2273 ahc_unlock(ahc, &flags); 2091 ahc_unlock(ahc, &flags);
2274 return IRQ_RETVAL(ours); 2092 return IRQ_RETVAL(ours);
@@ -2349,7 +2167,6 @@ ahc_linux_alloc_device(struct ahc_softc *ahc,
2349 return (NULL); 2167 return (NULL);
2350 memset(dev, 0, sizeof(*dev)); 2168 memset(dev, 0, sizeof(*dev));
2351 init_timer(&dev->timer); 2169 init_timer(&dev->timer);
2352 TAILQ_INIT(&dev->busyq);
2353 dev->flags = AHC_DEV_UNCONFIGURED; 2170 dev->flags = AHC_DEV_UNCONFIGURED;
2354 dev->lun = lun; 2171 dev->lun = lun;
2355 dev->target = targ; 2172 dev->target = targ;
@@ -2515,7 +2332,7 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
2515 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb); 2332 target_offset = SCB_GET_TARGET_OFFSET(ahc, scb);
2516 untagged_q = &(ahc->untagged_queues[target_offset]); 2333 untagged_q = &(ahc->untagged_queues[target_offset]);
2517 TAILQ_REMOVE(untagged_q, scb, links.tqe); 2334 TAILQ_REMOVE(untagged_q, scb, links.tqe);
2518 ahc_run_untagged_queue(ahc, untagged_q); 2335 BUG_ON(!TAILQ_EMPTY(untagged_q));
2519 } 2336 }
2520 2337
2521 if ((scb->flags & SCB_ACTIVE) == 0) { 2338 if ((scb->flags & SCB_ACTIVE) == 0) {
@@ -2606,12 +2423,11 @@ ahc_done(struct ahc_softc *ahc, struct scb *scb)
2606 if (dev->active == 0) 2423 if (dev->active == 0)
2607 dev->commands_since_idle_or_otag = 0; 2424 dev->commands_since_idle_or_otag = 0;
2608 2425
2609 if (TAILQ_EMPTY(&dev->busyq)) { 2426 if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0
2610 if ((dev->flags & AHC_DEV_UNCONFIGURED) != 0 2427 && dev->active == 0
2611 && dev->active == 0 2428 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0)
2612 && (dev->flags & AHC_DEV_TIMER_ACTIVE) == 0) 2429 ahc_linux_free_device(ahc, dev);
2613 ahc_linux_free_device(ahc, dev); 2430 else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
2614 } else if ((dev->flags & AHC_DEV_ON_RUN_LIST) == 0) {
2615 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links); 2431 TAILQ_INSERT_TAIL(&ahc->platform_data->device_runq, dev, links);
2616 dev->flags |= AHC_DEV_ON_RUN_LIST; 2432 dev->flags |= AHC_DEV_ON_RUN_LIST;
2617 } 2433 }
@@ -2940,7 +2756,6 @@ ahc_linux_release_simq(u_long arg)
2940 ahc->platform_data->qfrozen--; 2756 ahc->platform_data->qfrozen--;
2941 if (ahc->platform_data->qfrozen == 0) 2757 if (ahc->platform_data->qfrozen == 0)
2942 unblock_reqs = 1; 2758 unblock_reqs = 1;
2943 ahc_schedule_runq(ahc);
2944 ahc_unlock(ahc, &s); 2759 ahc_unlock(ahc, &s);
2945 /* 2760 /*
2946 * There is still a race here. The mid-layer 2761 * There is still a race here. The mid-layer
@@ -2965,11 +2780,7 @@ ahc_linux_dev_timed_unfreeze(u_long arg)
2965 dev->flags &= ~AHC_DEV_TIMER_ACTIVE; 2780 dev->flags &= ~AHC_DEV_TIMER_ACTIVE;
2966 if (dev->qfrozen > 0) 2781 if (dev->qfrozen > 0)
2967 dev->qfrozen--; 2782 dev->qfrozen--;
2968 if (dev->qfrozen == 0 2783 if (dev->active == 0)
2969 && (dev->flags & AHC_DEV_ON_RUN_LIST) == 0)
2970 ahc_linux_run_device_queue(ahc, dev);
2971 if (TAILQ_EMPTY(&dev->busyq)
2972 && dev->active == 0)
2973 __ahc_linux_free_device(ahc, dev); 2784 __ahc_linux_free_device(ahc, dev);
2974 ahc_unlock(ahc, &s); 2785 ahc_unlock(ahc, &s);
2975} 2786}
@@ -2978,8 +2789,6 @@ static int
2978ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag) 2789ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
2979{ 2790{
2980 struct ahc_softc *ahc; 2791 struct ahc_softc *ahc;
2981 struct ahc_cmd *acmd;
2982 struct ahc_cmd *list_acmd;
2983 struct ahc_linux_device *dev; 2792 struct ahc_linux_device *dev;
2984 struct scb *pending_scb; 2793 struct scb *pending_scb;
2985 u_long s; 2794 u_long s;
@@ -2998,7 +2807,6 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
2998 paused = FALSE; 2807 paused = FALSE;
2999 wait = FALSE; 2808 wait = FALSE;
3000 ahc = *(struct ahc_softc **)cmd->device->host->hostdata; 2809 ahc = *(struct ahc_softc **)cmd->device->host->hostdata;
3001 acmd = (struct ahc_cmd *)cmd;
3002 2810
3003 printf("%s:%d:%d:%d: Attempting to queue a%s message\n", 2811 printf("%s:%d:%d:%d: Attempting to queue a%s message\n",
3004 ahc_name(ahc), cmd->device->channel, 2812 ahc_name(ahc), cmd->device->channel,
@@ -3048,24 +2856,6 @@ ahc_linux_queue_recovery_cmd(Scsi_Cmnd *cmd, scb_flag flag)
3048 goto no_cmd; 2856 goto no_cmd;
3049 } 2857 }
3050 2858
3051 TAILQ_FOREACH(list_acmd, &dev->busyq, acmd_links.tqe) {
3052 if (list_acmd == acmd)
3053 break;
3054 }
3055
3056 if (list_acmd != NULL) {
3057 printf("%s:%d:%d:%d: Command found on device queue\n",
3058 ahc_name(ahc), cmd->device->channel, cmd->device->id,
3059 cmd->device->lun);
3060 if (flag == SCB_ABORT) {
3061 TAILQ_REMOVE(&dev->busyq, list_acmd, acmd_links.tqe);
3062 cmd->result = DID_ABORT << 16;
3063 ahc_linux_queue_cmd_complete(ahc, cmd);
3064 retval = SUCCESS;
3065 goto done;
3066 }
3067 }
3068
3069 if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0 2859 if ((dev->flags & (AHC_DEV_Q_BASIC|AHC_DEV_Q_TAGGED)) == 0
3070 && ahc_search_untagged_queues(ahc, cmd, cmd->device->id, 2860 && ahc_search_untagged_queues(ahc, cmd, cmd->device->id,
3071 cmd->device->channel + 'A', 2861 cmd->device->channel + 'A',
@@ -3299,7 +3089,6 @@ done:
3299 } 3089 }
3300 spin_lock_irq(&ahc->platform_data->spin_lock); 3090 spin_lock_irq(&ahc->platform_data->spin_lock);
3301 } 3091 }
3302 ahc_schedule_runq(ahc);
3303 ahc_linux_run_complete_queue(ahc); 3092 ahc_linux_run_complete_queue(ahc);
3304 ahc_midlayer_entrypoint_unlock(ahc, &s); 3093 ahc_midlayer_entrypoint_unlock(ahc, &s);
3305 return (retval); 3094 return (retval);
@@ -3308,40 +3097,6 @@ done:
3308void 3097void
3309ahc_platform_dump_card_state(struct ahc_softc *ahc) 3098ahc_platform_dump_card_state(struct ahc_softc *ahc)
3310{ 3099{
3311 struct ahc_linux_device *dev;
3312 int channel;
3313 int maxchannel;
3314 int target;
3315 int maxtarget;
3316 int lun;
3317 int i;
3318
3319 maxchannel = (ahc->features & AHC_TWIN) ? 1 : 0;
3320 maxtarget = (ahc->features & AHC_WIDE) ? 15 : 7;
3321 for (channel = 0; channel <= maxchannel; channel++) {
3322
3323 for (target = 0; target <=maxtarget; target++) {
3324
3325 for (lun = 0; lun < AHC_NUM_LUNS; lun++) {
3326 struct ahc_cmd *acmd;
3327
3328 dev = ahc_linux_get_device(ahc, channel, target,
3329 lun, /*alloc*/FALSE);
3330 if (dev == NULL)
3331 continue;
3332
3333 printf("DevQ(%d:%d:%d): ",
3334 channel, target, lun);
3335 i = 0;
3336 TAILQ_FOREACH(acmd, &dev->busyq,
3337 acmd_links.tqe) {
3338 if (i++ > AHC_SCB_MAX)
3339 break;
3340 }
3341 printf("%d waiting\n", i);
3342 }
3343 }
3344 }
3345} 3100}
3346 3101
3347static void ahc_linux_exit(void); 3102static void ahc_linux_exit(void);