Diffstat (limited to 'drivers/scsi')
31 files changed, 6421 insertions, 7319 deletions
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index bf5d63e1beee..656bdb1352d8 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1864,10 +1864,17 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
 /* This function will handle the request sense scsi command */
 static int tw_scsiop_request_sense(TW_Device_Extension *tw_dev, int request_id)
 {
+	char request_buffer[18];
+
 	dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_request_sense()\n");
 
-	/* For now we just zero the request buffer */
-	memset(tw_dev->srb[request_id]->request_buffer, 0, tw_dev->srb[request_id]->request_bufflen);
+	memset(request_buffer, 0, sizeof(request_buffer));
+	request_buffer[0] = 0x70; /* Immediate fixed format */
+	request_buffer[7] = 10;	/* minimum size per SPC: 18 bytes */
+	/* leave all other fields zero, giving effectively NO_SENSE return */
+	tw_transfer_internal(tw_dev, request_id, request_buffer,
+			     sizeof(request_buffer));
+
 	tw_dev->state[request_id] = TW_S_COMPLETED;
 	tw_state_request_finish(tw_dev, request_id);
 
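The hunk above stops zeroing the caller's buffer and instead builds an 18-byte fixed-format sense buffer by hand, then hands it to the driver's own tw_transfer_internal() helper. For illustration only, a standalone C sketch of that layout; the field offsets follow the SPC fixed-format sense data description, and nothing here is taken from the driver beyond what the hunk shows:

#include <stdio.h>
#include <string.h>

/* Build a minimal "no sense" fixed-format sense buffer, as the hunk above
 * does: response code 0x70 and additional length 10, giving the 18 bytes
 * that SPC calls the minimum for fixed-format sense data.
 */
static void fill_no_sense(unsigned char *buf, size_t len)
{
	memset(buf, 0, len);
	buf[0] = 0x70;	/* current error, fixed format */
	buf[7] = 10;	/* additional length: 18 total - 8 header bytes */
	/* sense key (buf[2]), ASC (buf[12]), ASCQ (buf[13]) stay 0 = NO SENSE */
}

int main(void)
{
	unsigned char sense[18];
	size_t i;

	fill_no_sense(sense, sizeof(sense));
	for (i = 0; i < sizeof(sense); i++)
		printf("%02x%c", sense[i], i + 1 < sizeof(sense) ? ' ' : '\n');
	return 0;
}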
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index d243ae115209..58c811d20eb2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1639,7 +1639,7 @@ config OKTAGON_SCSI
 
 config ATARI_SCSI
 	tristate "Atari native SCSI support"
-	depends on ATARI && SCSI && BROKEN
+	depends on ATARI && SCSI
 	select SCSI_SPI_ATTRS
 	---help---
 	  If you have an Atari with built-in NCR5380 SCSI controller (TT,
@@ -1753,9 +1753,15 @@ config SUN3X_ESP
 	  The ESP was an on-board SCSI controller used on Sun 3/80
 	  machines. Say Y here to compile in support for it.
 
+config SCSI_ESP_CORE
+	tristate "ESP Scsi Driver Core"
+	depends on SCSI
+	select SCSI_SPI_ATTRS
+
 config SCSI_SUNESP
 	tristate "Sparc ESP Scsi Driver"
 	depends on SBUS && SCSI
+	select SCSI_ESP_CORE
 	help
 	  This is the driver for the Sun ESP SCSI host adapter. The ESP
 	  chipset is present in most SPARC SBUS-based computers.
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 41c7883b24aa..51e884fa10b0 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -106,7 +106,8 @@ obj-$(CONFIG_MEGARAID_LEGACY) += megaraid.o
 obj-$(CONFIG_MEGARAID_NEWGEN) += megaraid/
 obj-$(CONFIG_MEGARAID_SAS) += megaraid/
 obj-$(CONFIG_SCSI_ACARD) += atp870u.o
-obj-$(CONFIG_SCSI_SUNESP) += esp.o
+obj-$(CONFIG_SCSI_ESP_CORE) += esp_scsi.o
+obj-$(CONFIG_SCSI_SUNESP) += sun_esp.o
 obj-$(CONFIG_SCSI_GDTH) += gdth.o
 obj-$(CONFIG_SCSI_INITIO) += initio.o
 obj-$(CONFIG_SCSI_INIA100) += a100u2w.o
diff --git a/drivers/scsi/aacraid/dpcsup.c b/drivers/scsi/aacraid/dpcsup.c
index fbf27f59a311..42c7dcda6d9b 100644
--- a/drivers/scsi/aacraid/dpcsup.c
+++ b/drivers/scsi/aacraid/dpcsup.c
@@ -32,7 +32,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/completion.h>
diff --git a/drivers/scsi/aacraid/sa.c b/drivers/scsi/aacraid/sa.c
index 6f1a1780efce..f4b5e9742ab0 100644
--- a/drivers/scsi/aacraid/sa.c
+++ b/drivers/scsi/aacraid/sa.c
@@ -31,7 +31,6 @@
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/types.h>
-#include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/slab.h>
 #include <linux/blkdev.h>
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index 1d239f6c0103..cbbfbc9f3e0f 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -35,7 +35,6 @@
 #include <linux/proc_fs.h>
 #include <linux/init.h>
 #include <linux/spinlock.h>
-#include <linux/pci.h>
 #include <linux/isapnp.h>
 #include <linux/blkdev.h>
 #include <linux/mca.h>
diff --git a/drivers/scsi/aic94xx/aic94xx_scb.c b/drivers/scsi/aic94xx/aic94xx_scb.c
index 8f43ff772f23..db6ab1a3b81e 100644
--- a/drivers/scsi/aic94xx/aic94xx_scb.c
+++ b/drivers/scsi/aic94xx/aic94xx_scb.c
@@ -24,7 +24,6 @@
  *
  */
 
-#include <linux/pci.h>
 #include <scsi/scsi_host.h>
 
 #include "aic94xx.h"
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index 12497da5529d..03bfed61bffc 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -49,7 +49,6 @@
 #include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/delay.h>
-#include <linux/pci.h>
 
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
diff --git a/drivers/scsi/atari_NCR5380.c b/drivers/scsi/atari_NCR5380.c
index 0f920c84ac0f..eff846ae0aff 100644
--- a/drivers/scsi/atari_NCR5380.c
+++ b/drivers/scsi/atari_NCR5380.c
@@ -1,19 +1,19 @@
 /*
  * NCR 5380 generic driver routines. These should make it *trivial*
  * to implement 5380 SCSI drivers under Linux with a non-trantor
  * architecture.
  *
  * Note that these routines also work with NR53c400 family chips.
  *
  * Copyright 1993, Drew Eckhardt
  *	Visionary Computing
  *	(Unix and Linux consulting and custom programming)
  *	drew@colorado.edu
  *	+1 (303) 666-5836
  *
  * DISTRIBUTION RELEASE 6.
  *
  * For more information, please consult
  *
  * NCR 5380 Family
  * SCSI Protocol Controller
@@ -57,7 +57,7 @@
  * - I've deleted all the stuff for AUTOPROBE_IRQ, REAL_DMA_POLL, PSEUDO_DMA
  *   and USLEEP, because these were messing up readability and will never be
  *   needed for Atari SCSI.
  *
  * - I've revised the NCR5380_main() calling scheme (relax the 'main_running'
  *   stuff), and 'main' is executed in a bottom half if awoken by an
  *   interrupt.
@@ -69,21 +69,29 @@
  */
 
 /*
  * Further development / testing that should be done :
  * 1. Test linked command handling code after Eric is ready with
  *    the high level code.
  */
 #include <scsi/scsi_dbg.h>
 #include <scsi/scsi_transport_spi.h>
 
 #if (NDEBUG & NDEBUG_LISTS)
-#define LIST(x,y) \
-  { printk("LINE:%d Adding %p to %p\n", __LINE__, (void*)(x), (void*)(y)); \
-    if ((x)==(y)) udelay(5); }
-#define REMOVE(w,x,y,z) \
-  { printk("LINE:%d Removing: %p->%p %p->%p \n", __LINE__, \
-    (void*)(w), (void*)(x), (void*)(y), (void*)(z)); \
-    if ((x)==(y)) udelay(5); }
+#define LIST(x, y) \
+	do { \
+		printk("LINE:%d Adding %p to %p\n", \
+		       __LINE__, (void*)(x), (void*)(y)); \
+		if ((x) == (y)) \
+			udelay(5); \
+	} while (0)
+#define REMOVE(w, x, y, z) \
+	do { \
+		printk("LINE:%d Removing: %p->%p %p->%p \n", \
+		       __LINE__, (void*)(w), (void*)(x), \
+		       (void*)(y), (void*)(z)); \
+		if ((x) == (y)) \
+			udelay(5); \
+	} while (0)
 #else
 #define LIST(x,y)
 #define REMOVE(w,x,y,z)
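The LIST()/REMOVE() debug macros above are rewritten from bare brace blocks into do { ... } while (0) bodies. That idiom makes a multi-statement macro behave as a single statement after an if without braces; a small standalone sketch of the hazard it avoids (macro names here are illustrative, not from the driver):

#include <stdio.h>

/* Unsafe form: a bare brace block. "TRACE_BAD(x); else ..." breaks because
 * the semicolon after the block terminates the if before the else is seen.
 */
#define TRACE_BAD(x)	{ printf("trace %d\n", (x)); }

/* Safe form, as in the rewritten LIST()/REMOVE(): do { } while (0) swallows
 * the trailing semicolon and stays one statement in every context.
 */
#define TRACE_OK(x) \
	do { \
		printf("trace %d\n", (x)); \
	} while (0)

int main(void)
{
	int v = 1;

	if (v)
		TRACE_OK(v);	/* works with or without an else branch */
	else
		printf("never\n");

	/* if (v) TRACE_BAD(v); else printf("never\n");  <- would not compile */
	return 0;
}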
@@ -103,62 +111,62 @@
  * more difficult than it has to be.
  *
  * Also, many of the SCSI drivers were written before the command queuing
  * routines were implemented, meaning their implementations of queued
  * commands were hacked on rather than designed in from the start.
  *
  * When I designed the Linux SCSI drivers I figured that
  * while having two different SCSI boards in a system might be useful
  * for debugging things, two of the same type wouldn't be used.
  * Well, I was wrong and a number of users have mailed me about running
  * multiple high-performance SCSI boards in a server.
  *
  * Finally, when I get questions from users, I have no idea what
  * revision of my driver they are running.
  *
  * This driver attempts to address these problems :
  * This is a generic 5380 driver. To use it on a different platform,
  * one simply writes appropriate system specific macros (ie, data
  * transfer - some PC's will use the I/O bus, 68K's must use
  * memory mapped) and drops this file in their 'C' wrapper.
  *
  * As far as command queueing, two queues are maintained for
  * each 5380 in the system - commands that haven't been issued yet,
  * and commands that are currently executing. This means that an
  * unlimited number of commands may be queued, letting
  * more commands propagate from the higher driver levels giving higher
  * throughput. Note that both I_T_L and I_T_L_Q nexuses are supported,
  * allowing multiple commands to propagate all the way to a SCSI-II device
  * while a command is already executing.
  *
  * To solve the multiple-boards-in-the-same-system problem,
  * there is a separate instance structure for each instance
  * of a 5380 in the system. So, multiple NCR5380 drivers will
  * be able to coexist with appropriate changes to the high level
  * SCSI code.
  *
  * A NCR5380_PUBLIC_REVISION macro is provided, with the release
  * number (updated for each public release) printed by the
  * NCR5380_print_options command, which should be called from the
  * wrapper detect function, so that I know what release of the driver
  * users are using.
  *
  * Issues specific to the NCR5380 :
  *
  * When used in a PIO or pseudo-dma mode, the NCR5380 is a braindead
  * piece of hardware that requires you to sit in a loop polling for
  * the REQ signal as long as you are connected. Some devices are
  * brain dead (ie, many TEXEL CD ROM drives) and won't disconnect
  * while doing long seek operations.
  *
  * The workaround for this is to keep track of devices that have
  * disconnected. If the device hasn't disconnected, for commands that
  * should disconnect, we do something like
  *
  * while (!REQ is asserted) { sleep for N usecs; poll for M usecs }
  *
  * Some tweaking of N and M needs to be done. An algorithm based
  * on "time to data" would give the best results as long as short time
  * to datas (ie, on the same track) were considered, however these
  * broken devices are the exception rather than the rule and I'd rather
  * spend my time optimizing for the normal case.
  *
@@ -167,9 +175,9 @@
  * At the heart of the design is a coroutine, NCR5380_main,
  * which is started when not running by the interrupt handler,
  * timer, and queue command function. It attempts to establish
  * I_T_L or I_T_L_Q nexuses by removing the commands from the
  * issue queue and calling NCR5380_select() if a nexus
  * is not established.
  *
  * Once a nexus is established, the NCR5380_information_transfer()
  * phase goes through the various phases as instructed by the target.
@@ -183,10 +191,10 @@
  * calling NCR5380_intr() which will in turn call NCR5380_reselect
  * to reestablish a nexus. This will run main if necessary.
  *
  * On command termination, the done function will be called as
  * appropriate.
  *
  * SCSI pointers are maintained in the SCp field of SCSI command
  * structures, being initialized after the command is connected
  * in NCR5380_select, and set as appropriate in NCR5380_information_transfer.
  * Note that in violation of the standard, an implicit SAVE POINTERS operation
@@ -196,12 +204,12 @@
 /*
  * Using this file :
  * This file a skeleton Linux SCSI driver for the NCR 5380 series
  * of chips. To use it, you write an architecture specific functions
  * and macros and include this file in your driver.
  *
  * These macros control options :
  * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically
  *      for commands that return with a CHECK CONDITION status.
  *
  * LINKED - if defined, linked commands are supported.
  *
@@ -210,18 +218,18 @@
  * SUPPORT_TAGS - if defined, SCSI-2 tagged queuing is used where possible
  *
  * These macros MUST be defined :
  *
  * NCR5380_read(register) - read from the specified register
  *
  * NCR5380_write(register, value) - write to the specific register
  *
  * Either real DMA *or* pseudo DMA may be implemented
  * REAL functions :
  * NCR5380_REAL_DMA should be defined if real DMA is to be used.
  * Note that the DMA setup functions should return the number of bytes
  *	that they were able to program the controller for.
  *
  * Also note that generic i386/PC versions of these macros are
  *	available as NCR5380_i386_dma_write_setup,
  *	NCR5380_i386_dma_read_setup, and NCR5380_i386_dma_residual.
  *
@@ -234,14 +242,14 @@
  * NCR5380_pread(instance, dst, count);
  *
  * If nothing specific to this implementation needs doing (ie, with external
  * hardware), you must also define
  *
  * NCR5380_queue_command
  * NCR5380_reset
  * NCR5380_abort
  * NCR5380_proc_info
  *
  * to be the global entry points into the specific driver, ie
  * #define NCR5380_queue_command t128_queue_command.
  *
  * If this is not done, the routines will be defined as static functions
@@ -249,7 +257,7 @@
  * accessible wrapper function.
  *
  * The generic driver is initialized by calling NCR5380_init(instance),
  * after setting the appropriate host specific fields and ID. If the
  * driver wishes to autoprobe for an IRQ line, the NCR5380_probe_irq(instance,
  * possible) function may be used. Before the specific driver initialization
  * code finishes, NCR5380_print_options should be called.
@@ -264,8 +272,9 @@ static struct scsi_host_template *the_template = NULL;
 	(struct NCR5380_hostdata *)(in)->hostdata
 #define HOSTDATA(in) ((struct NCR5380_hostdata *)(in)->hostdata)
 
-#define NEXT(cmd) ((Scsi_Cmnd *)((cmd)->host_scribble))
-#define NEXTADDR(cmd) ((Scsi_Cmnd **)&((cmd)->host_scribble))
+#define NEXT(cmd)		((Scsi_Cmnd *)(cmd)->host_scribble)
+#define SET_NEXT(cmd,next)	((cmd)->host_scribble = (void *)(next))
+#define NEXTADDR(cmd)		((Scsi_Cmnd **)&(cmd)->host_scribble)
 
 #define HOSTNO		instance->host_no
 #define H_NO(cmd)	(cmd)->device->host->host_no
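NEXT(), the new SET_NEXT() and NEXTADDR() above treat a command's host_scribble pointer as the link field of a singly linked queue, so commands can be chained without any extra allocation. A standalone analogue of that pattern (the struct and field names here are invented for the example, not the kernel's):

#include <stdio.h>
#include <stddef.h>

/* Stand-in for Scsi_Cmnd: one spare pointer ("scribble") doubles as the
 * next-link of an intrusive singly linked queue.
 */
struct fake_cmd {
	int id;
	void *scribble;
};

#define NEXT(cmd)		((struct fake_cmd *)(cmd)->scribble)
#define SET_NEXT(cmd, n)	((cmd)->scribble = (void *)(n))

int main(void)
{
	struct fake_cmd a = { 1, NULL }, b = { 2, NULL }, c = { 3, NULL };
	struct fake_cmd *head = &a, *p;

	/* chain a -> b -> c through the scribble field */
	SET_NEXT(&a, &b);
	SET_NEXT(&b, &c);
	SET_NEXT(&c, NULL);

	for (p = head; p; p = NEXT(p))
		printf("cmd %d\n", p->id);
	return 0;
}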
@@ -312,34 +321,34 @@ static struct scsi_host_template *the_template = NULL;
 #define TAG_NONE 0xff
 
 typedef struct {
 	DECLARE_BITMAP(allocated, MAX_TAGS);
 	int nr_allocated;
 	int queue_size;
 } TAG_ALLOC;
 
 static TAG_ALLOC TagAlloc[8][8]; /* 8 targets and 8 LUNs */
 
 
-static void __init init_tags( void )
+static void __init init_tags(void)
 {
 	int target, lun;
 	TAG_ALLOC *ta;
 
 	if (!setup_use_tagged_queuing)
 		return;
 
-	for( target = 0; target < 8; ++target ) {
-		for( lun = 0; lun < 8; ++lun ) {
+	for (target = 0; target < 8; ++target) {
+		for (lun = 0; lun < 8; ++lun) {
 			ta = &TagAlloc[target][lun];
 			bitmap_zero(ta->allocated, MAX_TAGS);
 			ta->nr_allocated = 0;
 			/* At the beginning, assume the maximum queue size we could
 			 * support (MAX_TAGS). This value will be decreased if the target
 			 * returns QUEUE_FULL status.
 			 */
 			ta->queue_size = MAX_TAGS;
+		}
 	}
-    }
 }
 
 
@@ -348,24 +357,24 @@ static void __init init_tags( void )
  * check that there is a free tag and the target's queue won't overflow. This
  * function should be called with interrupts disabled to avoid race
  * conditions.
  */
 
-static int is_lun_busy( Scsi_Cmnd *cmd, int should_be_tagged )
+static int is_lun_busy(Scsi_Cmnd *cmd, int should_be_tagged)
 {
 	SETUP_HOSTDATA(cmd->device->host);
 
 	if (hostdata->busy[cmd->device->id] & (1 << cmd->device->lun))
-		return( 1 );
+		return 1;
 	if (!should_be_tagged ||
 	    !setup_use_tagged_queuing || !cmd->device->tagged_supported)
-		return( 0 );
+		return 0;
 	if (TagAlloc[cmd->device->id][cmd->device->lun].nr_allocated >=
-	    TagAlloc[cmd->device->id][cmd->device->lun].queue_size ) {
-		TAG_PRINTK( "scsi%d: target %d lun %d: no free tags\n",
-			    H_NO(cmd), cmd->device->id, cmd->device->lun );
-		return( 1 );
+	    TagAlloc[cmd->device->id][cmd->device->lun].queue_size) {
+		TAG_PRINTK("scsi%d: target %d lun %d: no free tags\n",
+			   H_NO(cmd), cmd->device->id, cmd->device->lun);
+		return 1;
 	}
-	return( 0 );
+	return 0;
 }
 
 
@@ -374,31 +383,30 @@ static int is_lun_busy( Scsi_Cmnd *cmd, int should_be_tagged )
  * untagged.
  */
 
-static void cmd_get_tag( Scsi_Cmnd *cmd, int should_be_tagged )
+static void cmd_get_tag(Scsi_Cmnd *cmd, int should_be_tagged)
 {
 	SETUP_HOSTDATA(cmd->device->host);
 
 	/* If we or the target don't support tagged queuing, allocate the LUN for
 	 * an untagged command.
 	 */
 	if (!should_be_tagged ||
 	    !setup_use_tagged_queuing || !cmd->device->tagged_supported) {
 		cmd->tag = TAG_NONE;
 		hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun);
-		TAG_PRINTK( "scsi%d: target %d lun %d now allocated by untagged "
-			    "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun );
-	}
-	else {
-		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
-
-		cmd->tag = find_first_zero_bit( ta->allocated, MAX_TAGS );
-		set_bit( cmd->tag, ta->allocated );
-		ta->nr_allocated++;
-		TAG_PRINTK( "scsi%d: using tag %d for target %d lun %d "
-			    "(now %d tags in use)\n",
-			    H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun,
-			    ta->nr_allocated );
-	}
+		TAG_PRINTK("scsi%d: target %d lun %d now allocated by untagged "
+			   "command\n", H_NO(cmd), cmd->device->id, cmd->device->lun);
+	} else {
+		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
+
+		cmd->tag = find_first_zero_bit(ta->allocated, MAX_TAGS);
+		set_bit(cmd->tag, ta->allocated);
+		ta->nr_allocated++;
+		TAG_PRINTK("scsi%d: using tag %d for target %d lun %d "
+			   "(now %d tags in use)\n",
+			   H_NO(cmd), cmd->tag, cmd->device->id,
+			   cmd->device->lun, ta->nr_allocated);
+	}
 }
 
 
@@ -406,44 +414,42 @@ static void cmd_get_tag( Scsi_Cmnd *cmd, int should_be_tagged )
  * unlock the LUN.
  */
 
-static void cmd_free_tag( Scsi_Cmnd *cmd )
+static void cmd_free_tag(Scsi_Cmnd *cmd)
 {
 	SETUP_HOSTDATA(cmd->device->host);
 
 	if (cmd->tag == TAG_NONE) {
 		hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun);
-		TAG_PRINTK( "scsi%d: target %d lun %d untagged cmd finished\n",
-			    H_NO(cmd), cmd->device->id, cmd->device->lun );
-	}
-	else if (cmd->tag >= MAX_TAGS) {
-		printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
-			H_NO(cmd), cmd->tag );
-	}
-	else {
-		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
-		clear_bit( cmd->tag, ta->allocated );
-		ta->nr_allocated--;
-		TAG_PRINTK( "scsi%d: freed tag %d for target %d lun %d\n",
-			    H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun );
-	}
+		TAG_PRINTK("scsi%d: target %d lun %d untagged cmd finished\n",
+			   H_NO(cmd), cmd->device->id, cmd->device->lun);
+	} else if (cmd->tag >= MAX_TAGS) {
+		printk(KERN_NOTICE "scsi%d: trying to free bad tag %d!\n",
+		       H_NO(cmd), cmd->tag);
+	} else {
+		TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun];
+		clear_bit(cmd->tag, ta->allocated);
+		ta->nr_allocated--;
+		TAG_PRINTK("scsi%d: freed tag %d for target %d lun %d\n",
+			   H_NO(cmd), cmd->tag, cmd->device->id, cmd->device->lun);
+	}
 }
 
 
-static void free_all_tags( void )
+static void free_all_tags(void)
 {
 	int target, lun;
 	TAG_ALLOC *ta;
 
 	if (!setup_use_tagged_queuing)
 		return;
 
-	for( target = 0; target < 8; ++target ) {
-		for( lun = 0; lun < 8; ++lun ) {
+	for (target = 0; target < 8; ++target) {
+		for (lun = 0; lun < 8; ++lun) {
 			ta = &TagAlloc[target][lun];
 			bitmap_zero(ta->allocated, MAX_TAGS);
 			ta->nr_allocated = 0;
+		}
 	}
-    }
 }
 
 #endif /* SUPPORT_TAGS */
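cmd_get_tag()/cmd_free_tag() above hand out per-LUN queue tags from a bitmap via find_first_zero_bit()/set_bit()/clear_bit(). A userspace sketch of the same allocation scheme, using a plain unsigned long instead of the kernel bitmap helpers (names and sizes here are illustrative):

#include <stdio.h>

#define MAX_TAGS	32	/* illustrative; fits in one unsigned long */

static unsigned long allocated;	/* bit n set => tag n in use */
static int nr_allocated;

/* Return the first free tag and mark it busy, or -1 if the queue is full. */
static int get_tag(void)
{
	int tag;

	for (tag = 0; tag < MAX_TAGS; tag++) {
		if (!(allocated & (1UL << tag))) {
			allocated |= 1UL << tag;
			nr_allocated++;
			return tag;
		}
	}
	return -1;
}

static void free_tag(int tag)
{
	if (tag < 0 || tag >= MAX_TAGS)
		return;
	allocated &= ~(1UL << tag);
	nr_allocated--;
}

int main(void)
{
	int t1 = get_tag(), t2 = get_tag();

	printf("got tags %d and %d, %d in use\n", t1, t2, nr_allocated);
	free_tag(t1);
	printf("freed %d, %d still in use\n", t1, nr_allocated);
	return 0;
}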
@@ -461,89 +467,94 @@ static void free_all_tags( void )
  * assumed to be already transfered into ptr/this_residual.
  */
 
-static void merge_contiguous_buffers( Scsi_Cmnd *cmd )
+static void merge_contiguous_buffers(Scsi_Cmnd *cmd)
 {
 	unsigned long endaddr;
 #if (NDEBUG & NDEBUG_MERGING)
 	unsigned long oldlen = cmd->SCp.this_residual;
 	int cnt = 1;
 #endif
 
 	for (endaddr = virt_to_phys(cmd->SCp.ptr + cmd->SCp.this_residual - 1) + 1;
 	     cmd->SCp.buffers_residual &&
-	     virt_to_phys(page_address(cmd->SCp.buffer[1].page)+
-			  cmd->SCp.buffer[1].offset) == endaddr; ) {
+	     virt_to_phys(page_address(cmd->SCp.buffer[1].page) +
+			  cmd->SCp.buffer[1].offset) == endaddr;) {
 		MER_PRINTK("VTOP(%p) == %08lx -> merging\n",
-			   cmd->SCp.buffer[1].address, endaddr);
+			   page_address(cmd->SCp.buffer[1].page), endaddr);
 #if (NDEBUG & NDEBUG_MERGING)
 		++cnt;
 #endif
 		++cmd->SCp.buffer;
 		--cmd->SCp.buffers_residual;
 		cmd->SCp.this_residual += cmd->SCp.buffer->length;
 		endaddr += cmd->SCp.buffer->length;
 	}
 #if (NDEBUG & NDEBUG_MERGING)
 	if (oldlen != cmd->SCp.this_residual)
 		MER_PRINTK("merged %d buffers from %p, new length %08x\n",
 			   cnt, cmd->SCp.ptr, cmd->SCp.this_residual);
 #endif
 }
 
 /*
  * Function : void initialize_SCp(Scsi_Cmnd *cmd)
  *
  * Purpose : initialize the saved data pointers for cmd to point to the
  *	start of the buffer.
  *
  * Inputs : cmd - Scsi_Cmnd structure to have pointers reset.
  */
 
-static __inline__ void initialize_SCp(Scsi_Cmnd *cmd)
+static inline void initialize_SCp(Scsi_Cmnd *cmd)
 {
 	/*
 	 * Initialize the Scsi Pointer field so that all of the commands in the
 	 * various queues are valid.
-	 */
-
-	if (cmd->use_sg) {
-		cmd->SCp.buffer = (struct scatterlist *) cmd->request_buffer;
-		cmd->SCp.buffers_residual = cmd->use_sg - 1;
-		cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page)+
-			       cmd->SCp.buffer->offset;
-		cmd->SCp.this_residual = cmd->SCp.buffer->length;
-		/* ++roman: Try to merge some scatter-buffers if they are at
-		 * contiguous physical addresses.
 	 */
-		merge_contiguous_buffers( cmd );
-	} else {
-		cmd->SCp.buffer = NULL;
-		cmd->SCp.buffers_residual = 0;
-		cmd->SCp.ptr = (char *) cmd->request_buffer;
-		cmd->SCp.this_residual = cmd->request_bufflen;
-	}
+
+	if (cmd->use_sg) {
+		cmd->SCp.buffer = (struct scatterlist *)cmd->request_buffer;
+		cmd->SCp.buffers_residual = cmd->use_sg - 1;
+		cmd->SCp.ptr = (char *)page_address(cmd->SCp.buffer->page) +
+			       cmd->SCp.buffer->offset;
+		cmd->SCp.this_residual = cmd->SCp.buffer->length;
+		/* ++roman: Try to merge some scatter-buffers if they are at
+		 * contiguous physical addresses.
+		 */
+		merge_contiguous_buffers(cmd);
+	} else {
+		cmd->SCp.buffer = NULL;
+		cmd->SCp.buffers_residual = 0;
+		cmd->SCp.ptr = (char *)cmd->request_buffer;
+		cmd->SCp.this_residual = cmd->request_bufflen;
+	}
 }
 
 #include <linux/delay.h>
 
 #if NDEBUG
 static struct {
 	unsigned char mask;
-	const char * name;}
-signals[] = {{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
-	{ SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
-	{ SR_SEL, "SEL" }, {0, NULL}},
-basrs[] = {{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}},
-icrs[] = {{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
-	{ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
-	{ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
-	{0, NULL}},
-mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
-	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
-	"MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
-	{MR_MONITOR_BSY, "MODE MONITOR BSY"},
-	{MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"},
-	{0, NULL}};
+	const char *name;
+} signals[] = {
+	{ SR_DBP, "PARITY"}, { SR_RST, "RST" }, { SR_BSY, "BSY" },
+	{ SR_REQ, "REQ" }, { SR_MSG, "MSG" }, { SR_CD, "CD" }, { SR_IO, "IO" },
+	{ SR_SEL, "SEL" }, {0, NULL}
+}, basrs[] = {
+	{BASR_ATN, "ATN"}, {BASR_ACK, "ACK"}, {0, NULL}
+}, icrs[] = {
+	{ICR_ASSERT_RST, "ASSERT RST"},{ICR_ASSERT_ACK, "ASSERT ACK"},
+	{ICR_ASSERT_BSY, "ASSERT BSY"}, {ICR_ASSERT_SEL, "ASSERT SEL"},
+	{ICR_ASSERT_ATN, "ASSERT ATN"}, {ICR_ASSERT_DATA, "ASSERT DATA"},
+	{0, NULL}
+}, mrs[] = {
+	{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
+	{MR_ENABLE_PAR_CHECK, "MODE PARITY CHECK"}, {MR_ENABLE_PAR_INTR,
+	"MODE PARITY INTR"}, {MR_ENABLE_EOP_INTR,"MODE EOP INTR"},
+	{MR_MONITOR_BSY, "MODE MONITOR BSY"},
+	{MR_DMA_MODE, "MODE DMA"}, {MR_ARBITRATE, "MODE ARBITRATION"},
+	{0, NULL}
+};
 
 /*
  * Function : void NCR5380_print(struct Scsi_Host *instance)
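merge_contiguous_buffers() above walks the scatter list and folds the next buffer into the current one whenever its physical start address equals the current end address. The same idea on a plain array of segments, as a standalone sketch (the segment struct is invented for the example):

#include <stdio.h>
#include <stddef.h>

struct segment {
	unsigned long addr;	/* start address of the segment */
	unsigned long len;	/* length in bytes */
};

/* Fold seg[i+1..] into seg[i] while each one starts exactly where the
 * previous one ends; returns the merged length, mirroring how the driver
 * grows this_residual while buffers stay physically contiguous.
 */
static unsigned long merge_from(struct segment *seg, size_t n, size_t i)
{
	unsigned long end = seg[i].addr + seg[i].len;
	unsigned long total = seg[i].len;

	while (i + 1 < n && seg[i + 1].addr == end) {
		total += seg[i + 1].len;
		end += seg[i + 1].len;
		i++;
	}
	return total;
}

int main(void)
{
	struct segment segs[] = {
		{ 0x1000, 0x200 },	/* ends at 0x1200 */
		{ 0x1200, 0x100 },	/* contiguous: merged */
		{ 0x2000, 0x100 },	/* gap: stops the merge */
	};

	printf("merged length from segs[0]: 0x%lx\n",
	       merge_from(segs, 3, 0));	/* prints 0x300 */
	return 0;
}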
@@ -553,45 +564,47 @@ mrs[] = {{MR_BLOCK_DMA_MODE, "MODE BLOCK DMA"}, {MR_TARGET, "MODE TARGET"},
  * Input : instance - which NCR5380
  */
 
-static void NCR5380_print(struct Scsi_Host *instance) {
-	unsigned char status, data, basr, mr, icr, i;
-	unsigned long flags;
-
-	local_irq_save(flags);
-	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
-	status = NCR5380_read(STATUS_REG);
-	mr = NCR5380_read(MODE_REG);
-	icr = NCR5380_read(INITIATOR_COMMAND_REG);
-	basr = NCR5380_read(BUS_AND_STATUS_REG);
-	local_irq_restore(flags);
-	printk("STATUS_REG: %02x ", status);
-	for (i = 0; signals[i].mask ; ++i)
-		if (status & signals[i].mask)
-			printk(",%s", signals[i].name);
-	printk("\nBASR: %02x ", basr);
-	for (i = 0; basrs[i].mask ; ++i)
-		if (basr & basrs[i].mask)
-			printk(",%s", basrs[i].name);
-	printk("\nICR: %02x ", icr);
-	for (i = 0; icrs[i].mask; ++i)
-		if (icr & icrs[i].mask)
-			printk(",%s", icrs[i].name);
-	printk("\nMODE: %02x ", mr);
-	for (i = 0; mrs[i].mask; ++i)
-		if (mr & mrs[i].mask)
-			printk(",%s", mrs[i].name);
-	printk("\n");
+static void NCR5380_print(struct Scsi_Host *instance)
+{
+	unsigned char status, data, basr, mr, icr, i;
+	unsigned long flags;
+
+	local_irq_save(flags);
+	data = NCR5380_read(CURRENT_SCSI_DATA_REG);
+	status = NCR5380_read(STATUS_REG);
+	mr = NCR5380_read(MODE_REG);
+	icr = NCR5380_read(INITIATOR_COMMAND_REG);
+	basr = NCR5380_read(BUS_AND_STATUS_REG);
+	local_irq_restore(flags);
+	printk("STATUS_REG: %02x ", status);
+	for (i = 0; signals[i].mask; ++i)
+		if (status & signals[i].mask)
+			printk(",%s", signals[i].name);
+	printk("\nBASR: %02x ", basr);
+	for (i = 0; basrs[i].mask; ++i)
+		if (basr & basrs[i].mask)
+			printk(",%s", basrs[i].name);
+	printk("\nICR: %02x ", icr);
+	for (i = 0; icrs[i].mask; ++i)
+		if (icr & icrs[i].mask)
+			printk(",%s", icrs[i].name);
+	printk("\nMODE: %02x ", mr);
+	for (i = 0; mrs[i].mask; ++i)
+		if (mr & mrs[i].mask)
+			printk(",%s", mrs[i].name);
+	printk("\n");
 }
 
 static struct {
 	unsigned char value;
 	const char *name;
 } phases[] = {
 	{PHASE_DATAOUT, "DATAOUT"}, {PHASE_DATAIN, "DATAIN"}, {PHASE_CMDOUT, "CMDOUT"},
 	{PHASE_STATIN, "STATIN"}, {PHASE_MSGOUT, "MSGOUT"}, {PHASE_MSGIN, "MSGIN"},
-	{PHASE_UNKNOWN, "UNKNOWN"}};
+	{PHASE_UNKNOWN, "UNKNOWN"}
+};
 
 /*
  * Function : void NCR5380_print_phase(struct Scsi_Host *instance)
  *
  * Purpose : print the current SCSI phase for debugging purposes
@@ -601,30 +614,35 @@ static struct {
 static void NCR5380_print_phase(struct Scsi_Host *instance)
 {
 	unsigned char status;
 	int i;
 
 	status = NCR5380_read(STATUS_REG);
 	if (!(status & SR_REQ))
 		printk(KERN_DEBUG "scsi%d: REQ not asserted, phase unknown.\n", HOSTNO);
 	else {
 		for (i = 0; (phases[i].value != PHASE_UNKNOWN) &&
-		     (phases[i].value != (status & PHASE_MASK)); ++i);
-		printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
-	}
+		     (phases[i].value != (status & PHASE_MASK)); ++i)
+			;
+		printk(KERN_DEBUG "scsi%d: phase %s\n", HOSTNO, phases[i].name);
+	}
 }
 
 #else /* !NDEBUG */
 
 /* dummies... */
-__inline__ void NCR5380_print(struct Scsi_Host *instance) { };
-__inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };
+static inline void NCR5380_print(struct Scsi_Host *instance)
+{
+};
+static inline void NCR5380_print_phase(struct Scsi_Host *instance)
+{
+};
 
 #endif
 
 /*
  * ++roman: New scheme of calling NCR5380_main()
  *
  * If we're not in an interrupt, we can call our main directly, it cannot be
  * already running. Else, we queue it on a task queue, if not 'main_running'
  * tells us that a lower level is already executing it. This way,
@@ -638,33 +656,33 @@ __inline__ void NCR5380_print_phase(struct Scsi_Host *instance) { };
 #include <linux/workqueue.h>
 #include <linux/interrupt.h>
 
-static volatile int main_running = 0;
-static DECLARE_WORK(NCR5380_tqueue, (void (*)(void*))NCR5380_main, NULL);
+static volatile int main_running;
+static DECLARE_WORK(NCR5380_tqueue, NCR5380_main);
 
-static __inline__ void queue_main(void)
+static inline void queue_main(void)
 {
 	if (!main_running) {
 		/* If in interrupt and NCR5380_main() not already running,
 		   queue it on the 'immediate' task queue, to be processed
 		   immediately after the current interrupt processing has
 		   finished. */
 		schedule_work(&NCR5380_tqueue);
 	}
 	/* else: nothing to do: the running NCR5380_main() will pick up
 	   any newly queued command. */
 }
 
 
-static inline void NCR5380_all_init (void)
+static inline void NCR5380_all_init(void)
 {
 	static int done = 0;
 	if (!done) {
 		INI_PRINTK("scsi : NCR5380_all_init()\n");
 		done = 1;
 	}
 }
 
 
 /*
  * Function : void NCR58380_print_options (struct Scsi_Host *instance)
  *
@@ -674,23 +692,23 @@ static inline void NCR5380_all_init (void)
  * Inputs : instance, pointer to this instance. Unused.
  */
 
-static void __init NCR5380_print_options (struct Scsi_Host *instance)
+static void __init NCR5380_print_options(struct Scsi_Host *instance)
 {
 	printk(" generic options"
 #ifdef AUTOSENSE
 	       " AUTOSENSE"
 #endif
 #ifdef REAL_DMA
 	       " REAL DMA"
 #endif
 #ifdef PARITY
 	       " PARITY"
 #endif
 #ifdef SUPPORT_TAGS
 	       " SCSI-2 TAGGED QUEUING"
 #endif
 	       );
 	printk(" generic release=%d", NCR5380_PUBLIC_RELEASE);
 }
 
 /*
@@ -699,27 +717,27 @@ static void __init NCR5380_print_options (struct Scsi_Host *instance)
  * Purpose : print commands in the various queues, called from
  *	NCR5380_abort and NCR5380_debug to aid debugging.
  *
  * Inputs : instance, pointer to this instance.
  */
 
-static void NCR5380_print_status (struct Scsi_Host *instance)
+static void NCR5380_print_status(struct Scsi_Host *instance)
 {
 	char *pr_bfr;
 	char *start;
 	int len;
 
 	NCR_PRINT(NDEBUG_ANY);
 	NCR_PRINT_PHASE(NDEBUG_ANY);
 
-	pr_bfr = (char *) __get_free_page(GFP_ATOMIC);
+	pr_bfr = (char *)__get_free_page(GFP_ATOMIC);
 	if (!pr_bfr) {
 		printk("NCR5380_print_status: no memory for print buffer\n");
 		return;
 	}
-	len = NCR5380_proc_info(pr_bfr, &start, 0, PAGE_SIZE, HOSTNO, 0);
+	len = NCR5380_proc_info(instance, pr_bfr, &start, 0, PAGE_SIZE, 0);
 	pr_bfr[len] = 0;
 	printk("\n%s\n", pr_bfr);
-	free_page((unsigned long) pr_bfr);
+	free_page((unsigned long)pr_bfr);
 }
 
 
@@ -738,443 +756,478 @@ static void NCR5380_print_status (struct Scsi_Host *instance) | |||
738 | */ | 756 | */ |
739 | 757 | ||
740 | #undef SPRINTF | 758 | #undef SPRINTF |
741 | #define SPRINTF(fmt,args...) \ | 759 | #define SPRINTF(fmt,args...) \ |
742 | do { if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \ | 760 | do { \ |
743 | pos += sprintf(pos, fmt , ## args); } while(0) | 761 | if (pos + strlen(fmt) + 20 /* slop */ < buffer + length) \ |
744 | static | 762 | pos += sprintf(pos, fmt , ## args); \ |
745 | char *lprint_Scsi_Cmnd (Scsi_Cmnd *cmd, char *pos, char *buffer, int length); | 763 | } while(0) |
746 | 764 | static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length); | |
747 | static | 765 | |
748 | int NCR5380_proc_info (struct Scsi_Host *instance, char *buffer, char **start, off_t offset, | 766 | static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, |
749 | int length, int inout) | 767 | char **start, off_t offset, int length, int inout) |
750 | { | 768 | { |
751 | char *pos = buffer; | 769 | char *pos = buffer; |
752 | struct NCR5380_hostdata *hostdata; | 770 | struct NCR5380_hostdata *hostdata; |
753 | Scsi_Cmnd *ptr; | 771 | Scsi_Cmnd *ptr; |
754 | unsigned long flags; | 772 | unsigned long flags; |
755 | off_t begin = 0; | 773 | off_t begin = 0; |
756 | #define check_offset() \ | 774 | #define check_offset() \ |
757 | do { \ | 775 | do { \ |
758 | if (pos - buffer < offset - begin) { \ | 776 | if (pos - buffer < offset - begin) { \ |
759 | begin += pos - buffer; \ | 777 | begin += pos - buffer; \ |
760 | pos = buffer; \ | 778 | pos = buffer; \ |
761 | } \ | 779 | } \ |
762 | } while (0) | 780 | } while (0) |
763 | 781 | ||
764 | hostdata = (struct NCR5380_hostdata *)instance->hostdata; | 782 | hostdata = (struct NCR5380_hostdata *)instance->hostdata; |
765 | 783 | ||
766 | if (inout) { /* Has data been written to the file ? */ | 784 | if (inout) /* Has data been written to the file ? */ |
767 | return(-ENOSYS); /* Currently this is a no-op */ | 785 | return -ENOSYS; /* Currently this is a no-op */ |
768 | } | 786 | SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); |
769 | SPRINTF("NCR5380 core release=%d.\n", NCR5380_PUBLIC_RELEASE); | ||
770 | check_offset(); | ||
771 | local_irq_save(flags); | ||
772 | SPRINTF("NCR5380: coroutine is%s running.\n", main_running ? "" : "n't"); | ||
773 | check_offset(); | ||
774 | if (!hostdata->connected) | ||
775 | SPRINTF("scsi%d: no currently connected command\n", HOSTNO); | ||
776 | else | ||
777 | pos = lprint_Scsi_Cmnd ((Scsi_Cmnd *) hostdata->connected, | ||
778 | pos, buffer, length); | ||
779 | SPRINTF("scsi%d: issue_queue\n", HOSTNO); | ||
780 | check_offset(); | ||
781 | for (ptr = (Scsi_Cmnd *) hostdata->issue_queue; ptr; ptr = NEXT(ptr)) { | ||
782 | pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length); | ||
783 | check_offset(); | 787 | check_offset(); |
784 | } | 788 | local_irq_save(flags); |
789 | SPRINTF("NCR5380: coroutine is%s running.\n", | ||
790 | main_running ? "" : "n't"); | ||
791 | check_offset(); | ||
792 | if (!hostdata->connected) | ||
793 | SPRINTF("scsi%d: no currently connected command\n", HOSTNO); | ||
794 | else | ||
795 | pos = lprint_Scsi_Cmnd((Scsi_Cmnd *) hostdata->connected, | ||
796 | pos, buffer, length); | ||
797 | SPRINTF("scsi%d: issue_queue\n", HOSTNO); | ||
798 | check_offset(); | ||
799 | for (ptr = (Scsi_Cmnd *)hostdata->issue_queue; ptr; ptr = NEXT(ptr)) { | ||
800 | pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length); | ||
801 | check_offset(); | ||
802 | } | ||
785 | 803 | ||
786 | SPRINTF("scsi%d: disconnected_queue\n", HOSTNO); | 804 | SPRINTF("scsi%d: disconnected_queue\n", HOSTNO); |
787 | check_offset(); | ||
788 | for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; | ||
789 | ptr = NEXT(ptr)) { | ||
790 | pos = lprint_Scsi_Cmnd (ptr, pos, buffer, length); | ||
791 | check_offset(); | 805 | check_offset(); |
792 | } | 806 | for (ptr = (Scsi_Cmnd *) hostdata->disconnected_queue; ptr; |
807 | ptr = NEXT(ptr)) { | ||
808 | pos = lprint_Scsi_Cmnd(ptr, pos, buffer, length); | ||
809 | check_offset(); | ||
810 | } | ||
793 | 811 | ||
794 | local_irq_restore(flags); | 812 | local_irq_restore(flags); |
795 | *start = buffer + (offset - begin); | 813 | *start = buffer + (offset - begin); |
796 | if (pos - buffer < offset - begin) | 814 | if (pos - buffer < offset - begin) |
797 | return 0; | 815 | return 0; |
798 | else if (pos - buffer - (offset - begin) < length) | 816 | else if (pos - buffer - (offset - begin) < length) |
799 | return pos - buffer - (offset - begin); | 817 | return pos - buffer - (offset - begin); |
800 | return length; | 818 | return length; |
801 | } | 819 | } |
802 | 820 | ||
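The proc_info handler above follows the old-style /proc read contract: the driver formats everything into buffer, the check_offset() macro throws away output known to lie entirely before offset, and the final three-way return says how many bytes starting at offset are actually available. A minimal userspace sketch of that same begin/pos bookkeeping (the function name and the fake output are illustrative, not part of the driver):

#include <stdio.h>

/* Toy stand-in for the proc_info return protocol; 'buffer' must be large
 * enough for one pass of output, as in the real handler. */
static int fill_window(char *buffer, char **start, long offset, int length)
{
	char *pos = buffer;
	long begin = 0;
	int i;

	for (i = 0; i < 16; i++) {
		pos += sprintf(pos, "line %d of fake output\n", i);
		/* same idea as check_offset(): drop text entirely before 'offset' */
		if (pos - buffer < offset - begin) {
			begin += pos - buffer;
			pos = buffer;
		}
	}

	*start = buffer + (offset - begin);
	if (pos - buffer < offset - begin)
		return 0;				/* nothing at or past 'offset' */
	else if (pos - buffer - (offset - begin) < length)
		return pos - buffer - (offset - begin);	/* partial window */
	return length;					/* full window available */
}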
803 | static char * | 821 | static char *lprint_Scsi_Cmnd(Scsi_Cmnd *cmd, char *pos, char *buffer, int length) |
804 | lprint_Scsi_Cmnd (Scsi_Cmnd *cmd, char *pos, char *buffer, int length) | ||
805 | { | 822 | { |
806 | int i, s; | 823 | int i, s; |
807 | unsigned char *command; | 824 | unsigned char *command; |
808 | SPRINTF("scsi%d: destination target %d, lun %d\n", | 825 | SPRINTF("scsi%d: destination target %d, lun %d\n", |
809 | H_NO(cmd), cmd->device->id, cmd->device->lun); | 826 | H_NO(cmd), cmd->device->id, cmd->device->lun); |
810 | SPRINTF(" command = "); | 827 | SPRINTF(" command = "); |
811 | command = cmd->cmnd; | 828 | command = cmd->cmnd; |
812 | SPRINTF("%2d (0x%02x)", command[0], command[0]); | 829 | SPRINTF("%2d (0x%02x)", command[0], command[0]); |
813 | for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) | 830 | for (i = 1, s = COMMAND_SIZE(command[0]); i < s; ++i) |
814 | SPRINTF(" %02x", command[i]); | 831 | SPRINTF(" %02x", command[i]); |
815 | SPRINTF("\n"); | 832 | SPRINTF("\n"); |
816 | return pos; | 833 | return pos; |
817 | } | 834 | } |
818 | 835 | ||
819 | 836 | ||
820 | /* | 837 | /* |
821 | * Function : void NCR5380_init (struct Scsi_Host *instance) | 838 | * Function : void NCR5380_init (struct Scsi_Host *instance) |
822 | * | 839 | * |
823 | * Purpose : initializes *instance and corresponding 5380 chip. | 840 | * Purpose : initializes *instance and corresponding 5380 chip. |
824 | * | 841 | * |
825 | * Inputs : instance - instantiation of the 5380 driver. | 842 | * Inputs : instance - instantiation of the 5380 driver. |
826 | * | 843 | * |
827 | * Notes : I assume that the host, hostno, and id bits have been | 844 | * Notes : I assume that the host, hostno, and id bits have been |
828 | * set correctly. I don't care about the irq and other fields. | 845 | * set correctly. I don't care about the irq and other fields. |
829 | * | 846 | * |
830 | */ | 847 | */ |
831 | 848 | ||
832 | static int NCR5380_init (struct Scsi_Host *instance, int flags) | 849 | static int NCR5380_init(struct Scsi_Host *instance, int flags) |
833 | { | 850 | { |
834 | int i; | 851 | int i; |
835 | SETUP_HOSTDATA(instance); | 852 | SETUP_HOSTDATA(instance); |
836 | 853 | ||
837 | NCR5380_all_init(); | 854 | NCR5380_all_init(); |
838 | 855 | ||
839 | hostdata->aborted = 0; | 856 | hostdata->aborted = 0; |
840 | hostdata->id_mask = 1 << instance->this_id; | 857 | hostdata->id_mask = 1 << instance->this_id; |
841 | hostdata->id_higher_mask = 0; | 858 | hostdata->id_higher_mask = 0; |
842 | for (i = hostdata->id_mask; i <= 0x80; i <<= 1) | 859 | for (i = hostdata->id_mask; i <= 0x80; i <<= 1) |
843 | if (i > hostdata->id_mask) | 860 | if (i > hostdata->id_mask) |
844 | hostdata->id_higher_mask |= i; | 861 | hostdata->id_higher_mask |= i; |
845 | for (i = 0; i < 8; ++i) | 862 | for (i = 0; i < 8; ++i) |
846 | hostdata->busy[i] = 0; | 863 | hostdata->busy[i] = 0; |
847 | #ifdef SUPPORT_TAGS | 864 | #ifdef SUPPORT_TAGS |
848 | init_tags(); | 865 | init_tags(); |
849 | #endif | 866 | #endif |
850 | #if defined (REAL_DMA) | 867 | #if defined (REAL_DMA) |
851 | hostdata->dma_len = 0; | 868 | hostdata->dma_len = 0; |
852 | #endif | 869 | #endif |
853 | hostdata->targets_present = 0; | 870 | hostdata->targets_present = 0; |
854 | hostdata->connected = NULL; | 871 | hostdata->connected = NULL; |
855 | hostdata->issue_queue = NULL; | 872 | hostdata->issue_queue = NULL; |
856 | hostdata->disconnected_queue = NULL; | 873 | hostdata->disconnected_queue = NULL; |
857 | hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT; | 874 | hostdata->flags = FLAG_CHECK_LAST_BYTE_SENT; |
858 | 875 | ||
859 | if (!the_template) { | 876 | if (!the_template) { |
860 | the_template = instance->hostt; | 877 | the_template = instance->hostt; |
861 | first_instance = instance; | 878 | first_instance = instance; |
862 | } | 879 | } |
863 | |||
864 | 880 | ||
865 | #ifndef AUTOSENSE | 881 | #ifndef AUTOSENSE |
866 | if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1)) | 882 | if ((instance->cmd_per_lun > 1) || (instance->can_queue > 1)) |
867 | printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n" | 883 | printk("scsi%d: WARNING : support for multiple outstanding commands enabled\n" |
868 | " without AUTOSENSE option, contingent allegiance conditions may\n" | 884 | " without AUTOSENSE option, contingent allegiance conditions may\n" |
869 | " be incorrectly cleared.\n", HOSTNO); | 885 | " be incorrectly cleared.\n", HOSTNO); |
870 | #endif /* def AUTOSENSE */ | 886 | #endif /* def AUTOSENSE */ |
871 | 887 | ||
872 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 888 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
873 | NCR5380_write(MODE_REG, MR_BASE); | 889 | NCR5380_write(MODE_REG, MR_BASE); |
874 | NCR5380_write(TARGET_COMMAND_REG, 0); | 890 | NCR5380_write(TARGET_COMMAND_REG, 0); |
875 | NCR5380_write(SELECT_ENABLE_REG, 0); | 891 | NCR5380_write(SELECT_ENABLE_REG, 0); |
876 | 892 | ||
877 | return 0; | 893 | return 0; |
878 | } | 894 | } |
879 | 895 | ||
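NCR5380_init() precomputes two masks from the host's own SCSI ID: id_mask is the single bit the chip drives during arbitration, and id_higher_mask collects every higher-priority bit, which NCR5380_select() later compares against CURRENT_SCSI_DATA_REG to detect a lost arbitration. A small standalone illustration of the same loop (the chosen ID is just an example):

#include <stdio.h>

int main(void)
{
	int this_id = 2;			/* e.g. host adapter at SCSI ID 2 */
	unsigned int id_mask = 1u << this_id;	/* 0x04 */
	unsigned int id_higher_mask = 0;
	unsigned int i;

	/* same loop shape as in NCR5380_init() */
	for (i = id_mask; i <= 0x80; i <<= 1)
		if (i > id_mask)
			id_higher_mask |= i;

	/* prints id_mask=0x04 id_higher_mask=0xf8 */
	printf("id_mask=0x%02x id_higher_mask=0x%02x\n", id_mask, id_higher_mask);
	return 0;
}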
880 | /* | 896 | /* |
881 | * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd, | 897 | * our own old-style timeout update |
882 | * void (*done)(Scsi_Cmnd *)) | 898 | */ |
899 | /* | ||
900 | * The strategy is to cause the timer code to call scsi_times_out() | ||
901 | * when the soonest timeout is pending. | ||
902 | * The arguments are used when we are queueing a new command, because | ||
903 | * we do not want to subtract the time used from this time, but when we | ||
904 | * set the timer, we want to take this value into account. | ||
905 | */ | ||
906 | |||
907 | int atari_scsi_update_timeout(Scsi_Cmnd * SCset, int timeout) | ||
908 | { | ||
909 | int rtn; | ||
910 | |||
911 | /* | ||
912 | * We are using the new error handling code to actually register/deregister | ||
913 | * timers for timeout. | ||
914 | */ | ||
915 | |||
916 | if (!timer_pending(&SCset->eh_timeout)) | ||
917 | rtn = 0; | ||
918 | else | ||
919 | rtn = SCset->eh_timeout.expires - jiffies; | ||
920 | |||
921 | if (timeout == 0) { | ||
922 | del_timer(&SCset->eh_timeout); | ||
923 | SCset->eh_timeout.data = (unsigned long)NULL; | ||
924 | SCset->eh_timeout.expires = 0; | ||
925 | } else { | ||
926 | if (SCset->eh_timeout.data != (unsigned long)NULL) | ||
927 | del_timer(&SCset->eh_timeout); | ||
928 | SCset->eh_timeout.data = (unsigned long)SCset; | ||
929 | SCset->eh_timeout.expires = jiffies + timeout; | ||
930 | add_timer(&SCset->eh_timeout); | ||
931 | } | ||
932 | return rtn; | ||
933 | } | ||
934 | |||
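A typical caller uses the return value to pause the command's error-handling timer across an operation that may block for an unbounded time, then re-arms it with whatever time was left, exactly as NCR5380_queue_command() does around falcon_get_lock() further down. Usage fragment only; blocking_operation() is a placeholder:

	int remaining;

	remaining = atari_scsi_update_timeout(cmd, 0);	/* stop the clock, remember the balance */
	blocking_operation();				/* e.g. waiting for the ST-DMA lock */
	atari_scsi_update_timeout(cmd, remaining);	/* re-arm with the time that was left */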
935 | /* | ||
936 | * Function : int NCR5380_queue_command (Scsi_Cmnd *cmd, | ||
937 | * void (*done)(Scsi_Cmnd *)) | ||
883 | * | 938 | * |
884 | * Purpose : enqueues a SCSI command | 939 | * Purpose : enqueues a SCSI command |
885 | * | 940 | * |
886 | * Inputs : cmd - SCSI command, done - function called on completion, with | 941 | * Inputs : cmd - SCSI command, done - function called on completion, with |
887 | * a pointer to the command descriptor. | 942 | * a pointer to the command descriptor. |
888 | * | 943 | * |
889 | * Returns : 0 | 944 | * Returns : 0 |
890 | * | 945 | * |
891 | * Side effects : | 946 | * Side effects : |
892 | * cmd is added to the per instance issue_queue, with minor | 947 | * cmd is added to the per instance issue_queue, with minor |
893 | * twiddling done to the host specific fields of cmd. If the | 948 | * twiddling done to the host specific fields of cmd. If the |
894 | * main coroutine is not running, it is restarted. | 949 | * main coroutine is not running, it is restarted. |
895 | * | 950 | * |
896 | */ | 951 | */ |
897 | 952 | ||
898 | static | 953 | static int NCR5380_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) |
899 | int NCR5380_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) | ||
900 | { | 954 | { |
901 | SETUP_HOSTDATA(cmd->device->host); | 955 | SETUP_HOSTDATA(cmd->device->host); |
902 | Scsi_Cmnd *tmp; | 956 | Scsi_Cmnd *tmp; |
903 | int oldto; | 957 | int oldto; |
904 | unsigned long flags; | 958 | unsigned long flags; |
905 | extern int update_timeout(Scsi_Cmnd * SCset, int timeout); | 959 | // extern int update_timeout(Scsi_Cmnd * SCset, int timeout); |
906 | 960 | ||
907 | #if (NDEBUG & NDEBUG_NO_WRITE) | 961 | #if (NDEBUG & NDEBUG_NO_WRITE) |
908 | switch (cmd->cmnd[0]) { | 962 | switch (cmd->cmnd[0]) { |
909 | case WRITE_6: | 963 | case WRITE_6: |
910 | case WRITE_10: | 964 | case WRITE_10: |
911 | printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", | 965 | printk(KERN_NOTICE "scsi%d: WRITE attempted with NO_WRITE debugging flag set\n", |
912 | H_NO(cmd)); | 966 | H_NO(cmd)); |
913 | cmd->result = (DID_ERROR << 16); | 967 | cmd->result = (DID_ERROR << 16); |
914 | done(cmd); | 968 | done(cmd); |
915 | return 0; | 969 | return 0; |
916 | } | 970 | } |
917 | #endif /* (NDEBUG & NDEBUG_NO_WRITE) */ | 971 | #endif /* (NDEBUG & NDEBUG_NO_WRITE) */ |
918 | 972 | ||
919 | |||
920 | #ifdef NCR5380_STATS | 973 | #ifdef NCR5380_STATS |
921 | # if 0 | 974 | # if 0 |
922 | if (!hostdata->connected && !hostdata->issue_queue && | 975 | if (!hostdata->connected && !hostdata->issue_queue && |
923 | !hostdata->disconnected_queue) { | 976 | !hostdata->disconnected_queue) { |
924 | hostdata->timebase = jiffies; | 977 | hostdata->timebase = jiffies; |
925 | } | 978 | } |
926 | # endif | 979 | # endif |
927 | # ifdef NCR5380_STAT_LIMIT | 980 | # ifdef NCR5380_STAT_LIMIT |
928 | if (cmd->request_bufflen > NCR5380_STAT_LIMIT) | 981 | if (cmd->request_bufflen > NCR5380_STAT_LIMIT) |
929 | # endif | 982 | # endif |
930 | switch (cmd->cmnd[0]) | 983 | switch (cmd->cmnd[0]) { |
931 | { | 984 | case WRITE: |
932 | case WRITE: | 985 | case WRITE_6: |
933 | case WRITE_6: | 986 | case WRITE_10: |
934 | case WRITE_10: | 987 | hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase); |
935 | hostdata->time_write[cmd->device->id] -= (jiffies - hostdata->timebase); | 988 | hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen; |
936 | hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen; | 989 | hostdata->pendingw++; |
937 | hostdata->pendingw++; | 990 | break; |
938 | break; | 991 | case READ: |
939 | case READ: | 992 | case READ_6: |
940 | case READ_6: | 993 | case READ_10: |
941 | case READ_10: | 994 | hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase); |
942 | hostdata->time_read[cmd->device->id] -= (jiffies - hostdata->timebase); | 995 | hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen; |
943 | hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen; | 996 | hostdata->pendingr++; |
944 | hostdata->pendingr++; | 997 | break; |
945 | break; | 998 | } |
946 | } | ||
947 | #endif | 999 | #endif |
948 | 1000 | ||
949 | /* | 1001 | /* |
950 | * We use the host_scribble field as a pointer to the next command | 1002 | * We use the host_scribble field as a pointer to the next command |
951 | * in a queue | 1003 | * in a queue |
952 | */ | 1004 | */ |
953 | 1005 | ||
954 | NEXT(cmd) = NULL; | 1006 | SET_NEXT(cmd, NULL); |
955 | cmd->scsi_done = done; | 1007 | cmd->scsi_done = done; |
956 | 1008 | ||
957 | cmd->result = 0; | 1009 | cmd->result = 0; |
958 | 1010 | ||
959 | 1011 | /* | |
960 | /* | 1012 | * Insert the cmd into the issue queue. Note that REQUEST SENSE |
961 | * Insert the cmd into the issue queue. Note that REQUEST SENSE | 1013 | * commands are added to the head of the queue since any command will |
962 | * commands are added to the head of the queue since any command will | 1014 | * clear the contingent allegiance condition that exists and the |
963 | * clear the contingent allegiance condition that exists and the | 1015 | * sense data is only guaranteed to be valid while the condition exists. |
964 | * sense data is only guaranteed to be valid while the condition exists. | 1016 | */ |
965 | */ | 1017 | |
966 | 1018 | local_irq_save(flags); | |
967 | local_irq_save(flags); | 1019 | /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. |
968 | /* ++guenther: now that the issue queue is being set up, we can lock ST-DMA. | 1020 | * Otherwise a running NCR5380_main may steal the lock. |
969 | * Otherwise a running NCR5380_main may steal the lock. | 1021 | * Lock before actually inserting due to fairness reasons explained in |
970 | * Lock before actually inserting due to fairness reasons explained in | 1022 | * atari_scsi.c. If we insert first, then it's impossible for this driver |
971 | * atari_scsi.c. If we insert first, then it's impossible for this driver | 1023 | * to release the lock. |
972 | * to release the lock. | 1024 | * Stop timer for this command while waiting for the lock, or timeouts |
973 | * Stop timer for this command while waiting for the lock, or timeouts | 1025 | * may happen (and they really do), and it's no good if the command doesn't |
974 | * may happen (and they really do), and it's no good if the command doesn't | 1026 | * appear in any of the queues. |
975 | * appear in any of the queues. | 1027 | * ++roman: Just disabling the NCR interrupt isn't sufficient here, |
976 | * ++roman: Just disabling the NCR interrupt isn't sufficient here, | 1028 | * because also a timer int can trigger an abort or reset, which would |
977 | * because also a timer int can trigger an abort or reset, which would | 1029 | * alter queues and touch the lock. |
978 | * alter queues and touch the lock. | 1030 | */ |
979 | */ | 1031 | if (!IS_A_TT()) { |
980 | if (!IS_A_TT()) { | 1032 | oldto = atari_scsi_update_timeout(cmd, 0); |
981 | oldto = update_timeout(cmd, 0); | 1033 | falcon_get_lock(); |
982 | falcon_get_lock(); | 1034 | atari_scsi_update_timeout(cmd, oldto); |
983 | update_timeout(cmd, oldto); | 1035 | } |
984 | } | 1036 | if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { |
985 | if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) { | 1037 | LIST(cmd, hostdata->issue_queue); |
986 | LIST(cmd, hostdata->issue_queue); | 1038 | SET_NEXT(cmd, hostdata->issue_queue); |
987 | NEXT(cmd) = hostdata->issue_queue; | 1039 | hostdata->issue_queue = cmd; |
988 | hostdata->issue_queue = cmd; | 1040 | } else { |
989 | } else { | 1041 | for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; |
990 | for (tmp = (Scsi_Cmnd *)hostdata->issue_queue; | 1042 | NEXT(tmp); tmp = NEXT(tmp)) |
991 | NEXT(tmp); tmp = NEXT(tmp)) | 1043 | ; |
992 | ; | 1044 | LIST(cmd, tmp); |
993 | LIST(cmd, tmp); | 1045 | SET_NEXT(tmp, cmd); |
994 | NEXT(tmp) = cmd; | 1046 | } |
995 | } | 1047 | local_irq_restore(flags); |
996 | local_irq_restore(flags); | 1048 | |
997 | 1049 | QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), | |
998 | QU_PRINTK("scsi%d: command added to %s of queue\n", H_NO(cmd), | 1050 | (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); |
999 | (cmd->cmnd[0] == REQUEST_SENSE) ? "head" : "tail"); | 1051 | |
1000 | 1052 | /* If queue_command() is called from an interrupt (real one or bottom | |
1001 | /* If queue_command() is called from an interrupt (real one or bottom | 1053 | * half), we let queue_main() do the job of taking care about main. If it |
1002 | * half), we let queue_main() do the job of taking care of main. If it | 1053 | * half), we let queue_main() do the job of taking care of main. If it |
1003 | * is already running, this is a no-op, else main will be queued. | 1055 | * |
1004 | * | 1056 | * If we're not in an interrupt, we can call NCR5380_main() |
1005 | * If we're not in an interrupt, we can call NCR5380_main() | 1057 | * unconditionally, because it cannot be already running. |
1006 | * unconditionally, because it cannot be already running. | 1058 | */ |
1007 | */ | 1059 | if (in_interrupt() || ((flags >> 8) & 7) >= 6) |
1008 | if (in_interrupt() || ((flags >> 8) & 7) >= 6) | 1060 | queue_main(); |
1009 | queue_main(); | 1061 | else |
1010 | else | 1062 | NCR5380_main(NULL); |
1011 | NCR5380_main(NULL); | 1063 | return 0; |
1012 | return 0; | ||
1013 | } | 1064 | } |
1014 | 1065 | ||
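The insertion logic above threads the queue through the commands themselves (host_scribble, accessed via NEXT()/SET_NEXT()), putting REQUEST SENSE at the head, since the sense data is only valid while the contingent allegiance condition lasts, and appending everything else at the tail. A toy userspace model of that list discipline, with illustrative names:

#include <stddef.h>

struct fake_cmd {
	unsigned char opcode;
	struct fake_cmd *next;	/* stands in for host_scribble / NEXT() */
};

#define REQUEST_SENSE 0x03

static void issue_queue_insert(struct fake_cmd **queue, struct fake_cmd *cmd)
{
	cmd->next = NULL;

	if (!*queue || cmd->opcode == REQUEST_SENSE) {
		/* head insert: sense data is only valid while the condition exists */
		cmd->next = *queue;
		*queue = cmd;
	} else {
		struct fake_cmd *tmp = *queue;

		while (tmp->next)	/* walk to the tail */
			tmp = tmp->next;
		tmp->next = cmd;
	}
}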
1015 | /* | 1066 | /* |
1016 | * Function : NCR5380_main (void) | 1067 | * Function : NCR5380_main (void) |
1017 | * | 1068 | * |
1018 | * Purpose : NCR5380_main is a coroutine that runs as long as more work can | 1069 | * Purpose : NCR5380_main is a coroutine that runs as long as more work can |
1019 | * be done on the NCR5380 host adapters in a system. Both | 1070 | * be done on the NCR5380 host adapters in a system. Both |
1020 | * NCR5380_queue_command() and NCR5380_intr() will try to start it | 1071 | * NCR5380_queue_command() and NCR5380_intr() will try to start it |
1021 | * in case it is not running. | 1072 | * in case it is not running. |
1022 | * | 1073 | * |
1023 | * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should | 1074 | * NOTE : NCR5380_main exits with interrupts *disabled*, the caller should |
1024 | * reenable them. This prevents reentrancy and kernel stack overflow. | 1075 | * reenable them. This prevents reentrancy and kernel stack overflow. |
1025 | */ | 1076 | */ |
1026 | 1077 | ||
1027 | static void NCR5380_main (void *bl) | 1078 | static void NCR5380_main(struct work_struct *work) |
1028 | { | 1079 | { |
1029 | Scsi_Cmnd *tmp, *prev; | 1080 | Scsi_Cmnd *tmp, *prev; |
1030 | struct Scsi_Host *instance = first_instance; | 1081 | struct Scsi_Host *instance = first_instance; |
1031 | struct NCR5380_hostdata *hostdata = HOSTDATA(instance); | 1082 | struct NCR5380_hostdata *hostdata = HOSTDATA(instance); |
1032 | int done; | 1083 | int done; |
1033 | unsigned long flags; | 1084 | unsigned long flags; |
1034 | 1085 | ||
1035 | /* | 1086 | /* |
1036 | * We run (with interrupts disabled) until we're sure that none of | 1087 | * We run (with interrupts disabled) until we're sure that none of |
1037 | * the host adapters have anything that can be done, at which point | 1088 | * the host adapters have anything that can be done, at which point |
1038 | * we set main_running to 0 and exit. | 1089 | * we set main_running to 0 and exit. |
1039 | * | 1090 | * |
1040 | * Interrupts are enabled before doing various other internal | 1091 | * Interrupts are enabled before doing various other internal |
1041 | * instructions, after we've decided that we need to run through | 1092 | * instructions, after we've decided that we need to run through |
1042 | * the loop again. | 1093 | * the loop again. |
1043 | * | 1094 | * |
1044 | * this should prevent any race conditions. | 1095 | * this should prevent any race conditions. |
1045 | * | 1096 | * |
1046 | * ++roman: Just disabling the NCR interrupt isn't sufficient here, | 1097 | * ++roman: Just disabling the NCR interrupt isn't sufficient here, |
1047 | * because also a timer int can trigger an abort or reset, which can | 1098 | * because also a timer int can trigger an abort or reset, which can |
1048 | * alter queues and touch the Falcon lock. | 1099 | * alter queues and touch the Falcon lock. |
1049 | */ | 1100 | */ |
1050 | 1101 | ||
1051 | /* Tell int handlers main() is now already executing. Note that | 1102 | /* Tell int handlers main() is now already executing. Note that |
1052 | no races are possible here. If an int comes in before | 1103 | no races are possible here. If an int comes in before |
1053 | 'main_running' is set here, and queues/executes main via the | 1104 | 'main_running' is set here, and queues/executes main via the |
1054 | task queue, it doesn't do any harm, just this instance of main | 1105 | task queue, it doesn't do any harm, just this instance of main |
1055 | won't find any work left to do. */ | 1106 | won't find any work left to do. */ |
1056 | if (main_running) | 1107 | if (main_running) |
1057 | return; | 1108 | return; |
1058 | main_running = 1; | 1109 | main_running = 1; |
1059 | 1110 | ||
1060 | local_save_flags(flags); | 1111 | local_save_flags(flags); |
1061 | do { | 1112 | do { |
1062 | local_irq_disable(); /* Freeze request queues */ | 1113 | local_irq_disable(); /* Freeze request queues */ |
1063 | done = 1; | 1114 | done = 1; |
1064 | 1115 | ||
1065 | if (!hostdata->connected) { | 1116 | if (!hostdata->connected) { |
1066 | MAIN_PRINTK( "scsi%d: not connected\n", HOSTNO ); | 1117 | MAIN_PRINTK("scsi%d: not connected\n", HOSTNO); |
1067 | /* | 1118 | /* |
1068 | * Search through the issue_queue for a command destined | 1119 | * Search through the issue_queue for a command destined |
1069 | * for a target that's not busy. | 1120 | * for a target that's not busy. |
1070 | */ | 1121 | */ |
1071 | #if (NDEBUG & NDEBUG_LISTS) | 1122 | #if (NDEBUG & NDEBUG_LISTS) |
1072 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; | 1123 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, prev = NULL; |
1073 | tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp)) | 1124 | tmp && (tmp != prev); prev = tmp, tmp = NEXT(tmp)) |
1074 | ; | 1125 | ; |
1075 | /*printk("%p ", tmp);*/ | 1126 | /*printk("%p ", tmp);*/ |
1076 | if ((tmp == prev) && tmp) printk(" LOOP\n");/* else printk("\n");*/ | 1127 | if ((tmp == prev) && tmp) |
1128 | printk(" LOOP\n"); | ||
1129 | /* else printk("\n"); */ | ||
1077 | #endif | 1130 | #endif |
1078 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, | 1131 | for (tmp = (Scsi_Cmnd *) hostdata->issue_queue, |
1079 | prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp) ) { | 1132 | prev = NULL; tmp; prev = tmp, tmp = NEXT(tmp)) { |
1080 | 1133 | ||
1081 | #if (NDEBUG & NDEBUG_LISTS) | 1134 | #if (NDEBUG & NDEBUG_LISTS) |
1082 | if (prev != tmp) | 1135 | if (prev != tmp) |
1083 | printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", | 1136 | printk("MAIN tmp=%p target=%d busy=%d lun=%d\n", |
1084 | tmp, tmp->device->id, hostdata->busy[tmp->device->id], | 1137 | tmp, tmp->device->id, hostdata->busy[tmp->device->id], |
1085 | tmp->device->lun); | 1138 | tmp->device->lun); |
1086 | #endif | 1139 | #endif |
1087 | /* When we find one, remove it from the issue queue. */ | 1140 | /* When we find one, remove it from the issue queue. */ |
1088 | /* ++guenther: possible race with Falcon locking */ | 1141 | /* ++guenther: possible race with Falcon locking */ |
1089 | if ( | 1142 | if ( |
1090 | #ifdef SUPPORT_TAGS | 1143 | #ifdef SUPPORT_TAGS |
1091 | !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE) | 1144 | !is_lun_busy( tmp, tmp->cmnd[0] != REQUEST_SENSE) |
1092 | #else | 1145 | #else |
1093 | !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun)) | 1146 | !(hostdata->busy[tmp->device->id] & (1 << tmp->device->lun)) |
1094 | #endif | 1147 | #endif |
1095 | ) { | 1148 | ) { |
1096 | /* ++guenther: just to be sure, this must be atomic */ | 1149 | /* ++guenther: just to be sure, this must be atomic */ |
1097 | local_irq_disable(); | 1150 | local_irq_disable(); |
1098 | if (prev) { | 1151 | if (prev) { |
1099 | REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); | 1152 | REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); |
1100 | NEXT(prev) = NEXT(tmp); | 1153 | SET_NEXT(prev, NEXT(tmp)); |
1101 | } else { | 1154 | } else { |
1102 | REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp)); | 1155 | REMOVE(-1, hostdata->issue_queue, tmp, NEXT(tmp)); |
1103 | hostdata->issue_queue = NEXT(tmp); | 1156 | hostdata->issue_queue = NEXT(tmp); |
1104 | } | 1157 | } |
1105 | NEXT(tmp) = NULL; | 1158 | SET_NEXT(tmp, NULL); |
1106 | falcon_dont_release++; | 1159 | falcon_dont_release++; |
1107 | 1160 | ||
1108 | /* reenable interrupts after finding one */ | 1161 | /* reenable interrupts after finding one */ |
1109 | local_irq_restore(flags); | 1162 | local_irq_restore(flags); |
1110 | 1163 | ||
1111 | /* | 1164 | /* |
1112 | * Attempt to establish an I_T_L nexus here. | 1165 | * Attempt to establish an I_T_L nexus here. |
1113 | * On success, instance->hostdata->connected is set. | 1166 | * On success, instance->hostdata->connected is set. |
1114 | * On failure, we must add the command back to the | 1167 | * On failure, we must add the command back to the |
1115 | * issue queue so we can keep trying. | 1168 | * issue queue so we can keep trying. |
1116 | */ | 1169 | */ |
1117 | MAIN_PRINTK("scsi%d: main(): command for target %d " | 1170 | MAIN_PRINTK("scsi%d: main(): command for target %d " |
1118 | "lun %d removed from issue_queue\n", | 1171 | "lun %d removed from issue_queue\n", |
1119 | HOSTNO, tmp->device->id, tmp->device->lun); | 1172 | HOSTNO, tmp->device->id, tmp->device->lun); |
1120 | /* | 1173 | /* |
1121 | * REQUEST SENSE commands are issued without tagged | 1174 | * REQUEST SENSE commands are issued without tagged |
1122 | * queueing, even on SCSI-II devices because the | 1175 | * queueing, even on SCSI-II devices because the |
1123 | * contingent allegiance condition exists for the | 1176 | * contingent allegiance condition exists for the |
1124 | * entire unit. | 1177 | * entire unit. |
1125 | */ | 1178 | */ |
1126 | /* ++roman: ...and the standard also requires that | 1179 | /* ++roman: ...and the standard also requires that |
1127 | * REQUEST SENSE commands are untagged. | 1180 | * REQUEST SENSE commands are untagged. |
1128 | */ | 1181 | */ |
1129 | 1182 | ||
1130 | #ifdef SUPPORT_TAGS | 1183 | #ifdef SUPPORT_TAGS |
1131 | cmd_get_tag( tmp, tmp->cmnd[0] != REQUEST_SENSE ); | 1184 | cmd_get_tag(tmp, tmp->cmnd[0] != REQUEST_SENSE); |
1132 | #endif | 1185 | #endif |
1133 | if (!NCR5380_select(instance, tmp, | 1186 | if (!NCR5380_select(instance, tmp, |
1134 | (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : | 1187 | (tmp->cmnd[0] == REQUEST_SENSE) ? TAG_NONE : |
1135 | TAG_NEXT)) { | 1188 | TAG_NEXT)) { |
1136 | falcon_dont_release--; | 1189 | falcon_dont_release--; |
1137 | /* release if target did not respond! */ | 1190 | /* release if target did not respond! */ |
1138 | falcon_release_lock_if_possible( hostdata ); | 1191 | falcon_release_lock_if_possible(hostdata); |
1139 | break; | 1192 | break; |
1140 | } else { | 1193 | } else { |
1141 | local_irq_disable(); | 1194 | local_irq_disable(); |
1142 | LIST(tmp, hostdata->issue_queue); | 1195 | LIST(tmp, hostdata->issue_queue); |
1143 | NEXT(tmp) = hostdata->issue_queue; | 1196 | SET_NEXT(tmp, hostdata->issue_queue); |
1144 | hostdata->issue_queue = tmp; | 1197 | hostdata->issue_queue = tmp; |
1145 | #ifdef SUPPORT_TAGS | 1198 | #ifdef SUPPORT_TAGS |
1146 | cmd_free_tag( tmp ); | 1199 | cmd_free_tag(tmp); |
1147 | #endif | 1200 | #endif |
1148 | falcon_dont_release--; | 1201 | falcon_dont_release--; |
1149 | local_irq_restore(flags); | 1202 | local_irq_restore(flags); |
1150 | MAIN_PRINTK("scsi%d: main(): select() failed, " | 1203 | MAIN_PRINTK("scsi%d: main(): select() failed, " |
1151 | "returned to issue_queue\n", HOSTNO); | 1204 | "returned to issue_queue\n", HOSTNO); |
1152 | if (hostdata->connected) | 1205 | if (hostdata->connected) |
1153 | break; | 1206 | break; |
1154 | } | 1207 | } |
1155 | } /* if target/lun/target queue is not busy */ | 1208 | } /* if target/lun/target queue is not busy */ |
1156 | } /* for issue_queue */ | 1209 | } /* for issue_queue */ |
1157 | } /* if (!hostdata->connected) */ | 1210 | } /* if (!hostdata->connected) */ |
1158 | 1211 | ||
1159 | if (hostdata->connected | 1212 | if (hostdata->connected |
1160 | #ifdef REAL_DMA | 1213 | #ifdef REAL_DMA |
1161 | && !hostdata->dma_len | 1214 | && !hostdata->dma_len |
1162 | #endif | 1215 | #endif |
1163 | ) { | 1216 | ) { |
1164 | local_irq_restore(flags); | 1217 | local_irq_restore(flags); |
1165 | MAIN_PRINTK("scsi%d: main: performing information transfer\n", | 1218 | MAIN_PRINTK("scsi%d: main: performing information transfer\n", |
1166 | HOSTNO); | 1219 | HOSTNO); |
1167 | NCR5380_information_transfer(instance); | 1220 | NCR5380_information_transfer(instance); |
1168 | MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); | 1221 | MAIN_PRINTK("scsi%d: main: done set false\n", HOSTNO); |
1169 | done = 0; | 1222 | done = 0; |
1170 | } | 1223 | } |
1171 | } while (!done); | 1224 | } while (!done); |
1172 | 1225 | ||
1173 | /* Better allow ints _after_ 'main_running' has been cleared, else | 1226 | /* Better allow ints _after_ 'main_running' has been cleared, else |
1174 | an interrupt could believe we'll pick up the work it left for | 1227 | an interrupt could believe we'll pick up the work it left for |
1175 | us, but we won't see it anymore here... */ | 1228 | us, but we won't see it anymore here... */ |
1176 | main_running = 0; | 1229 | main_running = 0; |
1177 | local_irq_restore(flags); | 1230 | local_irq_restore(flags); |
1178 | } | 1231 | } |
1179 | 1232 | ||
1180 | 1233 | ||
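The signature change to NCR5380_main(struct work_struct *work) follows the 2.6.20 workqueue API, where work handlers receive the work item instead of a void pointer. The sketch below shows the usual wiring for a queue_main()-style helper; the work-item name and the body of queue_main() are assumptions for illustration, not lifted from this patch:

#include <linux/workqueue.h>

static void NCR5380_main(struct work_struct *work);	/* handler as in the driver */

/* a single statically initialised work item for the coroutine */
static DECLARE_WORK(ncr_main_work, NCR5380_main);

static inline void queue_main(void)
{
	/* schedule_work() returns immediately if the item is already queued */
	schedule_work(&ncr_main_work);
}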
@@ -1183,1441 +1236,1439 @@ static void NCR5380_main (void *bl) | |||
1183 | * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) | 1236 | * Function : void NCR5380_dma_complete (struct Scsi_Host *instance) |
1184 | * | 1237 | * |
1185 | * Purpose : Called by interrupt handler when DMA finishes or a phase | 1238 | * Purpose : Called by interrupt handler when DMA finishes or a phase |
1186 | * mismatch occurs (which would finish the DMA transfer). | 1239 | * mismatch occurs (which would finish the DMA transfer). |
1187 | * | 1240 | * |
1188 | * Inputs : instance - this instance of the NCR5380. | 1241 | * Inputs : instance - this instance of the NCR5380. |
1189 | * | 1242 | * |
1190 | */ | 1243 | */ |
1191 | 1244 | ||
1192 | static void NCR5380_dma_complete( struct Scsi_Host *instance ) | 1245 | static void NCR5380_dma_complete(struct Scsi_Host *instance) |
1193 | { | 1246 | { |
1194 | SETUP_HOSTDATA(instance); | 1247 | SETUP_HOSTDATA(instance); |
1195 | int transfered, saved_data = 0, overrun = 0, cnt, toPIO; | 1248 | int transfered, saved_data = 0, overrun = 0, cnt, toPIO; |
1196 | unsigned char **data, p; | 1249 | unsigned char **data, p; |
1197 | volatile int *count; | 1250 | volatile int *count; |
1198 | 1251 | ||
1199 | if (!hostdata->connected) { | 1252 | if (!hostdata->connected) { |
1200 | printk(KERN_WARNING "scsi%d: received end of DMA interrupt with " | 1253 | printk(KERN_WARNING "scsi%d: received end of DMA interrupt with " |
1201 | "no connected cmd\n", HOSTNO); | 1254 | "no connected cmd\n", HOSTNO); |
1202 | return; | 1255 | return; |
1203 | } | ||
1204 | |||
1205 | if (atari_read_overruns) { | ||
1206 | p = hostdata->connected->SCp.phase; | ||
1207 | if (p & SR_IO) { | ||
1208 | udelay(10); | ||
1209 | if ((((NCR5380_read(BUS_AND_STATUS_REG)) & | ||
1210 | (BASR_PHASE_MATCH|BASR_ACK)) == | ||
1211 | (BASR_PHASE_MATCH|BASR_ACK))) { | ||
1212 | saved_data = NCR5380_read(INPUT_DATA_REG); | ||
1213 | overrun = 1; | ||
1214 | DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO); | ||
1215 | } | ||
1216 | } | 1256 | } |
1217 | } | 1257 | |
1218 | 1258 | if (atari_read_overruns) { | |
1219 | DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", | 1259 | p = hostdata->connected->SCp.phase; |
1220 | HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), | 1260 | if (p & SR_IO) { |
1221 | NCR5380_read(STATUS_REG)); | 1261 | udelay(10); |
1222 | 1262 | if ((NCR5380_read(BUS_AND_STATUS_REG) & | |
1223 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1263 | (BASR_PHASE_MATCH|BASR_ACK)) == |
1224 | NCR5380_write(MODE_REG, MR_BASE); | 1264 | (BASR_PHASE_MATCH|BASR_ACK)) { |
1225 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1265 | saved_data = NCR5380_read(INPUT_DATA_REG); |
1226 | 1266 | overrun = 1; | |
1227 | transfered = hostdata->dma_len - NCR5380_dma_residual(instance); | 1267 | DMA_PRINTK("scsi%d: read overrun handled\n", HOSTNO); |
1228 | hostdata->dma_len = 0; | 1268 | } |
1229 | 1269 | } | |
1230 | data = (unsigned char **) &(hostdata->connected->SCp.ptr); | 1270 | } |
1231 | count = &(hostdata->connected->SCp.this_residual); | 1271 | |
1232 | *data += transfered; | 1272 | DMA_PRINTK("scsi%d: real DMA transfer complete, basr 0x%X, sr 0x%X\n", |
1233 | *count -= transfered; | 1273 | HOSTNO, NCR5380_read(BUS_AND_STATUS_REG), |
1234 | 1274 | NCR5380_read(STATUS_REG)); | |
1235 | if (atari_read_overruns) { | 1275 | |
1236 | if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { | 1276 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
1237 | cnt = toPIO = atari_read_overruns; | 1277 | NCR5380_write(MODE_REG, MR_BASE); |
1238 | if (overrun) { | 1278 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
1239 | DMA_PRINTK("Got an input overrun, using saved byte\n"); | 1279 | |
1240 | *(*data)++ = saved_data; | 1280 | transfered = hostdata->dma_len - NCR5380_dma_residual(instance); |
1241 | (*count)--; | 1281 | hostdata->dma_len = 0; |
1242 | cnt--; | 1282 | |
1243 | toPIO--; | 1283 | data = (unsigned char **)&hostdata->connected->SCp.ptr; |
1244 | } | 1284 | count = &hostdata->connected->SCp.this_residual; |
1245 | DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); | 1285 | *data += transfered; |
1246 | NCR5380_transfer_pio(instance, &p, &cnt, data); | 1286 | *count -= transfered; |
1247 | *count -= toPIO - cnt; | 1287 | |
1288 | if (atari_read_overruns) { | ||
1289 | if ((NCR5380_read(STATUS_REG) & PHASE_MASK) == p && (p & SR_IO)) { | ||
1290 | cnt = toPIO = atari_read_overruns; | ||
1291 | if (overrun) { | ||
1292 | DMA_PRINTK("Got an input overrun, using saved byte\n"); | ||
1293 | *(*data)++ = saved_data; | ||
1294 | (*count)--; | ||
1295 | cnt--; | ||
1296 | toPIO--; | ||
1297 | } | ||
1298 | DMA_PRINTK("Doing %d-byte PIO to 0x%08lx\n", cnt, (long)*data); | ||
1299 | NCR5380_transfer_pio(instance, &p, &cnt, data); | ||
1300 | *count -= toPIO - cnt; | ||
1301 | } | ||
1248 | } | 1302 | } |
1249 | } | ||
1250 | } | 1303 | } |
1251 | #endif /* REAL_DMA */ | 1304 | #endif /* REAL_DMA */ |
1252 | 1305 | ||
1253 | 1306 | ||
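NCR5380_dma_complete() converts the chip's residual count into progress on the current scatter/gather segment: transfered = dma_len - residual, then the data pointer advances and this_residual shrinks by that amount before any overrun bytes are pulled in by PIO. A minimal stand-alone model of that bookkeeping (the struct and names are illustrative):

struct sg_cursor {
	unsigned char *ptr;	/* stands in for SCp.ptr */
	int this_residual;	/* stands in for SCp.this_residual */
};

static void dma_advance(struct sg_cursor *sc, int dma_len, int residual)
{
	int transferred = dma_len - residual;	/* bytes actually moved by DMA */

	sc->ptr += transferred;
	sc->this_residual -= transferred;
}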
1254 | /* | 1307 | /* |
1255 | * Function : void NCR5380_intr (int irq) | 1308 | * Function : void NCR5380_intr (int irq) |
1256 | * | 1309 | * |
1257 | * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses | 1310 | * Purpose : handle interrupts, reestablishing I_T_L or I_T_L_Q nexuses |
1258 | * from the disconnected queue, and restarting NCR5380_main() | 1311 | * from the disconnected queue, and restarting NCR5380_main() |
1259 | * as required. | 1312 | * as required. |
1260 | * | 1313 | * |
1261 | * Inputs : int irq, irq that caused this interrupt. | 1314 | * Inputs : int irq, irq that caused this interrupt. |
1262 | * | 1315 | * |
1263 | */ | 1316 | */ |
1264 | 1317 | ||
1265 | static irqreturn_t NCR5380_intr (int irq, void *dev_id) | 1318 | static irqreturn_t NCR5380_intr(int irq, void *dev_id) |
1266 | { | 1319 | { |
1267 | struct Scsi_Host *instance = first_instance; | 1320 | struct Scsi_Host *instance = first_instance; |
1268 | int done = 1, handled = 0; | 1321 | int done = 1, handled = 0; |
1269 | unsigned char basr; | 1322 | unsigned char basr; |
1270 | 1323 | ||
1271 | INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); | 1324 | INT_PRINTK("scsi%d: NCR5380 irq triggered\n", HOSTNO); |
1272 | 1325 | ||
1273 | /* Look for pending interrupts */ | 1326 | /* Look for pending interrupts */ |
1274 | basr = NCR5380_read(BUS_AND_STATUS_REG); | 1327 | basr = NCR5380_read(BUS_AND_STATUS_REG); |
1275 | INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); | 1328 | INT_PRINTK("scsi%d: BASR=%02x\n", HOSTNO, basr); |
1276 | /* dispatch to appropriate routine if found and done=0 */ | 1329 | /* dispatch to appropriate routine if found and done=0 */ |
1277 | if (basr & BASR_IRQ) { | 1330 | if (basr & BASR_IRQ) { |
1278 | NCR_PRINT(NDEBUG_INTR); | 1331 | NCR_PRINT(NDEBUG_INTR); |
1279 | if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { | 1332 | if ((NCR5380_read(STATUS_REG) & (SR_SEL|SR_IO)) == (SR_SEL|SR_IO)) { |
1280 | done = 0; | 1333 | done = 0; |
1281 | ENABLE_IRQ(); | 1334 | ENABLE_IRQ(); |
1282 | INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); | 1335 | INT_PRINTK("scsi%d: SEL interrupt\n", HOSTNO); |
1283 | NCR5380_reselect(instance); | 1336 | NCR5380_reselect(instance); |
1284 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1337 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
1285 | } | 1338 | } else if (basr & BASR_PARITY_ERROR) { |
1286 | else if (basr & BASR_PARITY_ERROR) { | 1339 | INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); |
1287 | INT_PRINTK("scsi%d: PARITY interrupt\n", HOSTNO); | 1340 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
1288 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1341 | } else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { |
1289 | } | 1342 | INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); |
1290 | else if ((NCR5380_read(STATUS_REG) & SR_RST) == SR_RST) { | 1343 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
1291 | INT_PRINTK("scsi%d: RESET interrupt\n", HOSTNO); | 1344 | } else { |
1292 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1345 | /* |
1293 | } | 1346 | * The rest of the interrupt conditions can occur only during a |
1294 | else { | 1347 | * DMA transfer |
1295 | /* | 1348 | */ |
1296 | * The rest of the interrupt conditions can occur only during a | ||
1297 | * DMA transfer | ||
1298 | */ | ||
1299 | 1349 | ||
1300 | #if defined(REAL_DMA) | 1350 | #if defined(REAL_DMA) |
1301 | /* | 1351 | /* |
1302 | * We should only get PHASE MISMATCH and EOP interrupts if we have | 1352 | * We should only get PHASE MISMATCH and EOP interrupts if we have |
1303 | * DMA enabled, so do a sanity check based on the current setting | 1353 | * DMA enabled, so do a sanity check based on the current setting |
1304 | * of the MODE register. | 1354 | * of the MODE register. |
1305 | */ | 1355 | */ |
1306 | 1356 | ||
1307 | if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) && | 1357 | if ((NCR5380_read(MODE_REG) & MR_DMA_MODE) && |
1308 | ((basr & BASR_END_DMA_TRANSFER) || | 1358 | ((basr & BASR_END_DMA_TRANSFER) || |
1309 | !(basr & BASR_PHASE_MATCH))) { | 1359 | !(basr & BASR_PHASE_MATCH))) { |
1310 | 1360 | ||
1311 | INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); | 1361 | INT_PRINTK("scsi%d: PHASE MISM or EOP interrupt\n", HOSTNO); |
1312 | NCR5380_dma_complete( instance ); | 1362 | NCR5380_dma_complete( instance ); |
1313 | done = 0; | 1363 | done = 0; |
1314 | ENABLE_IRQ(); | 1364 | ENABLE_IRQ(); |
1315 | } else | 1365 | } else |
1316 | #endif /* REAL_DMA */ | 1366 | #endif /* REAL_DMA */ |
1317 | { | 1367 | { |
1318 | /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ | 1368 | /* MS: Ignore unknown phase mismatch interrupts (caused by EOP interrupt) */ |
1319 | if (basr & BASR_PHASE_MATCH) | 1369 | if (basr & BASR_PHASE_MATCH) |
1320 | printk(KERN_NOTICE "scsi%d: unknown interrupt, " | 1370 | printk(KERN_NOTICE "scsi%d: unknown interrupt, " |
1321 | "BASR 0x%x, MR 0x%x, SR 0x%x\n", | 1371 | "BASR 0x%x, MR 0x%x, SR 0x%x\n", |
1322 | HOSTNO, basr, NCR5380_read(MODE_REG), | 1372 | HOSTNO, basr, NCR5380_read(MODE_REG), |
1323 | NCR5380_read(STATUS_REG)); | 1373 | NCR5380_read(STATUS_REG)); |
1324 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1374 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
1325 | } | 1375 | } |
1326 | } /* if !(SELECTION || PARITY) */ | 1376 | } /* if !(SELECTION || PARITY) */ |
1327 | handled = 1; | 1377 | handled = 1; |
1328 | } /* BASR & IRQ */ | 1378 | } /* BASR & IRQ */ else { |
1329 | else { | 1379 | printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, " |
1330 | printk(KERN_NOTICE "scsi%d: interrupt without IRQ bit set in BASR, " | 1380 | "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr, |
1331 | "BASR 0x%X, MR 0x%X, SR 0x%x\n", HOSTNO, basr, | 1381 | NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); |
1332 | NCR5380_read(MODE_REG), NCR5380_read(STATUS_REG)); | 1382 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
1333 | (void) NCR5380_read(RESET_PARITY_INTERRUPT_REG); | 1383 | } |
1334 | } | 1384 | |
1335 | 1385 | if (!done) { | |
1336 | if (!done) { | 1386 | INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); |
1337 | INT_PRINTK("scsi%d: in int routine, calling main\n", HOSTNO); | 1387 | /* Put a call to NCR5380_main() on the queue... */ |
1338 | /* Put a call to NCR5380_main() on the queue... */ | 1388 | queue_main(); |
1339 | queue_main(); | 1389 | } |
1340 | } | 1390 | return IRQ_RETVAL(handled); |
1341 | return IRQ_RETVAL(handled); | ||
1342 | } | 1391 | } |
1343 | 1392 | ||
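NCR5380_intr() follows the shared-interrupt convention: handled is set only when the BASR IRQ bit shows the 5380 really raised the line, and IRQ_RETVAL() turns that into IRQ_HANDLED or IRQ_NONE so the kernel can spot spurious or screaming interrupts. A minimal handler of the same shape, with a placeholder device check:

#include <linux/interrupt.h>

static irqreturn_t demo_intr(int irq, void *dev_id)
{
	int handled = 0;

	if (device_raised_irq(dev_id)) {	/* placeholder: poll the device's IRQ status */
		/* ... service the device here ... */
		handled = 1;
	}
	return IRQ_RETVAL(handled);		/* IRQ_HANDLED if ours, IRQ_NONE otherwise */
}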
1344 | #ifdef NCR5380_STATS | 1393 | #ifdef NCR5380_STATS |
1345 | static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd* cmd) | 1394 | static void collect_stats(struct NCR5380_hostdata* hostdata, Scsi_Cmnd *cmd) |
1346 | { | 1395 | { |
1347 | # ifdef NCR5380_STAT_LIMIT | 1396 | # ifdef NCR5380_STAT_LIMIT |
1348 | if (cmd->request_bufflen > NCR5380_STAT_LIMIT) | 1397 | if (cmd->request_bufflen > NCR5380_STAT_LIMIT) |
1349 | # endif | 1398 | # endif |
1350 | switch (cmd->cmnd[0]) | 1399 | switch (cmd->cmnd[0]) { |
1351 | { | 1400 | case WRITE: |
1352 | case WRITE: | 1401 | case WRITE_6: |
1353 | case WRITE_6: | 1402 | case WRITE_10: |
1354 | case WRITE_10: | 1403 | hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase); |
1355 | hostdata->time_write[cmd->device->id] += (jiffies - hostdata->timebase); | 1404 | /*hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;*/ |
1356 | /*hostdata->bytes_write[cmd->device->id] += cmd->request_bufflen;*/ | 1405 | hostdata->pendingw--; |
1357 | hostdata->pendingw--; | 1406 | break; |
1358 | break; | 1407 | case READ: |
1359 | case READ: | 1408 | case READ_6: |
1360 | case READ_6: | 1409 | case READ_10: |
1361 | case READ_10: | 1410 | hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase); |
1362 | hostdata->time_read[cmd->device->id] += (jiffies - hostdata->timebase); | 1411 | /*hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;*/ |
1363 | /*hostdata->bytes_read[cmd->device->id] += cmd->request_bufflen;*/ | 1412 | hostdata->pendingr--; |
1364 | hostdata->pendingr--; | 1413 | break; |
1365 | break; | 1414 | } |
1366 | } | ||
1367 | } | 1415 | } |
1368 | #endif | 1416 | #endif |
1369 | 1417 | ||
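collect_stats() is the second half of the accounting started in NCR5380_queue_command(): the per-target counter is decremented by (jiffies - timebase) when the command is queued and incremented by the same expression when it completes, so each command's net contribution is exactly its elapsed time. A tiny worked example with made-up jiffies values:

	long time_write = 0;			/* illustrative values only */
	long timebase = 0;

	time_write -= (1000 - timebase);	/* queued    at jiffies == 1000  ->  -1000 */
	time_write += (1250 - timebase);	/* completed at jiffies == 1250  ->  +1250 */
	/* net: time_write == 250 jiffies spent on this command */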
1370 | /* | 1418 | /* |
1371 | * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, | 1419 | * Function : int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, |
1372 | * int tag); | 1420 | * int tag); |
1373 | * | 1421 | * |
1374 | * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, | 1422 | * Purpose : establishes I_T_L or I_T_L_Q nexus for new or existing command, |
1375 | * including ARBITRATION, SELECTION, and initial message out for | 1423 | * including ARBITRATION, SELECTION, and initial message out for |
1376 | * IDENTIFY and queue messages. | 1424 | * IDENTIFY and queue messages. |
1377 | * | 1425 | * |
1378 | * Inputs : instance - instantiation of the 5380 driver on which this | 1426 | * Inputs : instance - instantiation of the 5380 driver on which this |
1379 | * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for | 1427 | * target lives, cmd - SCSI command to execute, tag - set to TAG_NEXT for |
1380 | * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for | 1428 | * new tag, TAG_NONE for untagged queueing, otherwise set to the tag for |
1381 | * the command that is presently connected. | 1429 | * the command that is presently connected. |
1382 | * | 1430 | * |
1383 | * Returns : -1 if selection could not execute for some reason, | 1431 | * Returns : -1 if selection could not execute for some reason, |
1384 | * 0 if selection succeeded or failed because the target | 1432 | * 0 if selection succeeded or failed because the target |
1385 | * did not respond. | 1433 | * did not respond. |
1386 | * | 1434 | * |
1387 | * Side effects : | 1435 | * Side effects : |
1388 | * If bus busy, arbitration failed, etc, NCR5380_select() will exit | 1436 | * If bus busy, arbitration failed, etc, NCR5380_select() will exit |
1389 | * with registers as they should have been on entry - ie | 1437 | * with registers as they should have been on entry - ie |
1390 | * SELECT_ENABLE will be set appropriately, the NCR5380 | 1438 | * SELECT_ENABLE will be set appropriately, the NCR5380 |
1391 | * will cease to drive any SCSI bus signals. | 1439 | * will cease to drive any SCSI bus signals. |
1392 | * | 1440 | * |
1393 | * If successful : I_T_L or I_T_L_Q nexus will be established, | 1441 | * If successful : I_T_L or I_T_L_Q nexus will be established, |
1394 | * instance->connected will be set to cmd. | 1442 | * instance->connected will be set to cmd. |
1395 | * SELECT interrupt will be disabled. | 1443 | * SELECT interrupt will be disabled. |
1396 | * | 1444 | * |
1397 | * If failed (no target) : cmd->scsi_done() will be called, and the | 1445 | * If failed (no target) : cmd->scsi_done() will be called, and the |
1398 | * cmd->result host byte set to DID_BAD_TARGET. | 1446 | * cmd->result host byte set to DID_BAD_TARGET. |
1399 | */ | 1447 | */ |
1400 | 1448 | ||
1401 | static int NCR5380_select (struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) | 1449 | static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd *cmd, int tag) |
1402 | { | 1450 | { |
1403 | SETUP_HOSTDATA(instance); | 1451 | SETUP_HOSTDATA(instance); |
1404 | unsigned char tmp[3], phase; | 1452 | unsigned char tmp[3], phase; |
1405 | unsigned char *data; | 1453 | unsigned char *data; |
1406 | int len; | 1454 | int len; |
1407 | unsigned long timeout; | 1455 | unsigned long timeout; |
1408 | unsigned long flags; | 1456 | unsigned long flags; |
1409 | 1457 | ||
1410 | hostdata->restart_select = 0; | 1458 | hostdata->restart_select = 0; |
1411 | NCR_PRINT(NDEBUG_ARBITRATION); | 1459 | NCR_PRINT(NDEBUG_ARBITRATION); |
1412 | ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, | 1460 | ARB_PRINTK("scsi%d: starting arbitration, id = %d\n", HOSTNO, |
1413 | instance->this_id); | 1461 | instance->this_id); |
1414 | 1462 | ||
1415 | /* | 1463 | /* |
1416 | * Set the phase bits to 0, otherwise the NCR5380 won't drive the | 1464 | * Set the phase bits to 0, otherwise the NCR5380 won't drive the |
1417 | * data bus during SELECTION. | 1465 | * data bus during SELECTION. |
1418 | */ | 1466 | */ |
1419 | 1467 | ||
1420 | local_irq_save(flags); | 1468 | local_irq_save(flags); |
1421 | if (hostdata->connected) { | 1469 | if (hostdata->connected) { |
1470 | local_irq_restore(flags); | ||
1471 | return -1; | ||
1472 | } | ||
1473 | NCR5380_write(TARGET_COMMAND_REG, 0); | ||
1474 | |||
1475 | /* | ||
1476 | * Start arbitration. | ||
1477 | */ | ||
1478 | |||
1479 | NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); | ||
1480 | NCR5380_write(MODE_REG, MR_ARBITRATE); | ||
1481 | |||
1422 | local_irq_restore(flags); | 1482 | local_irq_restore(flags); |
1423 | return -1; | 1483 | |
1424 | } | 1484 | /* Wait for arbitration logic to complete */ |
1425 | NCR5380_write(TARGET_COMMAND_REG, 0); | 1485 | #if defined(NCR_TIMEOUT) |
1426 | 1486 | { | |
1427 | 1487 | unsigned long timeout = jiffies + 2*NCR_TIMEOUT; | |
1428 | /* | 1488 | |
1429 | * Start arbitration. | 1489 | while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) && |
1430 | */ | 1490 | time_before(jiffies, timeout) && !hostdata->connected) |
1431 | 1491 | ; | |
1432 | NCR5380_write(OUTPUT_DATA_REG, hostdata->id_mask); | 1492 | if (time_after_eq(jiffies, timeout)) { |
1433 | NCR5380_write(MODE_REG, MR_ARBITRATE); | 1493 | printk("scsi : arbitration timeout at %d\n", __LINE__); |
1434 | 1494 | NCR5380_write(MODE_REG, MR_BASE); | |
1435 | local_irq_restore(flags); | 1495 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
1436 | 1496 | return -1; | |
1437 | /* Wait for arbitration logic to complete */ | 1497 | } |
1438 | #if NCR_TIMEOUT | 1498 | } |
1439 | { | ||
1440 | unsigned long timeout = jiffies + 2*NCR_TIMEOUT; | ||
1441 | |||
1442 | while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) | ||
1443 | && time_before(jiffies, timeout) && !hostdata->connected) | ||
1444 | ; | ||
1445 | if (time_after_eq(jiffies, timeout)) | ||
1446 | { | ||
1447 | printk("scsi : arbitration timeout at %d\n", __LINE__); | ||
1448 | NCR5380_write(MODE_REG, MR_BASE); | ||
1449 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | ||
1450 | return -1; | ||
1451 | } | ||
1452 | } | ||
1453 | #else /* NCR_TIMEOUT */ | 1499 | #else /* NCR_TIMEOUT */ |
1454 | while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) | 1500 | while (!(NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_PROGRESS) && |
1455 | && !hostdata->connected); | 1501 | !hostdata->connected) |
1502 | ; | ||
1456 | #endif | 1503 | #endif |
1457 | 1504 | ||
1458 | ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); | 1505 | ARB_PRINTK("scsi%d: arbitration complete\n", HOSTNO); |
1459 | |||
1460 | if (hostdata->connected) { | ||
1461 | NCR5380_write(MODE_REG, MR_BASE); | ||
1462 | return -1; | ||
1463 | } | ||
1464 | /* | ||
1465 | * The arbitration delay is 2.2us, but this is a minimum and there is | ||
1466 | * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate | ||
1467 | * the integral nature of udelay(). | ||
1468 | * | ||
1469 | */ | ||
1470 | |||
1471 | udelay(3); | ||
1472 | |||
1473 | /* Check for lost arbitration */ | ||
1474 | if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || | ||
1475 | (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || | ||
1476 | (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || | ||
1477 | hostdata->connected) { | ||
1478 | NCR5380_write(MODE_REG, MR_BASE); | ||
1479 | ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", | ||
1480 | HOSTNO); | ||
1481 | return -1; | ||
1482 | } | ||
1483 | |||
1484 | /* after/during arbitration, BSY should be asserted. | ||
1485 | IBM DPES-31080 Version S31Q works now */ | ||
1486 | /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */ | ||
1487 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_SEL | | ||
1488 | ICR_ASSERT_BSY ) ; | ||
1489 | |||
1490 | if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || | ||
1491 | hostdata->connected) { | ||
1492 | NCR5380_write(MODE_REG, MR_BASE); | ||
1493 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1494 | ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", | ||
1495 | HOSTNO); | ||
1496 | return -1; | ||
1497 | } | ||
1498 | 1506 | ||
1499 | /* | 1507 | if (hostdata->connected) { |
1500 | * Again, bus clear + bus settle time is 1.2us, however, this is | 1508 | NCR5380_write(MODE_REG, MR_BASE); |
1501 | * a minimum so we'll udelay ceil(1.2) | 1509 | return -1; |
1502 | */ | 1510 | } |
1511 | /* | ||
1512 | * The arbitration delay is 2.2us, but this is a minimum and there is | ||
1513 | * no maximum so we can safely sleep for ceil(2.2) usecs to accommodate | ||
1514 | * the integral nature of udelay(). | ||
1515 | * | ||
1516 | */ | ||
1517 | |||
1518 | udelay(3); | ||
1519 | |||
1520 | /* Check for lost arbitration */ | ||
1521 | if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || | ||
1522 | (NCR5380_read(CURRENT_SCSI_DATA_REG) & hostdata->id_higher_mask) || | ||
1523 | (NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || | ||
1524 | hostdata->connected) { | ||
1525 | NCR5380_write(MODE_REG, MR_BASE); | ||
1526 | ARB_PRINTK("scsi%d: lost arbitration, deasserting MR_ARBITRATE\n", | ||
1527 | HOSTNO); | ||
1528 | return -1; | ||
1529 | } | ||
1530 | |||
1531 | /* after/during arbitration, BSY should be asserted. | ||
1532 | IBM DPES-31080 Version S31Q works now */ | ||
1533 | /* Tnx to Thomas_Roesch@m2.maus.de for finding this! (Roman) */ | ||
1534 | NCR5380_write(INITIATOR_COMMAND_REG, | ||
1535 | ICR_BASE | ICR_ASSERT_SEL | ICR_ASSERT_BSY); | ||
1536 | |||
1537 | if ((NCR5380_read(INITIATOR_COMMAND_REG) & ICR_ARBITRATION_LOST) || | ||
1538 | hostdata->connected) { | ||
1539 | NCR5380_write(MODE_REG, MR_BASE); | ||
1540 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1541 | ARB_PRINTK("scsi%d: lost arbitration, deasserting ICR_ASSERT_SEL\n", | ||
1542 | HOSTNO); | ||
1543 | return -1; | ||
1544 | } | ||
1545 | |||
1546 | /* | ||
1547 | * Again, bus clear + bus settle time is 1.2us, however, this is | ||
1548 | * a minimum so we'll udelay ceil(1.2) | ||
1549 | */ | ||
1503 | 1550 | ||
1504 | #ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY | 1551 | #ifdef CONFIG_ATARI_SCSI_TOSHIBA_DELAY |
1505 | /* ++roman: But some targets (see above :-) seem to need a bit more... */ | 1552 | /* ++roman: But some targets (see above :-) seem to need a bit more... */ |
1506 | udelay(15); | 1553 | udelay(15); |
1507 | #else | 1554 | #else |
1508 | udelay(2); | 1555 | udelay(2); |
1509 | #endif | 1556 | #endif |
1510 | 1557 | ||
1511 | if (hostdata->connected) { | 1558 | if (hostdata->connected) { |
1559 | NCR5380_write(MODE_REG, MR_BASE); | ||
1560 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1561 | return -1; | ||
1562 | } | ||
1563 | |||
1564 | ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); | ||
1565 | |||
1566 | /* | ||
1567 | * Now that we have won arbitration, start Selection process, asserting | ||
1568 | * the host and target ID's on the SCSI bus. | ||
1569 | */ | ||
1570 | |||
1571 | NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id))); | ||
1572 | |||
1573 | /* | ||
1574 | * Raise ATN while SEL is true before BSY goes false from arbitration, | ||
1575 | * since this is the only way to guarantee that we'll get a MESSAGE OUT | ||
1576 | * phase immediately after selection. | ||
1577 | */ | ||
1578 | |||
1579 | NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | | ||
1580 | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL )); | ||
1512 | NCR5380_write(MODE_REG, MR_BASE); | 1581 | NCR5380_write(MODE_REG, MR_BASE); |
1513 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1514 | return -1; | ||
1515 | } | ||
1516 | 1582 | ||
1517 | ARB_PRINTK("scsi%d: won arbitration\n", HOSTNO); | 1583 | /* |
1584 | * Reselect interrupts must be turned off prior to the dropping of BSY, | ||
1585 | * otherwise we will trigger an interrupt. | ||
1586 | */ | ||
1587 | |||
1588 | if (hostdata->connected) { | ||
1589 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | ||
1590 | return -1; | ||
1591 | } | ||
1518 | 1592 | ||
1519 | /* | 1593 | NCR5380_write(SELECT_ENABLE_REG, 0); |
1520 | * Now that we have won arbitration, start Selection process, asserting | 1594 | |
1521 | * the host and target ID's on the SCSI bus. | 1595 | /* |
1522 | */ | 1596 | * The initiator shall then wait at least two deskew delays and release |
1597 | * the BSY signal. | ||
1598 | */ | ||
1599 | udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ | ||
1600 | |||
1601 | /* Reset BSY */ | ||
1602 | NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | | ||
1603 | ICR_ASSERT_ATN | ICR_ASSERT_SEL)); | ||
1604 | |||
1605 | /* | ||
1606 | * Something weird happens when we cease to drive BSY - looks | ||
1607 | * like the board/chip is letting us do another read before the | ||
1608 | * appropriate propagation delay has expired, and we're confusing | ||
1609 | * a BSY signal from ourselves as the target's response to SELECTION. | ||
1610 | * | ||
1611 | * A small delay (the 'C++' frontend breaks the pipeline with an | ||
1612 | * unnecessary jump, making it work on my 386-33/Trantor T128, the | ||
1613 | * tighter 'C' code breaks and requires this) solves the problem - | ||
1614 | * the 1 us delay is arbitrary, and only used because this delay will | ||
1615 | * be the same on other platforms and since it works here, it should | ||
1616 | * work there. | ||
1617 | * | ||
1618 | * wingel suggests that this could be due to failing to wait | ||
1619 | * one deskew delay. | ||
1620 | */ | ||
1523 | 1621 | ||
1524 | NCR5380_write(OUTPUT_DATA_REG, (hostdata->id_mask | (1 << cmd->device->id))); | 1622 | udelay(1); |
1525 | 1623 | ||
1526 | /* | 1624 | SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); |
1527 | * Raise ATN while SEL is true before BSY goes false from arbitration, | ||
1528 | * since this is the only way to guarantee that we'll get a MESSAGE OUT | ||
1529 | * phase immediately after selection. | ||
1530 | */ | ||
1531 | 1625 | ||
1532 | NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_BSY | | 1626 | /* |
1533 | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_SEL )); | 1627 | * The SCSI specification calls for a 250 ms timeout for the actual |
1534 | NCR5380_write(MODE_REG, MR_BASE); | 1628 | * selection. |
1629 | */ | ||
1535 | 1630 | ||
1536 | /* | 1631 | timeout = jiffies + 25; |
1537 | * Reselect interrupts must be turned off prior to the dropping of BSY, | ||
1538 | * otherwise we will trigger an interrupt. | ||
1539 | */ | ||
1540 | 1632 | ||
1541 | if (hostdata->connected) { | 1633 | /* |
1542 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1634 | * XXX very interesting - we're seeing a bounce where the BSY we |
1543 | return -1; | 1635 | * asserted is being reflected / still asserted (propagation delay?) |
1544 | } | 1636 | * and it's detecting as true. Sigh. |
1545 | 1637 | */ | |
1546 | NCR5380_write(SELECT_ENABLE_REG, 0); | ||
1547 | |||
1548 | /* | ||
1549 | * The initiator shall then wait at least two deskew delays and release | ||
1550 | * the BSY signal. | ||
1551 | */ | ||
1552 | udelay(1); /* wingel -- wait two bus deskew delay >2*45ns */ | ||
1553 | |||
1554 | /* Reset BSY */ | ||
1555 | NCR5380_write(INITIATOR_COMMAND_REG, (ICR_BASE | ICR_ASSERT_DATA | | ||
1556 | ICR_ASSERT_ATN | ICR_ASSERT_SEL)); | ||
1557 | |||
1558 | /* | ||
1559 | * Something weird happens when we cease to drive BSY - looks | ||
1560 | * like the board/chip is letting us do another read before the | ||
1561 | * appropriate propagation delay has expired, and we're mistaking | ||
1562 | * a BSY signal from ourselves for the target's response to SELECTION. | ||
1563 | * | ||
1564 | * A small delay (the 'C++' frontend breaks the pipeline with an | ||
1565 | * unnecessary jump, making it work on my 386-33/Trantor T128, the | ||
1566 | * tighter 'C' code breaks and requires this) solves the problem - | ||
1567 | * the 1 us delay is arbitrary, and only used because this delay will | ||
1568 | * be the same on other platforms and since it works here, it should | ||
1569 | * work there. | ||
1570 | * | ||
1571 | * wingel suggests that this could be due to failing to wait | ||
1572 | * one deskew delay. | ||
1573 | */ | ||
1574 | |||
1575 | udelay(1); | ||
1576 | |||
1577 | SEL_PRINTK("scsi%d: selecting target %d\n", HOSTNO, cmd->device->id); | ||
1578 | |||
1579 | /* | ||
1580 | * The SCSI specification calls for a 250 ms timeout for the actual | ||
1581 | * selection. | ||
1582 | */ | ||
1583 | |||
1584 | timeout = jiffies + 25; | ||
1585 | |||
1586 | /* | ||
1587 | * XXX very interesting - we're seeing a bounce where the BSY we | ||
1588 | * asserted is being reflected / still asserted (propagation delay?) | ||
1589 | * and it's detecting as true. Sigh. | ||
1590 | */ | ||
1591 | 1638 | ||
1592 | #if 0 | 1639 | #if 0 |
1593 | /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert | 1640 | /* ++roman: If a target conformed to the SCSI standard, it wouldn't assert |
1594 | * IO while SEL is true. But again, there are some disks out there in the | 1641 | * IO while SEL is true. But again, there are some disks out there in the |
1595 | * world that do that nevertheless. (Somebody claimed that this announces | 1642 | * world that do that nevertheless. (Somebody claimed that this announces |
1596 | * reselection capability of the target.) So we better skip that test and | 1643 | * reselection capability of the target.) So we better skip that test and |
1597 | * only wait for BSY... (Famous German words: Der Klügere gibt nach, i.e. "the wiser one gives in" :-) | 1644 | * only wait for BSY... (Famous German words: Der Klügere gibt nach, i.e. "the wiser one gives in" :-) |
1598 | */ | 1645 | */ |
1599 | 1646 | ||
1600 | while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & | 1647 | while (time_before(jiffies, timeout) && |
1601 | (SR_BSY | SR_IO))); | 1648 | !(NCR5380_read(STATUS_REG) & (SR_BSY | SR_IO))) |
1602 | 1649 | ; | |
1603 | if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == | 1650 | |
1604 | (SR_SEL | SR_IO)) { | 1651 | if ((NCR5380_read(STATUS_REG) & (SR_SEL | SR_IO)) == (SR_SEL | SR_IO)) { |
1605 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1652 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
1606 | NCR5380_reselect(instance); | 1653 | NCR5380_reselect(instance); |
1607 | printk (KERN_ERR "scsi%d: reselection after won arbitration?\n", | 1654 | printk(KERN_ERR "scsi%d: reselection after won arbitration?\n", |
1608 | HOSTNO); | 1655 | HOSTNO); |
1609 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1656 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
1610 | return -1; | 1657 | return -1; |
1611 | } | 1658 | } |
1612 | #else | 1659 | #else |
1613 | while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY)); | 1660 | while (time_before(jiffies, timeout) && !(NCR5380_read(STATUS_REG) & SR_BSY)) |
1661 | ; | ||
1614 | #endif | 1662 | #endif |
1615 | 1663 | ||
1616 | /* | 1664 | /* |
1617 | * No less than two deskew delays after the initiator detects the | 1665 | * No less than two deskew delays after the initiator detects the |
1618 | * BSY signal is true, it shall release the SEL signal and may | 1666 | * BSY signal is true, it shall release the SEL signal and may |
1619 | * change the DATA BUS. -wingel | 1667 | * change the DATA BUS. -wingel |
1620 | */ | 1668 | */ |
1621 | 1669 | ||
1622 | udelay(1); | 1670 | udelay(1); |
1623 | 1671 | ||
1624 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); | 1672 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); |
1625 | 1673 | ||
1626 | if (!(NCR5380_read(STATUS_REG) & SR_BSY)) { | 1674 | if (!(NCR5380_read(STATUS_REG) & SR_BSY)) { |
1627 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1675 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
1628 | if (hostdata->targets_present & (1 << cmd->device->id)) { | 1676 | if (hostdata->targets_present & (1 << cmd->device->id)) { |
1629 | printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); | 1677 | printk(KERN_ERR "scsi%d: weirdness\n", HOSTNO); |
1630 | if (hostdata->restart_select) | 1678 | if (hostdata->restart_select) |
1631 | printk(KERN_NOTICE "\trestart select\n"); | 1679 | printk(KERN_NOTICE "\trestart select\n"); |
1632 | NCR_PRINT(NDEBUG_ANY); | 1680 | NCR_PRINT(NDEBUG_ANY); |
1633 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1681 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
1634 | return -1; | 1682 | return -1; |
1635 | } | 1683 | } |
1636 | cmd->result = DID_BAD_TARGET << 16; | 1684 | cmd->result = DID_BAD_TARGET << 16; |
1637 | #ifdef NCR5380_STATS | 1685 | #ifdef NCR5380_STATS |
1638 | collect_stats(hostdata, cmd); | 1686 | collect_stats(hostdata, cmd); |
1639 | #endif | 1687 | #endif |
1640 | #ifdef SUPPORT_TAGS | 1688 | #ifdef SUPPORT_TAGS |
1641 | cmd_free_tag( cmd ); | 1689 | cmd_free_tag(cmd); |
1642 | #endif | 1690 | #endif |
1643 | cmd->scsi_done(cmd); | 1691 | cmd->scsi_done(cmd); |
1644 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1692 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
1645 | SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); | 1693 | SEL_PRINTK("scsi%d: target did not respond within 250ms\n", HOSTNO); |
1646 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 1694 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
1647 | return 0; | 1695 | return 0; |
1648 | } | 1696 | } |
1649 | 1697 | ||
1650 | hostdata->targets_present |= (1 << cmd->device->id); | 1698 | hostdata->targets_present |= (1 << cmd->device->id); |
1651 | 1699 | ||
1652 | /* | 1700 | /* |
1653 | * Since we followed the SCSI spec, and raised ATN while SEL | 1701 | * Since we followed the SCSI spec, and raised ATN while SEL |
1654 | * was true but before BSY was false during selection, the information | 1702 | * was true but before BSY was false during selection, the information |
1655 | * transfer phase should be a MESSAGE OUT phase so that we can send the | 1703 | * transfer phase should be a MESSAGE OUT phase so that we can send the |
1656 | * IDENTIFY message. | 1704 | * IDENTIFY message. |
1657 | * | 1705 | * |
1658 | * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG | 1706 | * If SCSI-II tagged queuing is enabled, we also send a SIMPLE_QUEUE_TAG |
1659 | * message (2 bytes) with a tag ID that we increment with every command | 1707 | * message (2 bytes) with a tag ID that we increment with every command |
1660 | * until it wraps back to 0. | 1708 | * until it wraps back to 0. |
1661 | * | 1709 | * |
1662 | * XXX - it turns out that there are some broken SCSI-II devices, | 1710 | * XXX - it turns out that there are some broken SCSI-II devices, |
1663 | * which claim to support tagged queuing but fail when more than | 1711 | * which claim to support tagged queuing but fail when more than |
1664 | * some number of commands are issued at once. | 1712 | * some number of commands are issued at once. |
1665 | */ | 1713 | */ |
1666 | 1714 | ||
1667 | /* Wait for start of REQ/ACK handshake */ | 1715 | /* Wait for start of REQ/ACK handshake */ |
1668 | while (!(NCR5380_read(STATUS_REG) & SR_REQ)); | 1716 | while (!(NCR5380_read(STATUS_REG) & SR_REQ)) |
1669 | 1717 | ; | |
1670 | SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", | 1718 | |
1671 | HOSTNO, cmd->device->id); | 1719 | SEL_PRINTK("scsi%d: target %d selected, going into MESSAGE OUT phase.\n", |
1672 | tmp[0] = IDENTIFY(1, cmd->device->lun); | 1720 | HOSTNO, cmd->device->id); |
1721 | tmp[0] = IDENTIFY(1, cmd->device->lun); | ||
1673 | 1722 | ||
1674 | #ifdef SUPPORT_TAGS | 1723 | #ifdef SUPPORT_TAGS |
1675 | if (cmd->tag != TAG_NONE) { | 1724 | if (cmd->tag != TAG_NONE) { |
1676 | tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; | 1725 | tmp[1] = hostdata->last_message = SIMPLE_QUEUE_TAG; |
1677 | tmp[2] = cmd->tag; | 1726 | tmp[2] = cmd->tag; |
1678 | len = 3; | 1727 | len = 3; |
1679 | } else | 1728 | } else |
1680 | len = 1; | 1729 | len = 1; |
1681 | #else | 1730 | #else |
1682 | len = 1; | 1731 | len = 1; |
1683 | cmd->tag=0; | 1732 | cmd->tag = 0; |
1684 | #endif /* SUPPORT_TAGS */ | 1733 | #endif /* SUPPORT_TAGS */ |
1685 | 1734 | ||
1686 | /* Send message(s) */ | 1735 | /* Send message(s) */ |
1687 | data = tmp; | 1736 | data = tmp; |
1688 | phase = PHASE_MSGOUT; | 1737 | phase = PHASE_MSGOUT; |
1689 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 1738 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
1690 | SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); | 1739 | SEL_PRINTK("scsi%d: nexus established.\n", HOSTNO); |
1691 | /* XXX need to handle errors here */ | 1740 | /* XXX need to handle errors here */ |
1692 | hostdata->connected = cmd; | 1741 | hostdata->connected = cmd; |
1693 | #ifndef SUPPORT_TAGS | 1742 | #ifndef SUPPORT_TAGS |
1694 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 1743 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); |
1695 | #endif | 1744 | #endif |
1696 | |||
1697 | initialize_SCp(cmd); | ||
1698 | 1745 | ||
1746 | initialize_SCp(cmd); | ||
1699 | 1747 | ||
1700 | return 0; | 1748 | return 0; |
1701 | } | 1749 | } |
1702 | 1750 | ||
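The MESSAGE OUT buffer built at the end of the selection routine above is either a single IDENTIFY byte or IDENTIFY followed by a two-byte SIMPLE_QUEUE_TAG message. A rough standalone sketch of that layout follows; the message codes are the usual SCSI-2 values, while build_msgout() and the TAG_NONE sentinel are made up for illustration and are not taken from this driver.

#include <stdio.h>

/* Usual SCSI-2 message codes; TAG_NONE here is only an illustrative sentinel. */
#define IDENTIFY_BASE     0x80
#define DISCONNECT_ALLOW  0x40
#define SIMPLE_QUEUE_TAG  0x20
#define TAG_NONE          (-1)

/* Build the 1- or 3-byte MESSAGE OUT buffer: IDENTIFY alone for untagged
 * commands, IDENTIFY + SIMPLE_QUEUE_TAG + tag number for tagged ones. */
static int build_msgout(unsigned char *buf, int lun, int tag)
{
	buf[0] = IDENTIFY_BASE | DISCONNECT_ALLOW | (lun & 0x07);
	if (tag == TAG_NONE)
		return 1;
	buf[1] = SIMPLE_QUEUE_TAG;
	buf[2] = (unsigned char)tag;
	return 3;
}

int main(void)
{
	unsigned char msg[3];
	int len = build_msgout(msg, 2, 5);

	for (int i = 0; i < len; i++)
		printf("%02x ", msg[i]);
	printf("\n");	/* prints "c2 20 05" */
	return 0;
}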
1703 | /* | 1751 | /* |
1704 | * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, | 1752 | * Function : int NCR5380_transfer_pio (struct Scsi_Host *instance, |
1705 | * unsigned char *phase, int *count, unsigned char **data) | 1753 | * unsigned char *phase, int *count, unsigned char **data) |
1706 | * | 1754 | * |
1707 | * Purpose : transfers data in given phase using polled I/O | 1755 | * Purpose : transfers data in given phase using polled I/O |
1708 | * | 1756 | * |
1709 | * Inputs : instance - instance of driver, *phase - pointer to | 1757 | * Inputs : instance - instance of driver, *phase - pointer to |
1710 | * what phase is expected, *count - pointer to number of | 1758 | * what phase is expected, *count - pointer to number of |
1711 | * bytes to transfer, **data - pointer to data pointer. | 1759 | * bytes to transfer, **data - pointer to data pointer. |
1712 | * | 1760 | * |
1713 | * Returns : -1 when different phase is entered without transferring | 1761 | * Returns : -1 when different phase is entered without transferring |
1714 | * maximum number of bytes, 0 if all bytes are transferred or exit | 1762 | * maximum number of bytes, 0 if all bytes are transferred or exit |
1715 | * is in the same phase. | 1763 | * is in the same phase. |
1716 | * | 1764 | * |
1717 | * Also, *phase, *count, *data are modified in place. | 1765 | * Also, *phase, *count, *data are modified in place. |
1718 | * | 1766 | * |
1719 | * XXX Note : handling for bus free may be useful. | 1767 | * XXX Note : handling for bus free may be useful. |
1720 | */ | 1768 | */ |
1721 | 1769 | ||
1722 | /* | 1770 | /* |
1723 | * Note : this code is not as quick as it could be, however it | 1771 | * Note : this code is not as quick as it could be, however it |
1724 | * IS 100% reliable, and for the actual data transfer where speed | 1772 | * IS 100% reliable, and for the actual data transfer where speed |
1725 | * counts, we will always do a pseudo DMA or DMA transfer. | 1773 | * counts, we will always do a pseudo DMA or DMA transfer. |
1726 | */ | 1774 | */ |
1727 | 1775 | ||
1728 | static int NCR5380_transfer_pio( struct Scsi_Host *instance, | 1776 | static int NCR5380_transfer_pio(struct Scsi_Host *instance, |
1729 | unsigned char *phase, int *count, | 1777 | unsigned char *phase, int *count, |
1730 | unsigned char **data) | 1778 | unsigned char **data) |
1731 | { | 1779 | { |
1732 | register unsigned char p = *phase, tmp; | 1780 | register unsigned char p = *phase, tmp; |
1733 | register int c = *count; | 1781 | register int c = *count; |
1734 | register unsigned char *d = *data; | 1782 | register unsigned char *d = *data; |
1735 | 1783 | ||
1736 | /* | 1784 | /* |
1737 | * The NCR5380 chip will only drive the SCSI bus when the | 1785 | * The NCR5380 chip will only drive the SCSI bus when the |
1738 | * phase specified in the appropriate bits of the TARGET COMMAND | 1786 | * phase specified in the appropriate bits of the TARGET COMMAND |
1739 | * REGISTER match the STATUS REGISTER | 1787 | * REGISTER match the STATUS REGISTER |
1740 | */ | ||
1741 | |||
1742 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); | ||
1743 | |||
1744 | do { | ||
1745 | /* | ||
1746 | * Wait for assertion of REQ, after which the phase bits will be | ||
1747 | * valid | ||
1748 | */ | 1788 | */ |
1749 | while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)); | ||
1750 | 1789 | ||
1751 | HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); | 1790 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); |
1752 | 1791 | ||
1753 | /* Check for phase mismatch */ | 1792 | do { |
1754 | if ((tmp & PHASE_MASK) != p) { | 1793 | /* |
1755 | PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); | 1794 | * Wait for assertion of REQ, after which the phase bits will be |
1756 | NCR_PRINT_PHASE(NDEBUG_PIO); | 1795 | * valid |
1757 | break; | 1796 | */ |
1758 | } | 1797 | while (!((tmp = NCR5380_read(STATUS_REG)) & SR_REQ)) |
1798 | ; | ||
1759 | 1799 | ||
1760 | /* Do actual transfer from SCSI bus to / from memory */ | 1800 | HSH_PRINTK("scsi%d: REQ detected\n", HOSTNO); |
1761 | if (!(p & SR_IO)) | ||
1762 | NCR5380_write(OUTPUT_DATA_REG, *d); | ||
1763 | else | ||
1764 | *d = NCR5380_read(CURRENT_SCSI_DATA_REG); | ||
1765 | 1801 | ||
1766 | ++d; | 1802 | /* Check for phase mismatch */ |
1803 | if ((tmp & PHASE_MASK) != p) { | ||
1804 | PIO_PRINTK("scsi%d: phase mismatch\n", HOSTNO); | ||
1805 | NCR_PRINT_PHASE(NDEBUG_PIO); | ||
1806 | break; | ||
1807 | } | ||
1767 | 1808 | ||
1768 | /* | 1809 | /* Do actual transfer from SCSI bus to / from memory */ |
1769 | * The SCSI standard suggests that in MSGOUT phase, the initiator | 1810 | if (!(p & SR_IO)) |
1770 | * should drop ATN on the last byte of the message phase | 1811 | NCR5380_write(OUTPUT_DATA_REG, *d); |
1771 | * after REQ has been asserted for the handshake but before | 1812 | else |
1772 | * the initiator raises ACK. | 1813 | *d = NCR5380_read(CURRENT_SCSI_DATA_REG); |
1773 | */ | ||
1774 | 1814 | ||
1775 | if (!(p & SR_IO)) { | 1815 | ++d; |
1776 | if (!((p & SR_MSG) && c > 1)) { | ||
1777 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | ||
1778 | ICR_ASSERT_DATA); | ||
1779 | NCR_PRINT(NDEBUG_PIO); | ||
1780 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | ||
1781 | ICR_ASSERT_DATA | ICR_ASSERT_ACK); | ||
1782 | } else { | ||
1783 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | ||
1784 | ICR_ASSERT_DATA | ICR_ASSERT_ATN); | ||
1785 | NCR_PRINT(NDEBUG_PIO); | ||
1786 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | ||
1787 | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); | ||
1788 | } | ||
1789 | } else { | ||
1790 | NCR_PRINT(NDEBUG_PIO); | ||
1791 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); | ||
1792 | } | ||
1793 | 1816 | ||
1794 | while (NCR5380_read(STATUS_REG) & SR_REQ); | 1817 | /* |
1818 | * The SCSI standard suggests that in MSGOUT phase, the initiator | ||
1819 | * should drop ATN on the last byte of the message phase | ||
1820 | * after REQ has been asserted for the handshake but before | ||
1821 | * the initiator raises ACK. | ||
1822 | */ | ||
1795 | 1823 | ||
1796 | HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); | 1824 | if (!(p & SR_IO)) { |
1825 | if (!((p & SR_MSG) && c > 1)) { | ||
1826 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); | ||
1827 | NCR_PRINT(NDEBUG_PIO); | ||
1828 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | ||
1829 | ICR_ASSERT_DATA | ICR_ASSERT_ACK); | ||
1830 | } else { | ||
1831 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | ||
1832 | ICR_ASSERT_DATA | ICR_ASSERT_ATN); | ||
1833 | NCR_PRINT(NDEBUG_PIO); | ||
1834 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | ||
1835 | ICR_ASSERT_DATA | ICR_ASSERT_ATN | ICR_ASSERT_ACK); | ||
1836 | } | ||
1837 | } else { | ||
1838 | NCR_PRINT(NDEBUG_PIO); | ||
1839 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ACK); | ||
1840 | } | ||
1797 | 1841 | ||
1798 | /* | 1842 | while (NCR5380_read(STATUS_REG) & SR_REQ) |
1799 | * We have several special cases to consider during REQ/ACK handshaking : | 1843 | ; |
1800 | * 1. We were in MSGOUT phase, and we are on the last byte of the | 1844 | |
1801 | * message. ATN must be dropped as ACK is dropped. | 1845 | HSH_PRINTK("scsi%d: req false, handshake complete\n", HOSTNO); |
1802 | * | 1846 | |
1803 | * 2. We are in a MSGIN phase, and we are on the last byte of the | 1847 | /* |
1804 | * message. We must exit with ACK asserted, so that the calling | 1848 | * We have several special cases to consider during REQ/ACK handshaking : |
1805 | * code may raise ATN before dropping ACK to reject the message. | 1849 | * 1. We were in MSGOUT phase, and we are on the last byte of the |
1806 | * | 1850 | * message. ATN must be dropped as ACK is dropped. |
1807 | * 3. ACK and ATN are clear and the target may proceed as normal. | 1851 | * |
1808 | */ | 1852 | * 2. We are in a MSGIN phase, and we are on the last byte of the |
1809 | if (!(p == PHASE_MSGIN && c == 1)) { | 1853 | * message. We must exit with ACK asserted, so that the calling |
1810 | if (p == PHASE_MSGOUT && c > 1) | 1854 | * code may raise ATN before dropping ACK to reject the message. |
1811 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); | 1855 | * |
1812 | else | 1856 | * 3. ACK and ATN are clear and the target may proceed as normal. |
1813 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 1857 | */ |
1814 | } | 1858 | if (!(p == PHASE_MSGIN && c == 1)) { |
1815 | } while (--c); | 1859 | if (p == PHASE_MSGOUT && c > 1) |
1816 | 1860 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); | |
1817 | PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); | 1861 | else |
1818 | 1862 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | |
1819 | *count = c; | 1863 | } |
1820 | *data = d; | 1864 | } while (--c); |
1821 | tmp = NCR5380_read(STATUS_REG); | 1865 | |
1822 | /* The phase read from the bus is valid if either REQ is (already) | 1866 | PIO_PRINTK("scsi%d: residual %d\n", HOSTNO, c); |
1823 | * asserted or if ACK hasn't been released yet. The latter is the case if | 1867 | |
1824 | * we're in MSGIN and all wanted bytes have been received. */ | 1868 | *count = c; |
1825 | if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0)) | 1869 | *data = d; |
1826 | *phase = tmp & PHASE_MASK; | 1870 | tmp = NCR5380_read(STATUS_REG); |
1827 | else | 1871 | /* The phase read from the bus is valid if either REQ is (already) |
1828 | *phase = PHASE_UNKNOWN; | 1872 | * asserted or if ACK hasn't been released yet. The latter is the case if |
1829 | 1873 | * we're in MSGIN and all wanted bytes have been received. | |
1830 | if (!c || (*phase == p)) | 1874 | */ |
1831 | return 0; | 1875 | if ((tmp & SR_REQ) || (p == PHASE_MSGIN && c == 0)) |
1832 | else | 1876 | *phase = tmp & PHASE_MASK; |
1833 | return -1; | 1877 | else |
1878 | *phase = PHASE_UNKNOWN; | ||
1879 | |||
1880 | if (!c || (*phase == p)) | ||
1881 | return 0; | ||
1882 | else | ||
1883 | return -1; | ||
1834 | } | 1884 | } |
1835 | 1885 | ||
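The phase/count/data arguments documented in the NCR5380_transfer_pio() header above form an in/out convention: the caller points them at its current state, the routine advances all three in place, and a non-zero residual together with a changed *phase signals that the target switched phase early. A minimal caller sketch against a stubbed transfer routine; pio_transfer() and the phase encodings are assumptions for illustration only, not the driver's code.

#include <stdio.h>

/* Assumed 5380-style phase encodings (MSG/CD/IO bits), for illustration. */
#define PHASE_MSGOUT 0x18
#define PHASE_MSGIN  0x1c

/* Stub standing in for the polled REQ/ACK handshake: pretend the target
 * accepts two bytes and then switches to MESSAGE IN phase. */
static int pio_transfer(unsigned char *phase, int *count, unsigned char **data)
{
	int moved = (*count > 2) ? 2 : *count;

	*data  += moved;
	*count -= moved;
	if (*count) {			/* early phase change: report residual */
		*phase = PHASE_MSGIN;
		return -1;
	}
	return 0;			/* everything sent, phase unchanged */
}

int main(void)
{
	unsigned char msg[3] = { 0xc2, 0x20, 0x05 };
	unsigned char *data = msg;
	unsigned char phase = PHASE_MSGOUT;
	int len = sizeof(msg);
	int rc = pio_transfer(&phase, &len, &data);

	printf("rc=%d residual=%d phase=0x%02x\n", rc, len, phase);
	return 0;
}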
1836 | /* | 1886 | /* |
1837 | * Function : do_abort (Scsi_Host *host) | 1887 | * Function : do_abort (Scsi_Host *host) |
1838 | * | 1888 | * |
1839 | * Purpose : abort the currently established nexus. Should only be | 1889 | * Purpose : abort the currently established nexus. Should only be |
1840 | * called from a routine which can drop into a busy-wait (polling) loop. | 1890 | * called from a routine which can drop into a busy-wait (polling) loop. |
1841 | * | 1891 | * |
1842 | * Returns : 0 on success, -1 on failure. | 1892 | * Returns : 0 on success, -1 on failure. |
1843 | */ | 1893 | */ |
1844 | 1894 | ||
1845 | static int do_abort (struct Scsi_Host *host) | 1895 | static int do_abort(struct Scsi_Host *host) |
1846 | { | 1896 | { |
1847 | unsigned char tmp, *msgptr, phase; | 1897 | unsigned char tmp, *msgptr, phase; |
1848 | int len; | 1898 | int len; |
1849 | 1899 | ||
1850 | /* Request message out phase */ | 1900 | /* Request message out phase */ |
1851 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); | ||
1852 | |||
1853 | /* | ||
1854 | * Wait for the target to indicate a valid phase by asserting | ||
1855 | * REQ. Once this happens, we'll have either a MSGOUT phase | ||
1856 | * and can immediately send the ABORT message, or we'll have some | ||
1857 | * other phase and will have to source/sink data. | ||
1858 | * | ||
1859 | * We really don't care what value was on the bus or what value | ||
1860 | * the target sees, so we just handshake. | ||
1861 | */ | ||
1862 | |||
1863 | while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ); | ||
1864 | |||
1865 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); | ||
1866 | |||
1867 | if ((tmp & PHASE_MASK) != PHASE_MSGOUT) { | ||
1868 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | | ||
1869 | ICR_ASSERT_ACK); | ||
1870 | while (NCR5380_read(STATUS_REG) & SR_REQ); | ||
1871 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); | 1901 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); |
1872 | } | 1902 | |
1873 | 1903 | /* | |
1874 | tmp = ABORT; | 1904 | * Wait for the target to indicate a valid phase by asserting |
1875 | msgptr = &tmp; | 1905 | * REQ. Once this happens, we'll have either a MSGOUT phase |
1876 | len = 1; | 1906 | * and can immediately send the ABORT message, or we'll have some |
1877 | phase = PHASE_MSGOUT; | 1907 | * other phase and will have to source/sink data. |
1878 | NCR5380_transfer_pio (host, &phase, &len, &msgptr); | 1908 | * |
1879 | 1909 | * We really don't care what value was on the bus or what value | |
1880 | /* | 1910 | * the target sees, so we just handshake. |
1881 | * If we got here, and the command completed successfully, | 1911 | */ |
1882 | * we're about to go into bus free state. | 1912 | |
1883 | */ | 1913 | while (!(tmp = NCR5380_read(STATUS_REG)) & SR_REQ) |
1884 | 1914 | ; | |
1885 | return len ? -1 : 0; | 1915 | |
1916 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); | ||
1917 | |||
1918 | if ((tmp & PHASE_MASK) != PHASE_MSGOUT) { | ||
1919 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | | ||
1920 | ICR_ASSERT_ACK); | ||
1921 | while (NCR5380_read(STATUS_REG) & SR_REQ) | ||
1922 | ; | ||
1923 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); | ||
1924 | } | ||
1925 | |||
1926 | tmp = ABORT; | ||
1927 | msgptr = &tmp; | ||
1928 | len = 1; | ||
1929 | phase = PHASE_MSGOUT; | ||
1930 | NCR5380_transfer_pio(host, &phase, &len, &msgptr); | ||
1931 | |||
1932 | /* | ||
1933 | * If we got here, and the command completed successfully, | ||
1934 | * we're about to go into bus free state. | ||
1935 | */ | ||
1936 | |||
1937 | return len ? -1 : 0; | ||
1886 | } | 1938 | } |
1887 | 1939 | ||
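Both do_abort() above and the information-transfer loop further down derive the current bus phase from the MSG, C/D and I/O bits of the status register. A small standalone decode of that three-bit phase code follows; the bit positions are the customary NCR5380 ones and are assumed here rather than quoted from this driver's header.

#include <stdio.h>

/* Customary NCR5380 status-register phase bits (assumed, not quoted). */
#define SR_IO   0x04
#define SR_CD   0x08
#define SR_MSG  0x10
#define PHASE_MASK (SR_MSG | SR_CD | SR_IO)

static const char *phase_name(unsigned char status)
{
	switch (status & PHASE_MASK) {
	case 0:				return "DATA OUT";
	case SR_IO:			return "DATA IN";
	case SR_CD:			return "COMMAND";
	case SR_CD | SR_IO:		return "STATUS";
	case SR_MSG | SR_CD:		return "MESSAGE OUT";
	case SR_MSG | SR_CD | SR_IO:	return "MESSAGE IN";
	default:			return "reserved";
	}
}

int main(void)
{
	/* e.g. REQ asserted together with MSG, C/D and I/O -> MESSAGE IN */
	printf("%s\n", phase_name(0x3c));
	return 0;
}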
1888 | #if defined(REAL_DMA) | 1940 | #if defined(REAL_DMA) |
1889 | /* | 1941 | /* |
1890 | * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, | 1942 | * Function : int NCR5380_transfer_dma (struct Scsi_Host *instance, |
1891 | * unsigned char *phase, int *count, unsigned char **data) | 1943 | * unsigned char *phase, int *count, unsigned char **data) |
1892 | * | 1944 | * |
1893 | * Purpose : transfers data in given phase using either real | 1945 | * Purpose : transfers data in given phase using either real |
1894 | * or pseudo DMA. | 1946 | * or pseudo DMA. |
1895 | * | 1947 | * |
1896 | * Inputs : instance - instance of driver, *phase - pointer to | 1948 | * Inputs : instance - instance of driver, *phase - pointer to |
1897 | * what phase is expected, *count - pointer to number of | 1949 | * what phase is expected, *count - pointer to number of |
1898 | * bytes to transfer, **data - pointer to data pointer. | 1950 | * bytes to transfer, **data - pointer to data pointer. |
1899 | * | 1951 | * |
1900 | * Returns : -1 when different phase is entered without transferring | 1952 | * Returns : -1 when different phase is entered without transferring |
1901 | * maximum number of bytes, 0 if all bytes are transferred or exit | 1953 | * maximum number of bytes, 0 if all bytes are transferred or exit |
1902 | * is in the same phase. | 1954 | * is in the same phase. |
1903 | * | 1955 | * |
1904 | * Also, *phase, *count, *data are modified in place. | 1956 | * Also, *phase, *count, *data are modified in place. |
1905 | * | 1957 | * |
1906 | */ | 1958 | */ |
1907 | 1959 | ||
1908 | 1960 | ||
1909 | static int NCR5380_transfer_dma( struct Scsi_Host *instance, | 1961 | static int NCR5380_transfer_dma(struct Scsi_Host *instance, |
1910 | unsigned char *phase, int *count, | 1962 | unsigned char *phase, int *count, |
1911 | unsigned char **data) | 1963 | unsigned char **data) |
1912 | { | 1964 | { |
1913 | SETUP_HOSTDATA(instance); | 1965 | SETUP_HOSTDATA(instance); |
1914 | register int c = *count; | 1966 | register int c = *count; |
1915 | register unsigned char p = *phase; | 1967 | register unsigned char p = *phase; |
1916 | register unsigned char *d = *data; | 1968 | register unsigned char *d = *data; |
1917 | unsigned char tmp; | 1969 | unsigned char tmp; |
1918 | unsigned long flags; | 1970 | unsigned long flags; |
1919 | 1971 | ||
1920 | if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { | 1972 | if ((tmp = (NCR5380_read(STATUS_REG) & PHASE_MASK)) != p) { |
1921 | *phase = tmp; | 1973 | *phase = tmp; |
1922 | return -1; | 1974 | return -1; |
1923 | } | 1975 | } |
1924 | 1976 | ||
1925 | if (atari_read_overruns && (p & SR_IO)) { | 1977 | if (atari_read_overruns && (p & SR_IO)) |
1926 | c -= atari_read_overruns; | 1978 | c -= atari_read_overruns; |
1927 | } | ||
1928 | 1979 | ||
1929 | DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", | 1980 | DMA_PRINTK("scsi%d: initializing DMA for %s, %d bytes %s %p\n", |
1930 | HOSTNO, (p & SR_IO) ? "reading" : "writing", | 1981 | HOSTNO, (p & SR_IO) ? "reading" : "writing", |
1931 | c, (p & SR_IO) ? "to" : "from", d); | 1982 | c, (p & SR_IO) ? "to" : "from", d); |
1932 | 1983 | ||
1933 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); | 1984 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(p)); |
1934 | 1985 | ||
1935 | #ifdef REAL_DMA | 1986 | #ifdef REAL_DMA |
1936 | NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY); | 1987 | NCR5380_write(MODE_REG, MR_BASE | MR_DMA_MODE | MR_ENABLE_EOP_INTR | MR_MONITOR_BSY); |
1937 | #endif /* def REAL_DMA */ | 1988 | #endif /* def REAL_DMA */ |
1938 | 1989 | ||
1939 | if (IS_A_TT()) { | 1990 | if (IS_A_TT()) { |
1940 | /* On the Medusa, the DMA must be initialized before starting | 1991 | /* On the Medusa, the DMA must be initialized before starting |
1941 | * the NCR. This is also the cleaner way for the TT. | 1992 | * the NCR. This is also the cleaner way for the TT. |
1942 | */ | 1993 | */ |
1943 | local_irq_save(flags); | 1994 | local_irq_save(flags); |
1944 | hostdata->dma_len = (p & SR_IO) ? | 1995 | hostdata->dma_len = (p & SR_IO) ? |
1945 | NCR5380_dma_read_setup(instance, d, c) : | 1996 | NCR5380_dma_read_setup(instance, d, c) : |
1946 | NCR5380_dma_write_setup(instance, d, c); | 1997 | NCR5380_dma_write_setup(instance, d, c); |
1947 | local_irq_restore(flags); | 1998 | local_irq_restore(flags); |
1948 | } | 1999 | } |
1949 | 2000 | ||
1950 | if (p & SR_IO) | 2001 | if (p & SR_IO) |
1951 | NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); | 2002 | NCR5380_write(START_DMA_INITIATOR_RECEIVE_REG, 0); |
1952 | else { | 2003 | else { |
1953 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); | 2004 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_DATA); |
1954 | NCR5380_write(START_DMA_SEND_REG, 0); | 2005 | NCR5380_write(START_DMA_SEND_REG, 0); |
1955 | } | 2006 | } |
1956 | 2007 | ||
1957 | if (!IS_A_TT()) { | 2008 | if (!IS_A_TT()) { |
1958 | /* On the Falcon, the DMA setup must be done after the last */ | 2009 | /* On the Falcon, the DMA setup must be done after the last */ |
1959 | /* NCR access, else the DMA setup gets trashed! | 2010 | /* NCR access, else the DMA setup gets trashed! |
1960 | */ | 2011 | */ |
1961 | local_irq_save(flags); | 2012 | local_irq_save(flags); |
1962 | hostdata->dma_len = (p & SR_IO) ? | 2013 | hostdata->dma_len = (p & SR_IO) ? |
1963 | NCR5380_dma_read_setup(instance, d, c) : | 2014 | NCR5380_dma_read_setup(instance, d, c) : |
1964 | NCR5380_dma_write_setup(instance, d, c); | 2015 | NCR5380_dma_write_setup(instance, d, c); |
1965 | local_irq_restore(flags); | 2016 | local_irq_restore(flags); |
1966 | } | 2017 | } |
1967 | return 0; | 2018 | return 0; |
1968 | } | 2019 | } |
1969 | #endif /* defined(REAL_DMA) */ | 2020 | #endif /* defined(REAL_DMA) */ |
1970 | 2021 | ||
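Before programming the DMA, NCR5380_transfer_dma() above makes two small decisions: on Atari reads it trims the requested length by atari_read_overruns, and it picks the read or write setup routine from the I/O bit of the phase. A hedged sketch of just that logic, with stand-in setup functions and an invented overrun constant:

#include <stdio.h>

#define SR_IO 0x04			/* assumed I/O phase bit */

static int overrun_margin = 4;		/* stands in for atari_read_overruns */

/* Stand-in setup routines; the real ones program the ST/TT DMA hardware. */
static long dma_read_setup(void *buf, long len)  { (void)buf; return len; }
static long dma_write_setup(void *buf, long len) { (void)buf; return len; }

/* Trim reads by the overrun margin, then pick the setup routine by direction. */
static long setup_dma(unsigned char phase, void *buf, long len)
{
	if (overrun_margin && (phase & SR_IO))
		len -= overrun_margin;

	return (phase & SR_IO) ? dma_read_setup(buf, len)
			       : dma_write_setup(buf, len);
}

int main(void)
{
	char buf[512];

	printf("read:  %ld bytes\n", setup_dma(SR_IO, buf, sizeof(buf)));
	printf("write: %ld bytes\n", setup_dma(0, buf, sizeof(buf)));
	return 0;
}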
1971 | /* | 2022 | /* |
1972 | * Function : NCR5380_information_transfer (struct Scsi_Host *instance) | 2023 | * Function : NCR5380_information_transfer (struct Scsi_Host *instance) |
1973 | * | 2024 | * |
1974 | * Purpose : run through the various SCSI phases and do as the target | 2025 | * Purpose : run through the various SCSI phases and do as the target |
1975 | * directs us to. Operates on the currently connected command, | 2026 | * directs us to. Operates on the currently connected command, |
1976 | * instance->connected. | 2027 | * instance->connected. |
1977 | * | 2028 | * |
1978 | * Inputs : instance, instance for which we are doing commands | 2029 | * Inputs : instance, instance for which we are doing commands |
1979 | * | 2030 | * |
1980 | * Side effects : SCSI things happen, the disconnected queue will be | 2031 | * Side effects : SCSI things happen, the disconnected queue will be |
1981 | * modified if a command disconnects, *instance->connected will | 2032 | * modified if a command disconnects, *instance->connected will |
1982 | * change. | 2033 | * change. |
1983 | * | 2034 | * |
1984 | * XXX Note : we need to watch for bus free or a reset condition here | 2035 | * XXX Note : we need to watch for bus free or a reset condition here |
1985 | * to recover from an unexpected bus free condition. | 2036 | * to recover from an unexpected bus free condition. |
1986 | */ | 2037 | */ |
1987 | 2038 | ||
1988 | static void NCR5380_information_transfer (struct Scsi_Host *instance) | 2039 | static void NCR5380_information_transfer(struct Scsi_Host *instance) |
1989 | { | 2040 | { |
1990 | SETUP_HOSTDATA(instance); | 2041 | SETUP_HOSTDATA(instance); |
1991 | unsigned long flags; | 2042 | unsigned long flags; |
1992 | unsigned char msgout = NOP; | 2043 | unsigned char msgout = NOP; |
1993 | int sink = 0; | 2044 | int sink = 0; |
1994 | int len; | 2045 | int len; |
1995 | #if defined(REAL_DMA) | 2046 | #if defined(REAL_DMA) |
1996 | int transfersize; | 2047 | int transfersize; |
1997 | #endif | 2048 | #endif |
1998 | unsigned char *data; | 2049 | unsigned char *data; |
1999 | unsigned char phase, tmp, extended_msg[10], old_phase=0xff; | 2050 | unsigned char phase, tmp, extended_msg[10], old_phase = 0xff; |
2000 | Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected; | 2051 | Scsi_Cmnd *cmd = (Scsi_Cmnd *) hostdata->connected; |
2052 | |||
2053 | while (1) { | ||
2054 | tmp = NCR5380_read(STATUS_REG); | ||
2055 | /* We only have a valid SCSI phase when REQ is asserted */ | ||
2056 | if (tmp & SR_REQ) { | ||
2057 | phase = (tmp & PHASE_MASK); | ||
2058 | if (phase != old_phase) { | ||
2059 | old_phase = phase; | ||
2060 | NCR_PRINT_PHASE(NDEBUG_INFORMATION); | ||
2061 | } | ||
2001 | 2062 | ||
2002 | while (1) { | 2063 | if (sink && (phase != PHASE_MSGOUT)) { |
2003 | tmp = NCR5380_read(STATUS_REG); | 2064 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); |
2004 | /* We only have a valid SCSI phase when REQ is asserted */ | 2065 | |
2005 | if (tmp & SR_REQ) { | 2066 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | |
2006 | phase = (tmp & PHASE_MASK); | 2067 | ICR_ASSERT_ACK); |
2007 | if (phase != old_phase) { | 2068 | while (NCR5380_read(STATUS_REG) & SR_REQ) |
2008 | old_phase = phase; | 2069 | ; |
2009 | NCR_PRINT_PHASE(NDEBUG_INFORMATION); | 2070 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
2010 | } | 2071 | ICR_ASSERT_ATN); |
2011 | 2072 | sink = 0; | |
2012 | if (sink && (phase != PHASE_MSGOUT)) { | 2073 | continue; |
2013 | NCR5380_write(TARGET_COMMAND_REG, PHASE_SR_TO_TCR(tmp)); | 2074 | } |
2014 | 2075 | ||
2015 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN | | 2076 | switch (phase) { |
2016 | ICR_ASSERT_ACK); | 2077 | case PHASE_DATAOUT: |
2017 | while (NCR5380_read(STATUS_REG) & SR_REQ); | ||
2018 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | ||
2019 | ICR_ASSERT_ATN); | ||
2020 | sink = 0; | ||
2021 | continue; | ||
2022 | } | ||
2023 | |||
2024 | switch (phase) { | ||
2025 | case PHASE_DATAOUT: | ||
2026 | #if (NDEBUG & NDEBUG_NO_DATAOUT) | 2078 | #if (NDEBUG & NDEBUG_NO_DATAOUT) |
2027 | printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT " | 2079 | printk("scsi%d: NDEBUG_NO_DATAOUT set, attempted DATAOUT " |
2028 | "aborted\n", HOSTNO); | 2080 | "aborted\n", HOSTNO); |
2029 | sink = 1; | 2081 | sink = 1; |
2030 | do_abort(instance); | 2082 | do_abort(instance); |
2031 | cmd->result = DID_ERROR << 16; | 2083 | cmd->result = DID_ERROR << 16; |
2032 | cmd->done(cmd); | 2084 | cmd->done(cmd); |
2033 | return; | 2085 | return; |
2034 | #endif | 2086 | #endif |
2035 | case PHASE_DATAIN: | 2087 | case PHASE_DATAIN: |
2036 | /* | 2088 | /* |
2037 | * If there is no room left in the current buffer in the | 2089 | * If there is no room left in the current buffer in the |
2038 | * scatter-gather list, move onto the next one. | 2090 | * scatter-gather list, move onto the next one. |
2039 | */ | 2091 | */ |
2040 | 2092 | ||
2041 | if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { | 2093 | if (!cmd->SCp.this_residual && cmd->SCp.buffers_residual) { |
2042 | ++cmd->SCp.buffer; | 2094 | ++cmd->SCp.buffer; |
2043 | --cmd->SCp.buffers_residual; | 2095 | --cmd->SCp.buffers_residual; |
2044 | cmd->SCp.this_residual = cmd->SCp.buffer->length; | 2096 | cmd->SCp.this_residual = cmd->SCp.buffer->length; |
2045 | cmd->SCp.ptr = page_address(cmd->SCp.buffer->page)+ | 2097 | cmd->SCp.ptr = page_address(cmd->SCp.buffer->page) + |
2046 | cmd->SCp.buffer->offset; | 2098 | cmd->SCp.buffer->offset; |
2047 | /* ++roman: Try to merge some scatter-buffers if | 2099 | /* ++roman: Try to merge some scatter-buffers if |
2048 | * they are at contiguous physical addresses. | 2100 | * they are at contiguous physical addresses. |
2049 | */ | 2101 | */ |
2050 | merge_contiguous_buffers( cmd ); | 2102 | merge_contiguous_buffers(cmd); |
2051 | INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", | 2103 | INF_PRINTK("scsi%d: %d bytes and %d buffers left\n", |
2052 | HOSTNO, cmd->SCp.this_residual, | 2104 | HOSTNO, cmd->SCp.this_residual, |
2053 | cmd->SCp.buffers_residual); | 2105 | cmd->SCp.buffers_residual); |
2054 | } | 2106 | } |
2055 | 2107 | ||
2056 | /* | 2108 | /* |
2057 | * The preferred transfer method is going to be | 2109 | * The preferred transfer method is going to be |
2058 | * PSEUDO-DMA for systems that are strictly PIO, | 2110 | * PSEUDO-DMA for systems that are strictly PIO, |
2059 | * since we can let the hardware do the handshaking. | 2111 | * since we can let the hardware do the handshaking. |
2060 | * | 2112 | * |
2061 | * For this to work, we need to know the transfersize | 2113 | * For this to work, we need to know the transfersize |
2062 | * ahead of time, since the pseudo-DMA code will sit | 2114 | * ahead of time, since the pseudo-DMA code will sit |
2063 | * in an unconditional loop. | 2115 | * in an unconditional loop. |
2064 | */ | 2116 | */ |
2065 | 2117 | ||
2066 | /* ++roman: I suggest, this should be | 2118 | /* ++roman: I suggest, this should be |
2067 | * #if def(REAL_DMA) | 2119 | * #if def(REAL_DMA) |
2068 | * instead of leaving REAL_DMA out. | 2120 | * instead of leaving REAL_DMA out. |
2069 | */ | 2121 | */ |
2070 | 2122 | ||
2071 | #if defined(REAL_DMA) | 2123 | #if defined(REAL_DMA) |
2072 | if (!cmd->device->borken && | 2124 | if (!cmd->device->borken && |
2073 | (transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > 31) { | 2125 | (transfersize = NCR5380_dma_xfer_len(instance,cmd,phase)) > 31) { |
2074 | len = transfersize; | 2126 | len = transfersize; |
2075 | cmd->SCp.phase = phase; | 2127 | cmd->SCp.phase = phase; |
2076 | if (NCR5380_transfer_dma(instance, &phase, | 2128 | if (NCR5380_transfer_dma(instance, &phase, |
2077 | &len, (unsigned char **) &cmd->SCp.ptr)) { | 2129 | &len, (unsigned char **)&cmd->SCp.ptr)) { |
2078 | /* | 2130 | /* |
2079 | * If the watchdog timer fires, all future | 2131 | * If the watchdog timer fires, all future |
2080 | * accesses to this device will use the | 2132 | * accesses to this device will use the |
2081 | * polled-IO. */ | 2133 | * polled-IO. */ |
2082 | printk(KERN_NOTICE "scsi%d: switching target %d " | 2134 | printk(KERN_NOTICE "scsi%d: switching target %d " |
2083 | "lun %d to slow handshake\n", HOSTNO, | 2135 | "lun %d to slow handshake\n", HOSTNO, |
2084 | cmd->device->id, cmd->device->lun); | 2136 | cmd->device->id, cmd->device->lun); |
2085 | cmd->device->borken = 1; | 2137 | cmd->device->borken = 1; |
2086 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 2138 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | |
2087 | ICR_ASSERT_ATN); | 2139 | ICR_ASSERT_ATN); |
2088 | sink = 1; | 2140 | sink = 1; |
2089 | do_abort(instance); | 2141 | do_abort(instance); |
2090 | cmd->result = DID_ERROR << 16; | 2142 | cmd->result = DID_ERROR << 16; |
2091 | cmd->done(cmd); | 2143 | cmd->done(cmd); |
2092 | /* XXX - need to source or sink data here, as appropriate */ | 2144 | /* XXX - need to source or sink data here, as appropriate */ |
2093 | } else { | 2145 | } else { |
2094 | #ifdef REAL_DMA | 2146 | #ifdef REAL_DMA |
2095 | /* ++roman: When using real DMA, | 2147 | /* ++roman: When using real DMA, |
2096 | * information_transfer() should return after | 2148 | * information_transfer() should return after |
2097 | * starting DMA since it has nothing more to | 2149 | * starting DMA since it has nothing more to |
2098 | * do. | 2150 | * do. |
2099 | */ | 2151 | */ |
2100 | return; | 2152 | return; |
2101 | #else | 2153 | #else |
2102 | cmd->SCp.this_residual -= transfersize - len; | 2154 | cmd->SCp.this_residual -= transfersize - len; |
2103 | #endif | 2155 | #endif |
2104 | } | 2156 | } |
2105 | } else | 2157 | } else |
2106 | #endif /* defined(REAL_DMA) */ | 2158 | #endif /* defined(REAL_DMA) */ |
2107 | NCR5380_transfer_pio(instance, &phase, | 2159 | NCR5380_transfer_pio(instance, &phase, |
2108 | (int *) &cmd->SCp.this_residual, (unsigned char **) | 2160 | (int *)&cmd->SCp.this_residual, |
2109 | &cmd->SCp.ptr); | 2161 | (unsigned char **)&cmd->SCp.ptr); |
2110 | break; | 2162 | break; |
2111 | case PHASE_MSGIN: | 2163 | case PHASE_MSGIN: |
2112 | len = 1; | 2164 | len = 1; |
2113 | data = &tmp; | 2165 | data = &tmp; |
2114 | NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */ | 2166 | NCR5380_write(SELECT_ENABLE_REG, 0); /* disable reselects */ |
2115 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2167 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
2116 | cmd->SCp.Message = tmp; | 2168 | cmd->SCp.Message = tmp; |
2117 | 2169 | ||
2118 | switch (tmp) { | 2170 | switch (tmp) { |
2119 | /* | 2171 | /* |
2120 | * Linking lets us reduce the time required to get the | 2172 | * Linking lets us reduce the time required to get the |
2121 | * next command out to the device, hopefully this will | 2173 | * next command out to the device, hopefully this will |
2122 | * mean we don't waste another revolution due to the delays | 2174 | * mean we don't waste another revolution due to the delays |
2123 | * required by ARBITRATION and another SELECTION. | 2175 | * required by ARBITRATION and another SELECTION. |
2124 | * | 2176 | * |
2125 | * In the current implementation proposal, low level drivers | 2177 | * In the current implementation proposal, low level drivers |
2126 | * merely have to start the next command, pointed to by | 2178 | * merely have to start the next command, pointed to by |
2127 | * next_link, done() is called as with unlinked commands. | 2179 | * next_link, done() is called as with unlinked commands. |
2128 | */ | 2180 | */ |
2129 | #ifdef LINKED | 2181 | #ifdef LINKED |
2130 | case LINKED_CMD_COMPLETE: | 2182 | case LINKED_CMD_COMPLETE: |
2131 | case LINKED_FLG_CMD_COMPLETE: | 2183 | case LINKED_FLG_CMD_COMPLETE: |
2132 | /* Accept message by clearing ACK */ | 2184 | /* Accept message by clearing ACK */ |
2133 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2185 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2134 | 2186 | ||
2135 | LNK_PRINTK("scsi%d: target %d lun %d linked command " | 2187 | LNK_PRINTK("scsi%d: target %d lun %d linked command " |
2136 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2188 | "complete.\n", HOSTNO, cmd->device->id, cmd->device->lun); |
2137 | 2189 | ||
2138 | /* Enable reselect interrupts */ | 2190 | /* Enable reselect interrupts */ |
2139 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 2191 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
2140 | /* | 2192 | /* |
2141 | * Sanity check : A linked command should only terminate | 2193 | * Sanity check : A linked command should only terminate |
2142 | * with one of these messages if there are more linked | 2194 | * with one of these messages if there are more linked |
2143 | * commands available. | 2195 | * commands available. |
2144 | */ | 2196 | */ |
2145 | 2197 | ||
2146 | if (!cmd->next_link) { | 2198 | if (!cmd->next_link) { |
2147 | printk(KERN_NOTICE "scsi%d: target %d lun %d " | 2199 | printk(KERN_NOTICE "scsi%d: target %d lun %d " |
2148 | "linked command complete, no next_link\n", | 2200 | "linked command complete, no next_link\n", |
2149 | HOSTNO, cmd->device->id, cmd->device->lun); | 2201 | HOSTNO, cmd->device->id, cmd->device->lun); |
2150 | sink = 1; | 2202 | sink = 1; |
2151 | do_abort (instance); | 2203 | do_abort(instance); |
2152 | return; | 2204 | return; |
2153 | } | 2205 | } |
2154 | 2206 | ||
2155 | initialize_SCp(cmd->next_link); | 2207 | initialize_SCp(cmd->next_link); |
2156 | /* The next command is still part of this process; copy it | 2208 | /* The next command is still part of this process; copy it |
2157 | * and don't free it! */ | 2209 | * and don't free it! */ |
2158 | cmd->next_link->tag = cmd->tag; | 2210 | cmd->next_link->tag = cmd->tag; |
2159 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); | 2211 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); |
2160 | LNK_PRINTK("scsi%d: target %d lun %d linked request " | 2212 | LNK_PRINTK("scsi%d: target %d lun %d linked request " |
2161 | "done, calling scsi_done().\n", | 2213 | "done, calling scsi_done().\n", |
2162 | HOSTNO, cmd->device->id, cmd->device->lun); | 2214 | HOSTNO, cmd->device->id, cmd->device->lun); |
2163 | #ifdef NCR5380_STATS | 2215 | #ifdef NCR5380_STATS |
2164 | collect_stats(hostdata, cmd); | 2216 | collect_stats(hostdata, cmd); |
2165 | #endif | 2217 | #endif |
2166 | cmd->scsi_done(cmd); | 2218 | cmd->scsi_done(cmd); |
2167 | cmd = hostdata->connected; | 2219 | cmd = hostdata->connected; |
2168 | break; | 2220 | break; |
2169 | #endif /* def LINKED */ | 2221 | #endif /* def LINKED */ |
2170 | case ABORT: | 2222 | case ABORT: |
2171 | case COMMAND_COMPLETE: | 2223 | case COMMAND_COMPLETE: |
2172 | /* Accept message by clearing ACK */ | 2224 | /* Accept message by clearing ACK */ |
2173 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2225 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2174 | /* ++guenther: possible race with Falcon locking */ | 2226 | /* ++guenther: possible race with Falcon locking */ |
2175 | falcon_dont_release++; | 2227 | falcon_dont_release++; |
2176 | hostdata->connected = NULL; | 2228 | hostdata->connected = NULL; |
2177 | QU_PRINTK("scsi%d: command for target %d, lun %d " | 2229 | QU_PRINTK("scsi%d: command for target %d, lun %d " |
2178 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); | 2230 | "completed\n", HOSTNO, cmd->device->id, cmd->device->lun); |
2179 | #ifdef SUPPORT_TAGS | 2231 | #ifdef SUPPORT_TAGS |
2180 | cmd_free_tag( cmd ); | 2232 | cmd_free_tag(cmd); |
2181 | if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { | 2233 | if (status_byte(cmd->SCp.Status) == QUEUE_FULL) { |
2182 | /* Turn a QUEUE FULL status into BUSY, I think the | 2234 | /* Turn a QUEUE FULL status into BUSY, I think the |
2183 | * mid level cannot handle QUEUE FULL :-( (The | 2235 | * mid level cannot handle QUEUE FULL :-( (The |
2184 | * command is retried after BUSY). Also update our | 2236 | * command is retried after BUSY). Also update our |
2185 | * queue size to the number of currently issued | 2237 | * queue size to the number of currently issued |
2186 | * commands now. | 2238 | * commands now. |
2187 | */ | 2239 | */ |
2188 | /* ++Andreas: the mid level code knows about | 2240 | /* ++Andreas: the mid level code knows about |
2189 | QUEUE_FULL now. */ | 2241 | QUEUE_FULL now. */ |
2190 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; | 2242 | TAG_ALLOC *ta = &TagAlloc[cmd->device->id][cmd->device->lun]; |
2191 | TAG_PRINTK("scsi%d: target %d lun %d returned " | 2243 | TAG_PRINTK("scsi%d: target %d lun %d returned " |
2192 | "QUEUE_FULL after %d commands\n", | 2244 | "QUEUE_FULL after %d commands\n", |
2193 | HOSTNO, cmd->device->id, cmd->device->lun, | 2245 | HOSTNO, cmd->device->id, cmd->device->lun, |
2194 | ta->nr_allocated); | 2246 | ta->nr_allocated); |
2195 | if (ta->queue_size > ta->nr_allocated) | 2247 | if (ta->queue_size > ta->nr_allocated) |
2196 | ta->nr_allocated = ta->queue_size; | 2248 | ta->nr_allocated = ta->queue_size; |
2197 | } | 2249 | } |
2198 | #else | 2250 | #else |
2199 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 2251 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); |
2200 | #endif | 2252 | #endif |
2201 | /* Enable reselect interrupts */ | 2253 | /* Enable reselect interrupts */ |
2202 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 2254 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
2203 | 2255 | ||
2204 | /* | 2256 | /* |
2205 | * I'm not sure what the correct thing to do here is : | 2257 | * I'm not sure what the correct thing to do here is : |
2206 | * | 2258 | * |
2207 | * If the command that just executed is NOT a request | 2259 | * If the command that just executed is NOT a request |
2208 | * sense, the obvious thing to do is to set the result | 2260 | * sense, the obvious thing to do is to set the result |
2209 | * code to the values of the stored parameters. | 2261 | * code to the values of the stored parameters. |
2210 | * | 2262 | * |
2211 | * If it was a REQUEST SENSE command, we need some way to | 2263 | * If it was a REQUEST SENSE command, we need some way to |
2212 | * differentiate between the failure code of the original | 2264 | * differentiate between the failure code of the original |
2213 | * and the failure code of the REQUEST sense - the obvious | 2265 | * and the failure code of the REQUEST sense - the obvious |
2214 | * case is success, where we fall through and leave the | 2266 | * case is success, where we fall through and leave the |
2215 | * result code unchanged. | 2267 | * result code unchanged. |
2216 | * | 2268 | * |
2217 | * The non-obvious place is where the REQUEST SENSE failed | 2269 | * The non-obvious place is where the REQUEST SENSE failed |
2218 | */ | 2270 | */ |
2219 | 2271 | ||
2220 | if (cmd->cmnd[0] != REQUEST_SENSE) | 2272 | if (cmd->cmnd[0] != REQUEST_SENSE) |
2221 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); | 2273 | cmd->result = cmd->SCp.Status | (cmd->SCp.Message << 8); |
2222 | else if (status_byte(cmd->SCp.Status) != GOOD) | 2274 | else if (status_byte(cmd->SCp.Status) != GOOD) |
2223 | cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); | 2275 | cmd->result = (cmd->result & 0x00ffff) | (DID_ERROR << 16); |
2224 | |||
2225 | #ifdef AUTOSENSE | ||
2226 | if ((cmd->cmnd[0] != REQUEST_SENSE) && | ||
2227 | (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { | ||
2228 | ASEN_PRINTK("scsi%d: performing request sense\n", | ||
2229 | HOSTNO); | ||
2230 | cmd->cmnd[0] = REQUEST_SENSE; | ||
2231 | cmd->cmnd[1] &= 0xe0; | ||
2232 | cmd->cmnd[2] = 0; | ||
2233 | cmd->cmnd[3] = 0; | ||
2234 | cmd->cmnd[4] = sizeof(cmd->sense_buffer); | ||
2235 | cmd->cmnd[5] = 0; | ||
2236 | cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); | ||
2237 | |||
2238 | cmd->use_sg = 0; | ||
2239 | /* this is initialized from initialize_SCp | ||
2240 | cmd->SCp.buffer = NULL; | ||
2241 | cmd->SCp.buffers_residual = 0; | ||
2242 | */ | ||
2243 | cmd->request_buffer = (char *) cmd->sense_buffer; | ||
2244 | cmd->request_bufflen = sizeof(cmd->sense_buffer); | ||
2245 | 2276 | ||
2246 | local_irq_save(flags); | 2277 | #ifdef AUTOSENSE |
2247 | LIST(cmd,hostdata->issue_queue); | 2278 | if ((cmd->cmnd[0] != REQUEST_SENSE) && |
2248 | NEXT(cmd) = hostdata->issue_queue; | 2279 | (status_byte(cmd->SCp.Status) == CHECK_CONDITION)) { |
2249 | hostdata->issue_queue = (Scsi_Cmnd *) cmd; | 2280 | ASEN_PRINTK("scsi%d: performing request sense\n", HOSTNO); |
2250 | local_irq_restore(flags); | 2281 | cmd->cmnd[0] = REQUEST_SENSE; |
2251 | QU_PRINTK("scsi%d: REQUEST SENSE added to head of " | 2282 | cmd->cmnd[1] &= 0xe0; |
2252 | "issue queue\n", H_NO(cmd)); | 2283 | cmd->cmnd[2] = 0; |
2253 | } else | 2284 | cmd->cmnd[3] = 0; |
2285 | cmd->cmnd[4] = sizeof(cmd->sense_buffer); | ||
2286 | cmd->cmnd[5] = 0; | ||
2287 | cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]); | ||
2288 | |||
2289 | cmd->use_sg = 0; | ||
2290 | /* this is initialized from initialize_SCp | ||
2291 | cmd->SCp.buffer = NULL; | ||
2292 | cmd->SCp.buffers_residual = 0; | ||
2293 | */ | ||
2294 | cmd->request_buffer = (char *) cmd->sense_buffer; | ||
2295 | cmd->request_bufflen = sizeof(cmd->sense_buffer); | ||
2296 | |||
2297 | local_irq_save(flags); | ||
2298 | LIST(cmd,hostdata->issue_queue); | ||
2299 | SET_NEXT(cmd, hostdata->issue_queue); | ||
2300 | hostdata->issue_queue = (Scsi_Cmnd *) cmd; | ||
2301 | local_irq_restore(flags); | ||
2302 | QU_PRINTK("scsi%d: REQUEST SENSE added to head of " | ||
2303 | "issue queue\n", H_NO(cmd)); | ||
2304 | } else | ||
2254 | #endif /* def AUTOSENSE */ | 2305 | #endif /* def AUTOSENSE */ |
2255 | { | 2306 | { |
2256 | #ifdef NCR5380_STATS | 2307 | #ifdef NCR5380_STATS |
2257 | collect_stats(hostdata, cmd); | 2308 | collect_stats(hostdata, cmd); |
2258 | #endif | 2309 | #endif |
2259 | cmd->scsi_done(cmd); | 2310 | cmd->scsi_done(cmd); |
2260 | } | 2311 | } |
2261 | 2312 | ||
2262 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 2313 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
2263 | /* | 2314 | /* |
2264 | * Restore phase bits to 0 so an interrupted selection, | 2315 | * Restore phase bits to 0 so an interrupted selection, |
2265 | * arbitration can resume. | 2316 | * arbitration can resume. |
2266 | */ | 2317 | */ |
2267 | NCR5380_write(TARGET_COMMAND_REG, 0); | 2318 | NCR5380_write(TARGET_COMMAND_REG, 0); |
2268 | 2319 | ||
2269 | while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) | 2320 | while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) |
2270 | barrier(); | 2321 | barrier(); |
2271 | 2322 | ||
2272 | falcon_dont_release--; | 2323 | falcon_dont_release--; |
2273 | /* ++roman: For Falcon SCSI, release the lock on the | 2324 | /* ++roman: For Falcon SCSI, release the lock on the |
2274 | * ST-DMA here if no other commands are waiting on the | 2325 | * ST-DMA here if no other commands are waiting on the |
2275 | * disconnected queue. | 2326 | * disconnected queue. |
2276 | */ | 2327 | */ |
2277 | falcon_release_lock_if_possible( hostdata ); | 2328 | falcon_release_lock_if_possible(hostdata); |
2278 | return; | 2329 | return; |
2279 | case MESSAGE_REJECT: | 2330 | case MESSAGE_REJECT: |
2280 | /* Accept message by clearing ACK */ | 2331 | /* Accept message by clearing ACK */ |
2281 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2332 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2282 | /* Enable reselect interrupts */ | 2333 | /* Enable reselect interrupts */ |
2283 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 2334 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
2284 | switch (hostdata->last_message) { | 2335 | switch (hostdata->last_message) { |
2285 | case HEAD_OF_QUEUE_TAG: | 2336 | case HEAD_OF_QUEUE_TAG: |
2286 | case ORDERED_QUEUE_TAG: | 2337 | case ORDERED_QUEUE_TAG: |
2287 | case SIMPLE_QUEUE_TAG: | 2338 | case SIMPLE_QUEUE_TAG: |
2288 | /* The target obviously doesn't support tagged | 2339 | /* The target obviously doesn't support tagged |
2289 | * queuing, even though it announced this ability in | 2340 | * queuing, even though it announced this ability in |
2290 | * its INQUIRY data ?!? (maybe only this LUN?) Ok, | 2341 | * its INQUIRY data ?!? (maybe only this LUN?) Ok, |
2291 | * clear 'tagged_supported' and lock the LUN, since | 2342 | * clear 'tagged_supported' and lock the LUN, since |
2292 | * the command is treated as untagged further on. | 2343 | * the command is treated as untagged further on. |
2293 | */ | 2344 | */ |
2294 | cmd->device->tagged_supported = 0; | 2345 | cmd->device->tagged_supported = 0; |
2295 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); | 2346 | hostdata->busy[cmd->device->id] |= (1 << cmd->device->lun); |
2296 | cmd->tag = TAG_NONE; | 2347 | cmd->tag = TAG_NONE; |
2297 | TAG_PRINTK("scsi%d: target %d lun %d rejected " | 2348 | TAG_PRINTK("scsi%d: target %d lun %d rejected " |
2298 | "QUEUE_TAG message; tagged queuing " | 2349 | "QUEUE_TAG message; tagged queuing " |
2299 | "disabled\n", | 2350 | "disabled\n", |
2300 | HOSTNO, cmd->device->id, cmd->device->lun); | 2351 | HOSTNO, cmd->device->id, cmd->device->lun); |
2301 | break; | 2352 | break; |
2302 | } | 2353 | } |
2303 | break; | 2354 | break; |
2304 | case DISCONNECT: | 2355 | case DISCONNECT: |
2305 | /* Accept message by clearing ACK */ | 2356 | /* Accept message by clearing ACK */ |
2306 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2357 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2307 | local_irq_save(flags); | 2358 | local_irq_save(flags); |
2308 | cmd->device->disconnect = 1; | 2359 | cmd->device->disconnect = 1; |
2309 | LIST(cmd,hostdata->disconnected_queue); | 2360 | LIST(cmd,hostdata->disconnected_queue); |
2310 | NEXT(cmd) = hostdata->disconnected_queue; | 2361 | SET_NEXT(cmd, hostdata->disconnected_queue); |
2311 | hostdata->connected = NULL; | 2362 | hostdata->connected = NULL; |
2312 | hostdata->disconnected_queue = cmd; | 2363 | hostdata->disconnected_queue = cmd; |
2313 | local_irq_restore(flags); | 2364 | local_irq_restore(flags); |
2314 | QU_PRINTK("scsi%d: command for target %d lun %d was " | 2365 | QU_PRINTK("scsi%d: command for target %d lun %d was " |
2315 | "moved from connected to the " | 2366 | "moved from connected to the " |
2316 | "disconnected_queue\n", HOSTNO, | 2367 | "disconnected_queue\n", HOSTNO, |
2317 | cmd->device->id, cmd->device->lun); | 2368 | cmd->device->id, cmd->device->lun); |
2318 | /* | 2369 | /* |
2319 | * Restore phase bits to 0 so an interrupted selection, | 2370 | * Restore phase bits to 0 so an interrupted selection, |
2320 | * arbitration can resume. | 2371 | * arbitration can resume. |
2321 | */ | 2372 | */ |
2322 | NCR5380_write(TARGET_COMMAND_REG, 0); | 2373 | NCR5380_write(TARGET_COMMAND_REG, 0); |
2323 | 2374 | ||
2324 | /* Enable reselect interrupts */ | 2375 | /* Enable reselect interrupts */ |
2325 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 2376 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
2326 | /* Wait for bus free to avoid nasty timeouts */ | 2377 | /* Wait for bus free to avoid nasty timeouts */ |
2327 | while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) | 2378 | while ((NCR5380_read(STATUS_REG) & SR_BSY) && !hostdata->connected) |
2328 | barrier(); | 2379 | barrier(); |
2329 | return; | 2380 | return; |
2330 | /* | 2381 | /* |
2331 | * The SCSI data pointer is *IMPLICITLY* saved on a disconnect | 2382 | * The SCSI data pointer is *IMPLICITLY* saved on a disconnect |
2332 | * operation, in violation of the SCSI spec so we can safely | 2383 | * operation, in violation of the SCSI spec so we can safely |
2333 | * ignore SAVE/RESTORE pointers calls. | 2384 | * ignore SAVE/RESTORE pointers calls. |
2334 | * | 2385 | * |
2335 | * Unfortunately, some disks violate the SCSI spec and | 2386 | * Unfortunately, some disks violate the SCSI spec and |
2336 | * don't issue the required SAVE_POINTERS message before | 2387 | * don't issue the required SAVE_POINTERS message before |
2337 | * disconnecting, and we have to break spec to remain | 2388 | * disconnecting, and we have to break spec to remain |
2338 | * compatible. | 2389 | * compatible. |
2339 | */ | 2390 | */ |
2340 | case SAVE_POINTERS: | 2391 | case SAVE_POINTERS: |
2341 | case RESTORE_POINTERS: | 2392 | case RESTORE_POINTERS: |
2342 | /* Accept message by clearing ACK */ | 2393 | /* Accept message by clearing ACK */ |
2343 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2394 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2344 | /* Enable reselect interrupts */ | 2395 | /* Enable reselect interrupts */ |
2345 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 2396 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
2346 | break; | 2397 | break; |
2347 | case EXTENDED_MESSAGE: | 2398 | case EXTENDED_MESSAGE: |
2348 | /* | 2399 | /* |
2349 | * Extended messages are sent in the following format : | 2400 | * Extended messages are sent in the following format : |
2350 | * Byte | 2401 | * Byte |
2351 | * 0 EXTENDED_MESSAGE == 1 | 2402 | * 0 EXTENDED_MESSAGE == 1 |
2352 | * 1 length (includes one byte for code, doesn't | 2403 | * 1 length (includes one byte for code, doesn't |
2353 | * include first two bytes) | 2404 | * include first two bytes) |
2354 | * 2 code | 2405 | * 2 code |
2355 | * 3..length+1 arguments | 2406 | * 3..length+1 arguments |
2356 | * | 2407 | * |
2357 | * Start the extended message buffer with the EXTENDED_MESSAGE | 2408 | * Start the extended message buffer with the EXTENDED_MESSAGE |
2358 | * byte, since spi_print_msg() wants the whole thing. | 2409 | * byte, since spi_print_msg() wants the whole thing. |
2359 | */ | 2410 | */ |
2360 | extended_msg[0] = EXTENDED_MESSAGE; | 2411 | extended_msg[0] = EXTENDED_MESSAGE; |
2361 | /* Accept first byte by clearing ACK */ | 2412 | /* Accept first byte by clearing ACK */ |
2362 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2413 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2363 | 2414 | ||
2364 | EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); | 2415 | EXT_PRINTK("scsi%d: receiving extended message\n", HOSTNO); |
2365 | 2416 | ||
2366 | len = 2; | 2417 | len = 2; |
2367 | data = extended_msg + 1; | 2418 | data = extended_msg + 1; |
2368 | phase = PHASE_MSGIN; | 2419 | phase = PHASE_MSGIN; |
2369 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2420 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
2370 | EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, | 2421 | EXT_PRINTK("scsi%d: length=%d, code=0x%02x\n", HOSTNO, |
2371 | (int)extended_msg[1], (int)extended_msg[2]); | 2422 | (int)extended_msg[1], (int)extended_msg[2]); |
2372 | 2423 | ||
2373 | if (!len && extended_msg[1] <= | 2424 | if (!len && extended_msg[1] <= |
2374 | (sizeof (extended_msg) - 1)) { | 2425 | (sizeof(extended_msg) - 1)) { |
2375 | /* Accept third byte by clearing ACK */ | 2426 | /* Accept third byte by clearing ACK */ |
2376 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2427 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2377 | len = extended_msg[1] - 1; | 2428 | len = extended_msg[1] - 1; |
2378 | data = extended_msg + 3; | 2429 | data = extended_msg + 3; |
2379 | phase = PHASE_MSGIN; | 2430 | phase = PHASE_MSGIN; |
2380 | 2431 | ||
2381 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2432 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
2382 | EXT_PRINTK("scsi%d: message received, residual %d\n", | 2433 | EXT_PRINTK("scsi%d: message received, residual %d\n", |
2383 | HOSTNO, len); | 2434 | HOSTNO, len); |
2384 | 2435 | ||
2385 | switch (extended_msg[2]) { | 2436 | switch (extended_msg[2]) { |
2386 | case EXTENDED_SDTR: | 2437 | case EXTENDED_SDTR: |
2387 | case EXTENDED_WDTR: | 2438 | case EXTENDED_WDTR: |
2388 | case EXTENDED_MODIFY_DATA_POINTER: | 2439 | case EXTENDED_MODIFY_DATA_POINTER: |
2389 | case EXTENDED_EXTENDED_IDENTIFY: | 2440 | case EXTENDED_EXTENDED_IDENTIFY: |
2390 | tmp = 0; | 2441 | tmp = 0; |
2391 | } | 2442 | } |
2392 | } else if (len) { | 2443 | } else if (len) { |
2393 | printk(KERN_NOTICE "scsi%d: error receiving " | 2444 | printk(KERN_NOTICE "scsi%d: error receiving " |
2394 | "extended message\n", HOSTNO); | 2445 | "extended message\n", HOSTNO); |
2395 | tmp = 0; | 2446 | tmp = 0; |
2396 | } else { | 2447 | } else { |
2397 | printk(KERN_NOTICE "scsi%d: extended message " | 2448 | printk(KERN_NOTICE "scsi%d: extended message " |
2398 | "code %02x length %d is too long\n", | 2449 | "code %02x length %d is too long\n", |
2399 | HOSTNO, extended_msg[2], extended_msg[1]); | 2450 | HOSTNO, extended_msg[2], extended_msg[1]); |
2400 | tmp = 0; | 2451 | tmp = 0; |
2401 | } | 2452 | } |
2402 | /* Fall through to reject message */ | 2453 | /* Fall through to reject message */ |
2403 | 2454 | ||
2404 | /* | 2455 | /* |
2405 | * If we get something weird that we aren't expecting, | 2456 | * If we get something weird that we aren't expecting, |
2406 | * reject it. | 2457 | * reject it. |
2407 | */ | 2458 | */ |
2408 | default: | 2459 | default: |
2409 | if (!tmp) { | 2460 | if (!tmp) { |
2410 | printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); | 2461 | printk(KERN_DEBUG "scsi%d: rejecting message ", HOSTNO); |
2411 | spi_print_msg(extended_msg); | 2462 | spi_print_msg(extended_msg); |
2412 | printk("\n"); | 2463 | printk("\n"); |
2413 | } else if (tmp != EXTENDED_MESSAGE) | 2464 | } else if (tmp != EXTENDED_MESSAGE) |
2414 | printk(KERN_DEBUG "scsi%d: rejecting unknown " | 2465 | printk(KERN_DEBUG "scsi%d: rejecting unknown " |
2415 | "message %02x from target %d, lun %d\n", | 2466 | "message %02x from target %d, lun %d\n", |
2416 | HOSTNO, tmp, cmd->device->id, cmd->device->lun); | 2467 | HOSTNO, tmp, cmd->device->id, cmd->device->lun); |
2417 | else | 2468 | else |
2418 | printk(KERN_DEBUG "scsi%d: rejecting unknown " | 2469 | printk(KERN_DEBUG "scsi%d: rejecting unknown " |
2419 | "extended message " | 2470 | "extended message " |
2420 | "code %02x, length %d from target %d, lun %d\n", | 2471 | "code %02x, length %d from target %d, lun %d\n", |
2421 | HOSTNO, extended_msg[1], extended_msg[0], | 2472 | HOSTNO, extended_msg[1], extended_msg[0], |
2422 | cmd->device->id, cmd->device->lun); | 2473 | cmd->device->id, cmd->device->lun); |
2423 | 2474 | ||
2424 | 2475 | ||
2425 | msgout = MESSAGE_REJECT; | 2476 | msgout = MESSAGE_REJECT; |
2426 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | | 2477 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_ATN); |
2427 | ICR_ASSERT_ATN); | 2478 | break; |
2428 | break; | 2479 | } /* switch (tmp) */ |
2429 | } /* switch (tmp) */ | 2480 | break; |
2430 | break; | 2481 | case PHASE_MSGOUT: |
2431 | case PHASE_MSGOUT: | 2482 | len = 1; |
2432 | len = 1; | 2483 | data = &msgout; |
2433 | data = &msgout; | 2484 | hostdata->last_message = msgout; |
2434 | hostdata->last_message = msgout; | 2485 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
2435 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2486 | if (msgout == ABORT) { |
2436 | if (msgout == ABORT) { | ||
2437 | #ifdef SUPPORT_TAGS | 2487 | #ifdef SUPPORT_TAGS |
2438 | cmd_free_tag( cmd ); | 2488 | cmd_free_tag(cmd); |
2439 | #else | 2489 | #else |
2440 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 2490 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); |
2441 | #endif | 2491 | #endif |
2442 | hostdata->connected = NULL; | 2492 | hostdata->connected = NULL; |
2443 | cmd->result = DID_ERROR << 16; | 2493 | cmd->result = DID_ERROR << 16; |
2444 | #ifdef NCR5380_STATS | 2494 | #ifdef NCR5380_STATS |
2445 | collect_stats(hostdata, cmd); | 2495 | collect_stats(hostdata, cmd); |
2446 | #endif | 2496 | #endif |
2447 | cmd->scsi_done(cmd); | 2497 | cmd->scsi_done(cmd); |
2448 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); | 2498 | NCR5380_write(SELECT_ENABLE_REG, hostdata->id_mask); |
2449 | falcon_release_lock_if_possible( hostdata ); | 2499 | falcon_release_lock_if_possible(hostdata); |
2450 | return; | 2500 | return; |
2451 | } | 2501 | } |
2452 | msgout = NOP; | 2502 | msgout = NOP; |
2453 | break; | 2503 | break; |
2454 | case PHASE_CMDOUT: | 2504 | case PHASE_CMDOUT: |
2455 | len = cmd->cmd_len; | 2505 | len = cmd->cmd_len; |
2456 | data = cmd->cmnd; | 2506 | data = cmd->cmnd; |
2457 | /* | 2507 | /* |
2458 | * XXX for performance reasons, on machines with a | 2508 | * XXX for performance reasons, on machines with a |
2459 | * PSEUDO-DMA architecture we should probably | 2509 | * PSEUDO-DMA architecture we should probably |
2460 | * use the dma transfer function. | 2510 | * use the dma transfer function. |
2461 | */ | 2511 | */ |
2462 | NCR5380_transfer_pio(instance, &phase, &len, | 2512 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
2463 | &data); | 2513 | break; |
2464 | break; | 2514 | case PHASE_STATIN: |
2465 | case PHASE_STATIN: | 2515 | len = 1; |
2466 | len = 1; | 2516 | data = &tmp; |
2467 | data = &tmp; | 2517 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
2468 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2518 | cmd->SCp.Status = tmp; |
2469 | cmd->SCp.Status = tmp; | 2519 | break; |
2470 | break; | 2520 | default: |
2471 | default: | 2521 | printk("scsi%d: unknown phase\n", HOSTNO); |
2472 | printk("scsi%d: unknown phase\n", HOSTNO); | 2522 | NCR_PRINT(NDEBUG_ANY); |
2473 | NCR_PRINT(NDEBUG_ANY); | 2523 | } /* switch(phase) */ |
2474 | } /* switch(phase) */ | 2524 | } /* if (tmp * SR_REQ) */ |
2475 | } /* if (tmp & SR_REQ) */ | 2524 | } /* if (tmp & SR_REQ) */ |
2476 | } /* while (1) */ | ||
2477 | } | 2526 | } |
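The PHASE_MSGIN code above pulls in extended messages using the framing spelled out in the comment: byte 0 is EXTENDED_MESSAGE, byte 1 the length (code byte plus arguments), byte 2 the code, and the driver refuses anything longer than its extended_msg buffer. Below is a minimal, self-contained sketch of that framing check, not driver code; parse_extended_msg() and the sample SDTR bytes are purely illustrative.

/* Standalone sketch; EXTENDED_MESSAGE is 0x01 per the SCSI spec. */
#include <stdio.h>
#include <stddef.h>

#define EXTENDED_MESSAGE 0x01

/* Returns the number of argument bytes, or -1 if 'msg' is not a
 * well-formed extended message fitting in 'bufsize' bytes. */
static int parse_extended_msg(const unsigned char *msg, size_t bufsize,
                              unsigned char *code)
{
    if (bufsize < 3 || msg[0] != EXTENDED_MESSAGE || msg[1] < 1)
        return -1;
    /* msg[1] counts the code byte plus the arguments, so the whole
     * message occupies msg[1] + 2 bytes. */
    if ((size_t)msg[1] + 2 > bufsize)
        return -1;
    *code = msg[2];
    return msg[1] - 1;    /* arguments start at msg[3] */
}

int main(void)
{
    /* illustrative SDTR message: code 0x01 followed by two argument bytes */
    unsigned char sdtr[] = { EXTENDED_MESSAGE, 3, 0x01, 0x0c, 0x08 };
    unsigned char code = 0;
    int args = parse_extended_msg(sdtr, sizeof(sdtr), &code);

    printf("code=0x%02x, %d argument byte(s)\n", code, args);
    return 0;
}

The driver makes the equivalent bounds check with extended_msg[1] <= sizeof(extended_msg) - 1 before transferring the remaining bytes.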
2478 | 2527 | ||
2479 | /* | 2528 | /* |
2480 | * Function : void NCR5380_reselect (struct Scsi_Host *instance) | 2529 | * Function : void NCR5380_reselect (struct Scsi_Host *instance) |
2481 | * | 2530 | * |
2482 | * Purpose : does reselection, initializing the instance->connected | 2531 | * Purpose : does reselection, initializing the instance->connected |
2483 | * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q | 2532 | * field to point to the Scsi_Cmnd for which the I_T_L or I_T_L_Q |
2484 | * nexus has been reestablished, | 2533 | * nexus has been reestablished, |
2485 | * | 2534 | * |
2486 | * Inputs : instance - this instance of the NCR5380. | 2535 | * Inputs : instance - this instance of the NCR5380. |
2487 | * | 2536 | * |
2488 | */ | 2537 | */ |
2489 | 2538 | ||
2490 | 2539 | ||
2491 | static void NCR5380_reselect (struct Scsi_Host *instance) | 2540 | static void NCR5380_reselect(struct Scsi_Host *instance) |
2492 | { | 2541 | { |
2493 | SETUP_HOSTDATA(instance); | 2542 | SETUP_HOSTDATA(instance); |
2494 | unsigned char target_mask; | 2543 | unsigned char target_mask; |
2495 | unsigned char lun, phase; | 2544 | unsigned char lun, phase; |
2496 | int len; | 2545 | int len; |
2497 | #ifdef SUPPORT_TAGS | 2546 | #ifdef SUPPORT_TAGS |
2498 | unsigned char tag; | 2547 | unsigned char tag; |
2499 | #endif | 2548 | #endif |
2500 | unsigned char msg[3]; | 2549 | unsigned char msg[3]; |
2501 | unsigned char *data; | 2550 | unsigned char *data; |
2502 | Scsi_Cmnd *tmp = NULL, *prev; | 2551 | Scsi_Cmnd *tmp = NULL, *prev; |
2503 | /* unsigned long flags; */ | 2552 | /* unsigned long flags; */ |
2504 | 2553 | ||
2505 | /* | 2554 | /* |
2506 | * Disable arbitration, etc. since the host adapter obviously | 2555 | * Disable arbitration, etc. since the host adapter obviously |
2507 | * lost, and tell an interrupted NCR5380_select() to restart. | 2556 | * lost, and tell an interrupted NCR5380_select() to restart. |
2508 | */ | 2557 | */ |
2509 | 2558 | ||
2510 | NCR5380_write(MODE_REG, MR_BASE); | 2559 | NCR5380_write(MODE_REG, MR_BASE); |
2511 | hostdata->restart_select = 1; | 2560 | hostdata->restart_select = 1; |
2512 | 2561 | ||
2513 | target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); | 2562 | target_mask = NCR5380_read(CURRENT_SCSI_DATA_REG) & ~(hostdata->id_mask); |
2514 | 2563 | ||
2515 | RSL_PRINTK("scsi%d: reselect\n", HOSTNO); | 2564 | RSL_PRINTK("scsi%d: reselect\n", HOSTNO); |
2516 | 2565 | ||
2517 | /* | 2566 | /* |
2518 | * At this point, we have detected that our SCSI ID is on the bus, | 2567 | * At this point, we have detected that our SCSI ID is on the bus, |
2519 | * SEL is true and BSY was false for at least one bus settle delay | 2568 | * SEL is true and BSY was false for at least one bus settle delay |
2520 | * (400 ns). | 2569 | * (400 ns). |
2521 | * | 2570 | * |
2522 | * We must assert BSY ourselves, until the target drops the SEL | 2571 | * We must assert BSY ourselves, until the target drops the SEL |
2523 | * signal. | 2572 | * signal. |
2524 | */ | 2573 | */ |
2525 | 2574 | ||
2526 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); | 2575 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_BSY); |
2527 | 2576 | ||
2528 | while (NCR5380_read(STATUS_REG) & SR_SEL); | 2577 | while (NCR5380_read(STATUS_REG) & SR_SEL) |
2529 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2578 | ; |
2530 | 2579 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | |
2531 | /* | 2580 | |
2532 | * Wait for target to go into MSGIN. | 2581 | /* |
2533 | */ | 2582 | * Wait for target to go into MSGIN. |
2534 | 2583 | */ | |
2535 | while (!(NCR5380_read(STATUS_REG) & SR_REQ)); | 2584 | |
2536 | 2585 | while (!(NCR5380_read(STATUS_REG) & SR_REQ)) | |
2537 | len = 1; | 2586 | ; |
2538 | data = msg; | 2587 | |
2539 | phase = PHASE_MSGIN; | 2588 | len = 1; |
2540 | NCR5380_transfer_pio(instance, &phase, &len, &data); | 2589 | data = msg; |
2541 | 2590 | phase = PHASE_MSGIN; | |
2542 | if (!(msg[0] & 0x80)) { | 2591 | NCR5380_transfer_pio(instance, &phase, &len, &data); |
2543 | printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); | 2592 | |
2544 | spi_print_msg(msg); | 2593 | if (!(msg[0] & 0x80)) { |
2545 | do_abort(instance); | 2594 | printk(KERN_DEBUG "scsi%d: expecting IDENTIFY message, got ", HOSTNO); |
2546 | return; | 2595 | spi_print_msg(msg); |
2547 | } | 2596 | do_abort(instance); |
2548 | lun = (msg[0] & 0x07); | 2597 | return; |
2598 | } | ||
2599 | lun = (msg[0] & 0x07); | ||
2549 | 2600 | ||
2550 | #ifdef SUPPORT_TAGS | 2601 | #ifdef SUPPORT_TAGS |
2551 | /* If the phase is still MSGIN, the target wants to send some more | 2602 | /* If the phase is still MSGIN, the target wants to send some more |
2552 | * messages. In case it supports tagged queuing, this is probably a | 2603 | * messages. In case it supports tagged queuing, this is probably a |
2553 | * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. | 2604 | * SIMPLE_QUEUE_TAG for the I_T_L_Q nexus. |
2554 | */ | 2605 | */ |
2555 | tag = TAG_NONE; | 2606 | tag = TAG_NONE; |
2556 | if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { | 2607 | if (phase == PHASE_MSGIN && setup_use_tagged_queuing) { |
2557 | /* Accept previous IDENTIFY message by clearing ACK */ | 2608 | /* Accept previous IDENTIFY message by clearing ACK */ |
2558 | NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); | 2609 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2559 | len = 2; | 2610 | len = 2; |
2560 | data = msg+1; | 2611 | data = msg + 1; |
2561 | if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && | 2612 | if (!NCR5380_transfer_pio(instance, &phase, &len, &data) && |
2562 | msg[1] == SIMPLE_QUEUE_TAG) | 2613 | msg[1] == SIMPLE_QUEUE_TAG) |
2563 | tag = msg[2]; | 2614 | tag = msg[2]; |
2564 | TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " | 2615 | TAG_PRINTK("scsi%d: target mask %02x, lun %d sent tag %d at " |
2565 | "reselection\n", HOSTNO, target_mask, lun, tag); | 2616 | "reselection\n", HOSTNO, target_mask, lun, tag); |
2566 | } | 2617 | } |
2567 | #endif | 2618 | #endif |
2568 | 2619 | ||
2569 | /* | 2620 | /* |
2570 | * Find the command corresponding to the I_T_L or I_T_L_Q nexus we | 2621 | * Find the command corresponding to the I_T_L or I_T_L_Q nexus we |
2571 | * just reestablished, and remove it from the disconnected queue. | 2622 | * just reestablished, and remove it from the disconnected queue. |
2572 | */ | 2623 | */ |
2573 | 2624 | ||
2574 | for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; | 2625 | for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue, prev = NULL; |
2575 | tmp; prev = tmp, tmp = NEXT(tmp) ) { | 2626 | tmp; prev = tmp, tmp = NEXT(tmp)) { |
2576 | if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) | 2627 | if ((target_mask == (1 << tmp->device->id)) && (lun == tmp->device->lun) |
2577 | #ifdef SUPPORT_TAGS | 2628 | #ifdef SUPPORT_TAGS |
2578 | && (tag == tmp->tag) | 2629 | && (tag == tmp->tag) |
2579 | #endif | 2630 | #endif |
2580 | ) { | 2631 | ) { |
2581 | /* ++guenther: prevent race with falcon_release_lock */ | 2632 | /* ++guenther: prevent race with falcon_release_lock */ |
2582 | falcon_dont_release++; | 2633 | falcon_dont_release++; |
2583 | if (prev) { | 2634 | if (prev) { |
2584 | REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); | 2635 | REMOVE(prev, NEXT(prev), tmp, NEXT(tmp)); |
2585 | NEXT(prev) = NEXT(tmp); | 2636 | SET_NEXT(prev, NEXT(tmp)); |
2586 | } else { | 2637 | } else { |
2587 | REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); | 2638 | REMOVE(-1, hostdata->disconnected_queue, tmp, NEXT(tmp)); |
2588 | hostdata->disconnected_queue = NEXT(tmp); | 2639 | hostdata->disconnected_queue = NEXT(tmp); |
2589 | } | 2640 | } |
2590 | NEXT(tmp) = NULL; | 2641 | SET_NEXT(tmp, NULL); |
2591 | break; | 2642 | break; |
2643 | } | ||
2592 | } | 2644 | } |
2593 | } | 2645 | |
2594 | 2646 | if (!tmp) { | |
2595 | if (!tmp) { | 2647 | printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d " |
2596 | printk(KERN_WARNING "scsi%d: warning: target bitmask %02x lun %d " | ||
2597 | #ifdef SUPPORT_TAGS | 2648 | #ifdef SUPPORT_TAGS |
2598 | "tag %d " | 2649 | "tag %d " |
2599 | #endif | 2650 | #endif |
2600 | "not in disconnected_queue.\n", | 2651 | "not in disconnected_queue.\n", |
2601 | HOSTNO, target_mask, lun | 2652 | HOSTNO, target_mask, lun |
2602 | #ifdef SUPPORT_TAGS | 2653 | #ifdef SUPPORT_TAGS |
2603 | , tag | 2654 | , tag |
2604 | #endif | 2655 | #endif |
2605 | ); | 2656 | ); |
2606 | /* | 2657 | /* |
2607 | * Since we have an established nexus that we can't do anything | 2658 | * Since we have an established nexus that we can't do anything |
2608 | * with, we must abort it. | 2659 | * with, we must abort it. |
2609 | */ | 2660 | */ |
2610 | do_abort(instance); | 2661 | do_abort(instance); |
2611 | return; | 2662 | return; |
2612 | } | 2663 | } |
2613 | 2664 | ||
2614 | /* Accept message by clearing ACK */ | 2665 | /* Accept message by clearing ACK */ |
2615 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); | 2666 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2616 | 2667 | ||
2617 | hostdata->connected = tmp; | 2668 | hostdata->connected = tmp; |
2618 | RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", | 2669 | RSL_PRINTK("scsi%d: nexus established, target = %d, lun = %d, tag = %d\n", |
2619 | HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); | 2670 | HOSTNO, tmp->device->id, tmp->device->lun, tmp->tag); |
2620 | falcon_dont_release--; | 2671 | falcon_dont_release--; |
2621 | } | 2672 | } |
2622 | 2673 | ||
2623 | 2674 | ||
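NCR5380_reselect() above recovers the reselecting target from the bus data with the host's own ID bit masked off, and the LUN from the low three bits of the IDENTIFY message (after checking bit 0x80). A standalone sketch of that decoding, assuming an 8-bit SCSI bus with one ID bit per device; decode_reselection() is a hypothetical helper, not part of the driver.

#include <stdio.h>

/* Returns the reselecting target's SCSI ID, or -1 on a malformed message. */
static int decode_reselection(unsigned char bus_data, unsigned char own_id_mask,
                              unsigned char identify, unsigned int *lun)
{
    unsigned char target_mask = bus_data & ~own_id_mask;
    int id;

    if (!(identify & 0x80))        /* not an IDENTIFY message */
        return -1;
    *lun = identify & 0x07;        /* low three bits carry the LUN */

    for (id = 0; id < 8; id++)     /* exactly one ID bit should remain set */
        if (target_mask == (1u << id))
            return id;
    return -1;
}

int main(void)
{
    unsigned int lun;
    /* host is ID 7 (mask 0x80); target 2 (mask 0x04) reselects LUN 1 */
    int id = decode_reselection(0x84, 0x80, 0x81, &lun);

    printf("target %d, lun %u\n", id, lun);
    return 0;
}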
@@ -2626,362 +2677,361 @@ static void NCR5380_reselect (struct Scsi_Host *instance) | |||
2626 | * | 2677 | * |
2627 | * Purpose : abort a command | 2678 | * Purpose : abort a command |
2628 | * | 2679 | * |
2629 | * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the | 2680 | * Inputs : cmd - the Scsi_Cmnd to abort, code - code to set the |
2630 | * host byte of the result field to, if zero DID_ABORTED is | 2681 | * host byte of the result field to, if zero DID_ABORTED is |
2631 | * used. | 2682 | * used. |
2632 | * | 2683 | * |
2633 | * Returns : 0 - success, -1 on failure. | 2684 | * Returns : 0 - success, -1 on failure. |
2634 | * | 2685 | * |
2635 | * XXX - there is no way to abort the command that is currently | 2686 | * XXX - there is no way to abort the command that is currently |
2636 | * connected, you have to wait for it to complete. If this is | 2687 | * connected, you have to wait for it to complete. If this is |
2637 | * a problem, we could implement longjmp() / setjmp(), setjmp() | 2688 | * a problem, we could implement longjmp() / setjmp(), setjmp() |
2638 | * called where the loop started in NCR5380_main(). | 2689 | * called where the loop started in NCR5380_main(). |
2639 | */ | 2690 | */ |
2640 | 2691 | ||
2641 | static | 2692 | static |
2642 | int NCR5380_abort (Scsi_Cmnd *cmd) | 2693 | int NCR5380_abort(Scsi_Cmnd *cmd) |
2643 | { | 2694 | { |
2644 | struct Scsi_Host *instance = cmd->device->host; | 2695 | struct Scsi_Host *instance = cmd->device->host; |
2645 | SETUP_HOSTDATA(instance); | 2696 | SETUP_HOSTDATA(instance); |
2646 | Scsi_Cmnd *tmp, **prev; | 2697 | Scsi_Cmnd *tmp, **prev; |
2647 | unsigned long flags; | 2698 | unsigned long flags; |
2699 | |||
2700 | printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO); | ||
2701 | scsi_print_command(cmd); | ||
2648 | 2702 | ||
2649 | printk(KERN_NOTICE "scsi%d: aborting command\n", HOSTNO); | 2703 | NCR5380_print_status(instance); |
2650 | scsi_print_command(cmd); | ||
2651 | 2704 | ||
2652 | NCR5380_print_status (instance); | 2705 | local_irq_save(flags); |
2653 | 2706 | ||
2654 | local_irq_save(flags); | 2707 | if (!IS_A_TT() && !falcon_got_lock) |
2655 | 2708 | printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n", | |
2656 | if (!IS_A_TT() && !falcon_got_lock) | 2709 | HOSTNO); |
2657 | printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_abort\n", | ||
2658 | HOSTNO); | ||
2659 | 2710 | ||
2660 | ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, | 2711 | ABRT_PRINTK("scsi%d: abort called basr 0x%02x, sr 0x%02x\n", HOSTNO, |
2661 | NCR5380_read(BUS_AND_STATUS_REG), | 2712 | NCR5380_read(BUS_AND_STATUS_REG), |
2662 | NCR5380_read(STATUS_REG)); | 2713 | NCR5380_read(STATUS_REG)); |
2663 | 2714 | ||
2664 | #if 1 | 2715 | #if 1 |
2665 | /* | 2716 | /* |
2666 | * Case 1 : If the command is the currently executing command, | 2717 | * Case 1 : If the command is the currently executing command, |
2667 | * we'll set the aborted flag and return control so that | 2718 | * we'll set the aborted flag and return control so that |
2668 | * information transfer routine can exit cleanly. | 2719 | * information transfer routine can exit cleanly. |
2669 | */ | 2720 | */ |
2670 | 2721 | ||
2671 | if (hostdata->connected == cmd) { | 2722 | if (hostdata->connected == cmd) { |
2672 | 2723 | ||
2673 | ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); | 2724 | ABRT_PRINTK("scsi%d: aborting connected command\n", HOSTNO); |
2674 | /* | 2725 | /* |
2675 | * We should perform BSY checking, and make sure we haven't slipped | 2726 | * We should perform BSY checking, and make sure we haven't slipped |
2676 | * into BUS FREE. | 2727 | * into BUS FREE. |
2677 | */ | 2728 | */ |
2678 | 2729 | ||
2679 | /* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */ | 2730 | /* NCR5380_write(INITIATOR_COMMAND_REG, ICR_ASSERT_ATN); */ |
2680 | /* | 2731 | /* |
2681 | * Since we can't change phases until we've completed the current | 2732 | * Since we can't change phases until we've completed the current |
2682 | * handshake, we have to source or sink a byte of data if the current | 2733 | * handshake, we have to source or sink a byte of data if the current |
2683 | * phase is not MSGOUT. | 2734 | * phase is not MSGOUT. |
2684 | */ | 2735 | */ |
2685 | 2736 | ||
2686 | /* | 2737 | /* |
2687 | * Return control to the executing NCR drive so we can clear the | 2738 | * Return control to the executing NCR drive so we can clear the |
2688 | * aborted flag and get back into our main loop. | 2739 | * aborted flag and get back into our main loop. |
2689 | */ | 2740 | */ |
2690 | 2741 | ||
2691 | if (do_abort(instance) == 0) { | 2742 | if (do_abort(instance) == 0) { |
2692 | hostdata->aborted = 1; | 2743 | hostdata->aborted = 1; |
2693 | hostdata->connected = NULL; | 2744 | hostdata->connected = NULL; |
2694 | cmd->result = DID_ABORT << 16; | 2745 | cmd->result = DID_ABORT << 16; |
2695 | #ifdef SUPPORT_TAGS | 2746 | #ifdef SUPPORT_TAGS |
2696 | cmd_free_tag( cmd ); | 2747 | cmd_free_tag(cmd); |
2697 | #else | 2748 | #else |
2698 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 2749 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); |
2699 | #endif | 2750 | #endif |
2700 | local_irq_restore(flags); | 2751 | local_irq_restore(flags); |
2701 | cmd->scsi_done(cmd); | 2752 | cmd->scsi_done(cmd); |
2702 | falcon_release_lock_if_possible( hostdata ); | 2753 | falcon_release_lock_if_possible(hostdata); |
2703 | return SCSI_ABORT_SUCCESS; | 2754 | return SCSI_ABORT_SUCCESS; |
2704 | } else { | 2755 | } else { |
2705 | /* local_irq_restore(flags); */ | 2756 | /* local_irq_restore(flags); */ |
2706 | printk("scsi%d: abort of connected command failed!\n", HOSTNO); | 2757 | printk("scsi%d: abort of connected command failed!\n", HOSTNO); |
2707 | return SCSI_ABORT_ERROR; | 2758 | return SCSI_ABORT_ERROR; |
2708 | } | 2759 | } |
2709 | } | 2760 | } |
2710 | #endif | 2761 | #endif |
2711 | 2762 | ||
2712 | /* | 2763 | /* |
2713 | * Case 2 : If the command hasn't been issued yet, we simply remove it | 2764 | * Case 2 : If the command hasn't been issued yet, we simply remove it |
2714 | * from the issue queue. | 2765 | * from the issue queue. |
2715 | */ | 2766 | */ |
2716 | for (prev = (Scsi_Cmnd **) &(hostdata->issue_queue), | 2767 | for (prev = (Scsi_Cmnd **)&(hostdata->issue_queue), |
2717 | tmp = (Scsi_Cmnd *) hostdata->issue_queue; | 2768 | tmp = (Scsi_Cmnd *)hostdata->issue_queue; |
2718 | tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) ) | 2769 | tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { |
2719 | if (cmd == tmp) { | 2770 | if (cmd == tmp) { |
2720 | REMOVE(5, *prev, tmp, NEXT(tmp)); | 2771 | REMOVE(5, *prev, tmp, NEXT(tmp)); |
2721 | (*prev) = NEXT(tmp); | 2772 | (*prev) = NEXT(tmp); |
2722 | NEXT(tmp) = NULL; | 2773 | SET_NEXT(tmp, NULL); |
2723 | tmp->result = DID_ABORT << 16; | 2774 | tmp->result = DID_ABORT << 16; |
2724 | local_irq_restore(flags); | 2775 | local_irq_restore(flags); |
2725 | ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", | 2776 | ABRT_PRINTK("scsi%d: abort removed command from issue queue.\n", |
2726 | HOSTNO); | 2777 | HOSTNO); |
2727 | /* Tagged queuing note: no tag to free here, hasn't been assigned | 2778 | /* Tagged queuing note: no tag to free here, hasn't been assigned |
2728 | * yet... */ | 2779 | * yet... */ |
2729 | tmp->scsi_done(tmp); | 2780 | tmp->scsi_done(tmp); |
2730 | falcon_release_lock_if_possible( hostdata ); | 2781 | falcon_release_lock_if_possible(hostdata); |
2731 | return SCSI_ABORT_SUCCESS; | 2782 | return SCSI_ABORT_SUCCESS; |
2783 | } | ||
2732 | } | 2784 | } |
2733 | 2785 | ||
2734 | /* | 2786 | /* |
2735 | * Case 3 : If any commands are connected, we're going to fail the abort | 2787 | * Case 3 : If any commands are connected, we're going to fail the abort |
2736 | * and let the high level SCSI driver retry at a later time or | 2788 | * and let the high level SCSI driver retry at a later time or |
2737 | * issue a reset. | 2789 | * issue a reset. |
2738 | * | 2790 | * |
2739 | * Timeouts, and therefore aborted commands, will be highly unlikely | 2791 | * Timeouts, and therefore aborted commands, will be highly unlikely |
2740 | * and handling them cleanly in this situation would make the common | 2792 | * and handling them cleanly in this situation would make the common |
2741 | * case of noresets less efficient, and would pollute our code. So, | 2793 | * case of noresets less efficient, and would pollute our code. So, |
2742 | * we fail. | 2794 | * we fail. |
2743 | */ | 2795 | */ |
2744 | 2796 | ||
2745 | if (hostdata->connected) { | 2797 | if (hostdata->connected) { |
2746 | local_irq_restore(flags); | 2798 | local_irq_restore(flags); |
2747 | ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); | 2799 | ABRT_PRINTK("scsi%d: abort failed, command connected.\n", HOSTNO); |
2748 | return SCSI_ABORT_SNOOZE; | 2800 | return SCSI_ABORT_SNOOZE; |
2749 | } | 2801 | } |
2750 | 2802 | ||
2751 | /* | 2803 | /* |
2752 | * Case 4: If the command is currently disconnected from the bus, and | 2804 | * Case 4: If the command is currently disconnected from the bus, and |
2753 | * there are no connected commands, we reconnect the I_T_L or | 2805 | * there are no connected commands, we reconnect the I_T_L or |
2754 | * I_T_L_Q nexus associated with it, go into message out, and send | 2806 | * I_T_L_Q nexus associated with it, go into message out, and send |
2755 | * an abort message. | 2807 | * an abort message. |
2756 | * | 2808 | * |
2757 | * This case is especially ugly. In order to reestablish the nexus, we | 2809 | * This case is especially ugly. In order to reestablish the nexus, we |
2758 | * need to call NCR5380_select(). The easiest way to implement this | 2810 | * need to call NCR5380_select(). The easiest way to implement this |
2759 | * function was to abort if the bus was busy, and let the interrupt | 2811 | * function was to abort if the bus was busy, and let the interrupt |
2760 | * handler triggered on the SEL for reselect take care of lost arbitrations | 2812 | * handler triggered on the SEL for reselect take care of lost arbitrations |
2761 | * where necessary, meaning interrupts need to be enabled. | 2813 | * where necessary, meaning interrupts need to be enabled. |
2762 | * | 2814 | * |
2763 | * When interrupts are enabled, the queues may change - so we | 2815 | * When interrupts are enabled, the queues may change - so we |
2764 | * can't remove it from the disconnected queue before selecting it | 2816 | * can't remove it from the disconnected queue before selecting it |
2765 | * because that could cause a failure in hashing the nexus if that | 2817 | * because that could cause a failure in hashing the nexus if that |
2766 | * device reselected. | 2818 | * device reselected. |
2767 | * | 2819 | * |
2768 | * Since the queues may change, we can't use the pointers from when we | 2820 | * Since the queues may change, we can't use the pointers from when we |
2769 | * first locate it. | 2821 | * first locate it. |
2770 | * | 2822 | * |
2771 | * So, we must first locate the command, and if NCR5380_select() | 2823 | * So, we must first locate the command, and if NCR5380_select() |
2772 | * succeeds, then issue the abort, relocate the command and remove | 2824 | * succeeds, then issue the abort, relocate the command and remove |
2773 | * it from the disconnected queue. | 2825 | * it from the disconnected queue. |
2774 | */ | 2826 | */ |
2827 | |||
2828 | for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; | ||
2829 | tmp = NEXT(tmp)) { | ||
2830 | if (cmd == tmp) { | ||
2831 | local_irq_restore(flags); | ||
2832 | ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); | ||
2775 | 2833 | ||
2776 | for (tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; tmp; | 2834 | if (NCR5380_select(instance, cmd, (int)cmd->tag)) |
2777 | tmp = NEXT(tmp)) | 2835 | return SCSI_ABORT_BUSY; |
2778 | if (cmd == tmp) { | 2836 | |
2779 | local_irq_restore(flags); | 2837 | ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); |
2780 | ABRT_PRINTK("scsi%d: aborting disconnected command.\n", HOSTNO); | 2838 | |
2781 | 2839 | do_abort(instance); | |
2782 | if (NCR5380_select (instance, cmd, (int) cmd->tag)) | 2840 | |
2783 | return SCSI_ABORT_BUSY; | 2841 | local_irq_save(flags); |
2784 | 2842 | for (prev = (Scsi_Cmnd **)&(hostdata->disconnected_queue), | |
2785 | ABRT_PRINTK("scsi%d: nexus reestablished.\n", HOSTNO); | 2843 | tmp = (Scsi_Cmnd *)hostdata->disconnected_queue; |
2786 | 2844 | tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp)) { | |
2787 | do_abort (instance); | 2845 | if (cmd == tmp) { |
2788 | 2846 | REMOVE(5, *prev, tmp, NEXT(tmp)); | |
2789 | local_irq_save(flags); | 2847 | *prev = NEXT(tmp); |
2790 | for (prev = (Scsi_Cmnd **) &(hostdata->disconnected_queue), | 2848 | SET_NEXT(tmp, NULL); |
2791 | tmp = (Scsi_Cmnd *) hostdata->disconnected_queue; | 2849 | tmp->result = DID_ABORT << 16; |
2792 | tmp; prev = NEXTADDR(tmp), tmp = NEXT(tmp) ) | 2850 | /* We must unlock the tag/LUN immediately here, since the |
2793 | if (cmd == tmp) { | 2851 | * target goes to BUS FREE and doesn't send us another |
2794 | REMOVE(5, *prev, tmp, NEXT(tmp)); | 2852 | * message (COMMAND_COMPLETE or the like) |
2795 | *prev = NEXT(tmp); | 2853 | */ |
2796 | NEXT(tmp) = NULL; | ||
2797 | tmp->result = DID_ABORT << 16; | ||
2798 | /* We must unlock the tag/LUN immediately here, since the | ||
2799 | * target goes to BUS FREE and doesn't send us another | ||
2800 | * message (COMMAND_COMPLETE or the like) | ||
2801 | */ | ||
2802 | #ifdef SUPPORT_TAGS | 2854 | #ifdef SUPPORT_TAGS |
2803 | cmd_free_tag( tmp ); | 2855 | cmd_free_tag(tmp); |
2804 | #else | 2856 | #else |
2805 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); | 2857 | hostdata->busy[cmd->device->id] &= ~(1 << cmd->device->lun); |
2806 | #endif | 2858 | #endif |
2807 | local_irq_restore(flags); | 2859 | local_irq_restore(flags); |
2808 | tmp->scsi_done(tmp); | 2860 | tmp->scsi_done(tmp); |
2809 | falcon_release_lock_if_possible( hostdata ); | 2861 | falcon_release_lock_if_possible(hostdata); |
2810 | return SCSI_ABORT_SUCCESS; | 2862 | return SCSI_ABORT_SUCCESS; |
2863 | } | ||
2864 | } | ||
2811 | } | 2865 | } |
2812 | } | 2866 | } |
2813 | 2867 | ||
2814 | /* | 2868 | /* |
2815 | * Case 5 : If we reached this point, the command was not found in any of | 2869 | * Case 5 : If we reached this point, the command was not found in any of |
2816 | * the queues. | 2870 | * the queues. |
2817 | * | 2871 | * |
2818 | * We probably reached this point because of an unlikely race condition | 2872 | * We probably reached this point because of an unlikely race condition |
2819 | * between the command completing successfully and the abortion code, | 2873 | * between the command completing successfully and the abortion code, |
2820 | * so we won't panic, but we will notify the user in case something really | 2874 | * so we won't panic, but we will notify the user in case something really |
2821 | * broke. | 2875 | * broke. |
2822 | */ | 2876 | */ |
2823 | 2877 | ||
2824 | local_irq_restore(flags); | 2878 | local_irq_restore(flags); |
2825 | printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n" | 2879 | printk(KERN_INFO "scsi%d: warning : SCSI command probably completed successfully\n" |
2826 | KERN_INFO " before abortion\n", HOSTNO); | 2880 | KERN_INFO " before abortion\n", HOSTNO); |
2827 | 2881 | ||
2828 | /* Maybe it is sufficient just to release the ST-DMA lock... (if | 2882 | /* Maybe it is sufficient just to release the ST-DMA lock... (if |
2829 | * possible at all) At least, we should check if the lock could be | 2883 | * possible at all) At least, we should check if the lock could be |
2830 | * released after the abort, in case it is kept due to some bug. | 2884 | * released after the abort, in case it is kept due to some bug. |
2831 | */ | 2885 | */ |
2832 | falcon_release_lock_if_possible( hostdata ); | 2886 | falcon_release_lock_if_possible(hostdata); |
2833 | 2887 | ||
2834 | return SCSI_ABORT_NOT_RUNNING; | 2888 | return SCSI_ABORT_NOT_RUNNING; |
2835 | } | 2889 | } |
2836 | 2890 | ||
2837 | 2891 | ||
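Cases 2 and 4 of NCR5380_abort() above walk a singly linked queue through a pointer-to-pointer (NEXTADDR()) so the matching command can be unlinked whether it sits at the head or in the middle, and the patch routes the pointer update through SET_NEXT(). A generic, standalone sketch of that removal pattern; struct node and list_remove() are made up here and merely stand in for Scsi_Cmnd and the driver's macros.

#include <stdio.h>

struct node {
    int id;
    struct node *next;
};

/* Unlink 'victim' from the list headed at *head; returns 1 if it was found. */
static int list_remove(struct node **head, struct node *victim)
{
    struct node **prev;

    for (prev = head; *prev; prev = &(*prev)->next) {
        if (*prev == victim) {
            *prev = victim->next;    /* bypass the node, head or interior */
            victim->next = NULL;     /* mirrors SET_NEXT(tmp, NULL) */
            return 1;
        }
    }
    return 0;
}

int main(void)
{
    struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };
    struct node *head = &a;

    printf("removed: %d\n", list_remove(&head, &b));
    for (struct node *p = head; p; p = p->next)
        printf("%d ", p->id);
    printf("\n");
    return 0;
}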
2838 | /* | 2892 | /* |
2839 | * Function : int NCR5380_reset (Scsi_Cmnd *cmd) | 2893 | * Function : int NCR5380_reset (Scsi_Cmnd *cmd) |
2840 | * | 2894 | * |
2841 | * Purpose : reset the SCSI bus. | 2895 | * Purpose : reset the SCSI bus. |
2842 | * | 2896 | * |
2843 | * Returns : SCSI_RESET_WAKEUP | 2897 | * Returns : SCSI_RESET_WAKEUP |
2844 | * | 2898 | * |
2845 | */ | 2899 | */ |
2846 | 2900 | ||
2847 | static int NCR5380_bus_reset( Scsi_Cmnd *cmd) | 2901 | static int NCR5380_bus_reset(Scsi_Cmnd *cmd) |
2848 | { | 2902 | { |
2849 | SETUP_HOSTDATA(cmd->device->host); | 2903 | SETUP_HOSTDATA(cmd->device->host); |
2850 | int i; | 2904 | int i; |
2851 | unsigned long flags; | 2905 | unsigned long flags; |
2852 | #if 1 | 2906 | #if 1 |
2853 | Scsi_Cmnd *connected, *disconnected_queue; | 2907 | Scsi_Cmnd *connected, *disconnected_queue; |
2854 | #endif | 2908 | #endif |
2855 | 2909 | ||
2856 | if (!IS_A_TT() && !falcon_got_lock) | 2910 | if (!IS_A_TT() && !falcon_got_lock) |
2857 | printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_reset\n", | 2911 | printk(KERN_ERR "scsi%d: !!BINGO!! Falcon has no lock in NCR5380_reset\n", |
2858 | H_NO(cmd) ); | 2912 | H_NO(cmd)); |
2859 | 2913 | ||
2860 | NCR5380_print_status (cmd->device->host); | 2914 | NCR5380_print_status(cmd->device->host); |
2861 | 2915 | ||
2862 | /* get in phase */ | 2916 | /* get in phase */ |
2863 | NCR5380_write( TARGET_COMMAND_REG, | 2917 | NCR5380_write(TARGET_COMMAND_REG, |
2864 | PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); | 2918 | PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG))); |
2865 | /* assert RST */ | 2919 | /* assert RST */ |
2866 | NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); | 2920 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); |
2867 | udelay (40); | 2921 | udelay(40); |
2868 | /* reset NCR registers */ | 2922 | /* reset NCR registers */ |
2869 | NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); | 2923 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
2870 | NCR5380_write( MODE_REG, MR_BASE ); | 2924 | NCR5380_write(MODE_REG, MR_BASE); |
2871 | NCR5380_write( TARGET_COMMAND_REG, 0 ); | 2925 | NCR5380_write(TARGET_COMMAND_REG, 0); |
2872 | NCR5380_write( SELECT_ENABLE_REG, 0 ); | 2926 | NCR5380_write(SELECT_ENABLE_REG, 0); |
2873 | /* ++roman: reset interrupt condition! otherwise no interrupts get | 2927 | /* ++roman: reset interrupt condition! otherwise no interrupts get |
2874 | * through anymore ... */ | 2928 | * through anymore ... */ |
2875 | (void)NCR5380_read( RESET_PARITY_INTERRUPT_REG ); | 2929 | (void)NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
2876 | 2930 | ||
2877 | #if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ | 2931 | #if 1 /* XXX Should now be done by midlevel code, but it's broken XXX */ |
2878 | /* XXX see below XXX */ | 2932 | /* XXX see below XXX */ |
2879 | 2933 | ||
2880 | /* MSch: old-style reset: actually abort all command processing here */ | 2934 | /* MSch: old-style reset: actually abort all command processing here */ |
2881 | 2935 | ||
2882 | /* After the reset, there are no more connected or disconnected commands | 2936 | /* After the reset, there are no more connected or disconnected commands |
2883 | * and no busy units; to avoid problems with re-inserting the commands | 2937 | * and no busy units; to avoid problems with re-inserting the commands |
2884 | * into the issue_queue (via scsi_done()), the aborted commands are | 2938 | * into the issue_queue (via scsi_done()), the aborted commands are |
2885 | * remembered in local variables first. | 2939 | * remembered in local variables first. |
2886 | */ | 2940 | */ |
2887 | local_irq_save(flags); | 2941 | local_irq_save(flags); |
2888 | connected = (Scsi_Cmnd *)hostdata->connected; | 2942 | connected = (Scsi_Cmnd *)hostdata->connected; |
2889 | hostdata->connected = NULL; | 2943 | hostdata->connected = NULL; |
2890 | disconnected_queue = (Scsi_Cmnd *)hostdata->disconnected_queue; | 2944 | disconnected_queue = (Scsi_Cmnd *)hostdata->disconnected_queue; |
2891 | hostdata->disconnected_queue = NULL; | 2945 | hostdata->disconnected_queue = NULL; |
2892 | #ifdef SUPPORT_TAGS | 2946 | #ifdef SUPPORT_TAGS |
2893 | free_all_tags(); | 2947 | free_all_tags(); |
2894 | #endif | 2948 | #endif |
2895 | for( i = 0; i < 8; ++i ) | 2949 | for (i = 0; i < 8; ++i) |
2896 | hostdata->busy[i] = 0; | 2950 | hostdata->busy[i] = 0; |
2897 | #ifdef REAL_DMA | 2951 | #ifdef REAL_DMA |
2898 | hostdata->dma_len = 0; | 2952 | hostdata->dma_len = 0; |
2899 | #endif | 2953 | #endif |
2900 | local_irq_restore(flags); | 2954 | local_irq_restore(flags); |
2901 | 2955 | ||
2902 | /* In order to tell the mid-level code which commands were aborted, | 2956 | /* In order to tell the mid-level code which commands were aborted, |
2903 | * set the command status to DID_RESET and call scsi_done() !!! | 2957 | * set the command status to DID_RESET and call scsi_done() !!! |
2904 | * This ultimately aborts processing of these commands in the mid-level. | 2958 | * This ultimately aborts processing of these commands in the mid-level. |
2905 | */ | 2959 | */ |
2906 | 2960 | ||
2907 | if ((cmd = connected)) { | 2961 | if ((cmd = connected)) { |
2908 | ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); | 2962 | ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); |
2909 | cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); | 2963 | cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); |
2910 | cmd->scsi_done( cmd ); | 2964 | cmd->scsi_done(cmd); |
2911 | } | 2965 | } |
2912 | |||
2913 | for (i = 0; (cmd = disconnected_queue); ++i) { | ||
2914 | disconnected_queue = NEXT(cmd); | ||
2915 | NEXT(cmd) = NULL; | ||
2916 | cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); | ||
2917 | cmd->scsi_done( cmd ); | ||
2918 | } | ||
2919 | if (i > 0) | ||
2920 | ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); | ||
2921 | |||
2922 | /* The Falcon lock should be released after a reset... | ||
2923 | */ | ||
2924 | /* ++guenther: moved to atari_scsi_reset(), to prevent a race between | ||
2925 | * unlocking and enabling dma interrupt. | ||
2926 | */ | ||
2927 | /* falcon_release_lock_if_possible( hostdata );*/ | ||
2928 | 2966 | ||
2929 | /* since all commands have been explicitly terminated, we need to tell | 2967 | for (i = 0; (cmd = disconnected_queue); ++i) { |
2930 | * the midlevel code that the reset was SUCCESSFUL, and there is no | 2968 | disconnected_queue = NEXT(cmd); |
2931 | * need to 'wake up' the commands by a request_sense | 2969 | SET_NEXT(cmd, NULL); |
2932 | */ | 2970 | cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16); |
2933 | return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; | 2971 | cmd->scsi_done(cmd); |
2972 | } | ||
2973 | if (i > 0) | ||
2974 | ABRT_PRINTK("scsi: reset aborted %d disconnected command(s)\n", i); | ||
2975 | |||
2976 | /* The Falcon lock should be released after a reset... | ||
2977 | */ | ||
2978 | /* ++guenther: moved to atari_scsi_reset(), to prevent a race between | ||
2979 | * unlocking and enabling dma interrupt. | ||
2980 | */ | ||
2981 | /* falcon_release_lock_if_possible( hostdata );*/ | ||
2982 | |||
2983 | /* since all commands have been explicitly terminated, we need to tell | ||
2984 | * the midlevel code that the reset was SUCCESSFUL, and there is no | ||
2985 | * need to 'wake up' the commands by a request_sense | ||
2986 | */ | ||
2987 | return SCSI_RESET_SUCCESS | SCSI_RESET_BUS_RESET; | ||
2934 | #else /* 1 */ | 2988 | #else /* 1 */ |
2935 | 2989 | ||
2936 | /* MSch: new-style reset handling: let the mid-level do what it can */ | 2990 | /* MSch: new-style reset handling: let the mid-level do what it can */ |
2937 | 2991 | ||
2938 | /* ++guenther: MID-LEVEL IS STILL BROKEN. | 2992 | /* ++guenther: MID-LEVEL IS STILL BROKEN. |
2939 | * Mid-level is supposed to requeue all commands that were active on the | 2993 | * Mid-level is supposed to requeue all commands that were active on the |
2940 | * various low-level queues. In fact it does this, but that's not enough | 2994 | * various low-level queues. In fact it does this, but that's not enough |
2941 | * because all these commands are subject to timeout. And if a timeout | 2995 | * because all these commands are subject to timeout. And if a timeout |
2942 | * happens for any removed command, *_abort() is called but all queues | 2996 | * happens for any removed command, *_abort() is called but all queues |
2943 | * are now empty. Abort then gives up the falcon lock, which is fatal, | 2997 | * are now empty. Abort then gives up the falcon lock, which is fatal, |
2944 | * since the mid-level will queue more commands and must have the lock | 2998 | * since the mid-level will queue more commands and must have the lock |
2945 | * (it's all happening inside timer interrupt handler!!). | 2999 | * (it's all happening inside timer interrupt handler!!). |
2946 | * Even worse, abort will return NOT_RUNNING for all those commands not | 3000 | * Even worse, abort will return NOT_RUNNING for all those commands not |
2947 | * on any queue, so they won't be retried ... | 3001 | * on any queue, so they won't be retried ... |
2948 | * | 3002 | * |
2949 | * Conclusion: either scsi.c disables timeout for all reset commands | 3003 | * Conclusion: either scsi.c disables timeout for all reset commands |
2950 | * immediately, or we lose! As of linux-2.0.20 it doesn't. | 3004 | * immediately, or we lose! As of linux-2.0.20 it doesn't. |
2951 | */ | 3005 | */ |
2952 | 3006 | ||
2953 | /* After the reset, there are no more connected or disconnected commands | 3007 | /* After the reset, there are no more connected or disconnected commands |
2954 | * and no busy units; so clear the low-level status here to avoid | 3008 | * and no busy units; so clear the low-level status here to avoid |
2955 | * conflicts when the mid-level code tries to wake up the affected | 3009 | * conflicts when the mid-level code tries to wake up the affected |
2956 | * commands! | 3010 | * commands! |
2957 | */ | 3011 | */ |
2958 | 3012 | ||
2959 | if (hostdata->issue_queue) | 3013 | if (hostdata->issue_queue) |
2960 | ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); | 3014 | ABRT_PRINTK("scsi%d: reset aborted issued command(s)\n", H_NO(cmd)); |
2961 | if (hostdata->connected) | 3015 | if (hostdata->connected) |
2962 | ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); | 3016 | ABRT_PRINTK("scsi%d: reset aborted a connected command\n", H_NO(cmd)); |
2963 | if (hostdata->disconnected_queue) | 3017 | if (hostdata->disconnected_queue) |
2964 | ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); | 3018 | ABRT_PRINTK("scsi%d: reset aborted disconnected command(s)\n", H_NO(cmd)); |
2965 | 3019 | ||
2966 | local_irq_save(flags); | 3020 | local_irq_save(flags); |
2967 | hostdata->issue_queue = NULL; | 3021 | hostdata->issue_queue = NULL; |
2968 | hostdata->connected = NULL; | 3022 | hostdata->connected = NULL; |
2969 | hostdata->disconnected_queue = NULL; | 3023 | hostdata->disconnected_queue = NULL; |
2970 | #ifdef SUPPORT_TAGS | 3024 | #ifdef SUPPORT_TAGS |
2971 | free_all_tags(); | 3025 | free_all_tags(); |
2972 | #endif | 3026 | #endif |
2973 | for( i = 0; i < 8; ++i ) | 3027 | for (i = 0; i < 8; ++i) |
2974 | hostdata->busy[i] = 0; | 3028 | hostdata->busy[i] = 0; |
2975 | #ifdef REAL_DMA | 3029 | #ifdef REAL_DMA |
2976 | hostdata->dma_len = 0; | 3030 | hostdata->dma_len = 0; |
2977 | #endif | 3031 | #endif |
2978 | local_irq_restore(flags); | 3032 | local_irq_restore(flags); |
2979 | 3033 | ||
2980 | /* we did not completely reset all commands, so a wakeup is required */ | 3034 | /* we did not completely reset all commands, so a wakeup is required */ |
2981 | return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; | 3035 | return SCSI_RESET_WAKEUP | SCSI_RESET_BUS_RESET; |
2982 | #endif /* 1 */ | 3036 | #endif /* 1 */ |
2983 | } | 3037 | } |
2984 | |||
2985 | /* Local Variables: */ | ||
2986 | /* tab-width: 8 */ | ||
2987 | /* End: */ | ||
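NCR5380_bus_reset() above finishes every aborted command with cmd->result = (cmd->result & 0xffff) | (DID_RESET << 16), i.e. it preserves the status and message bytes in the low half of the result word and overwrites only the host byte. A small standalone illustration of that layout; DID_RESET (0x08) matches the kernel's scsi.h of this era, the surrounding code is invented.

#include <stdio.h>

#define DID_RESET 0x08

/* Keep the low 16 bits (status + message byte) and replace the host byte. */
static unsigned int set_host_byte(unsigned int result, unsigned int host)
{
    return (result & 0xffff) | (host << 16);
}

int main(void)
{
    unsigned int result = 0x0002;    /* e.g. a CHECK CONDITION status byte */

    result = set_host_byte(result, DID_RESET);
    printf("result = 0x%08x (host byte %u)\n", result, (result >> 16) & 0xff);
    return 0;
}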
diff --git a/drivers/scsi/atari_scsi.c b/drivers/scsi/atari_scsi.c index 642de7b2b7a2..6f8403b82ba1 100644 --- a/drivers/scsi/atari_scsi.c +++ b/drivers/scsi/atari_scsi.c | |||
@@ -69,9 +69,9 @@ | |||
69 | 69 | ||
70 | #define NDEBUG (0) | 70 | #define NDEBUG (0) |
71 | 71 | ||
72 | #define NDEBUG_ABORT 0x800000 | 72 | #define NDEBUG_ABORT 0x00100000 |
73 | #define NDEBUG_TAGS 0x1000000 | 73 | #define NDEBUG_TAGS 0x00200000 |
74 | #define NDEBUG_MERGING 0x2000000 | 74 | #define NDEBUG_MERGING 0x00400000 |
75 | 75 | ||
76 | #define AUTOSENSE | 76 | #define AUTOSENSE |
77 | /* For the Atari version, use only polled IO or REAL_DMA */ | 77 | /* For the Atari version, use only polled IO or REAL_DMA */ |
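The hunk above only renumbers the Atari-specific debug masks (NDEBUG_ABORT and friends); the way such masks gate output does not change. A standalone sketch of the usual pattern, assuming a hypothetical DPRINTK() wrapper — the real driver routes this through dedicated macros such as ABRT_PRINTK() and TAG_PRINTK().

#include <stdio.h>

#define NDEBUG_ABORT 0x00100000
#define NDEBUG_TAGS  0x00200000
#define NDEBUG       (NDEBUG_ABORT)    /* which debug classes are enabled */

/* Print only when the message's class bit is set in NDEBUG. */
#define DPRINTK(mask, ...) \
    do { if (NDEBUG & (mask)) printf(__VA_ARGS__); } while (0)

int main(void)
{
    DPRINTK(NDEBUG_ABORT, "abort path message\n");    /* printed */
    DPRINTK(NDEBUG_TAGS, "tag message\n");            /* bit not set, never prints */
    return 0;
}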
@@ -186,38 +186,37 @@ static inline void DISABLE_IRQ(void) | |||
186 | /***************************** Prototypes *****************************/ | 186 | /***************************** Prototypes *****************************/ |
187 | 187 | ||
188 | #ifdef REAL_DMA | 188 | #ifdef REAL_DMA |
189 | static int scsi_dma_is_ignored_buserr( unsigned char dma_stat ); | 189 | static int scsi_dma_is_ignored_buserr(unsigned char dma_stat); |
190 | static void atari_scsi_fetch_restbytes( void ); | 190 | static void atari_scsi_fetch_restbytes(void); |
191 | static long atari_scsi_dma_residual( struct Scsi_Host *instance ); | 191 | static long atari_scsi_dma_residual(struct Scsi_Host *instance); |
192 | static int falcon_classify_cmd( Scsi_Cmnd *cmd ); | 192 | static int falcon_classify_cmd(Scsi_Cmnd *cmd); |
193 | static unsigned long atari_dma_xfer_len( unsigned long wanted_len, | 193 | static unsigned long atari_dma_xfer_len(unsigned long wanted_len, |
194 | Scsi_Cmnd *cmd, int write_flag ); | 194 | Scsi_Cmnd *cmd, int write_flag); |
195 | #endif | 195 | #endif |
196 | static irqreturn_t scsi_tt_intr( int irq, void *dummy); | 196 | static irqreturn_t scsi_tt_intr(int irq, void *dummy); |
197 | static irqreturn_t scsi_falcon_intr( int irq, void *dummy); | 197 | static irqreturn_t scsi_falcon_intr(int irq, void *dummy); |
198 | static void falcon_release_lock_if_possible( struct NCR5380_hostdata * | 198 | static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata); |
199 | hostdata ); | 199 | static void falcon_get_lock(void); |
200 | static void falcon_get_lock( void ); | ||
201 | #ifdef CONFIG_ATARI_SCSI_RESET_BOOT | 200 | #ifdef CONFIG_ATARI_SCSI_RESET_BOOT |
202 | static void atari_scsi_reset_boot( void ); | 201 | static void atari_scsi_reset_boot(void); |
203 | #endif | 202 | #endif |
204 | static unsigned char atari_scsi_tt_reg_read( unsigned char reg ); | 203 | static unsigned char atari_scsi_tt_reg_read(unsigned char reg); |
205 | static void atari_scsi_tt_reg_write( unsigned char reg, unsigned char value); | 204 | static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value); |
206 | static unsigned char atari_scsi_falcon_reg_read( unsigned char reg ); | 205 | static unsigned char atari_scsi_falcon_reg_read(unsigned char reg); |
207 | static void atari_scsi_falcon_reg_write( unsigned char reg, unsigned char value ); | 206 | static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value); |
208 | 207 | ||
209 | /************************* End of Prototypes **************************/ | 208 | /************************* End of Prototypes **************************/ |
210 | 209 | ||
211 | 210 | ||
212 | static struct Scsi_Host *atari_scsi_host = NULL; | 211 | static struct Scsi_Host *atari_scsi_host; |
213 | static unsigned char (*atari_scsi_reg_read)( unsigned char reg ); | 212 | static unsigned char (*atari_scsi_reg_read)(unsigned char reg); |
214 | static void (*atari_scsi_reg_write)( unsigned char reg, unsigned char value ); | 213 | static void (*atari_scsi_reg_write)(unsigned char reg, unsigned char value); |
215 | 214 | ||
216 | #ifdef REAL_DMA | 215 | #ifdef REAL_DMA |
217 | static unsigned long atari_dma_residual, atari_dma_startaddr; | 216 | static unsigned long atari_dma_residual, atari_dma_startaddr; |
218 | static short atari_dma_active; | 217 | static short atari_dma_active; |
219 | /* pointer to the dribble buffer */ | 218 | /* pointer to the dribble buffer */ |
220 | static char *atari_dma_buffer = NULL; | 219 | static char *atari_dma_buffer; |
221 | /* precalculated physical address of the dribble buffer */ | 220 | /* precalculated physical address of the dribble buffer */ |
222 | static unsigned long atari_dma_phys_buffer; | 221 | static unsigned long atari_dma_phys_buffer; |
223 | /* != 0 tells the Falcon int handler to copy data from the dribble buffer */ | 222 | /* != 0 tells the Falcon int handler to copy data from the dribble buffer */ |
@@ -233,7 +232,7 @@ static char *atari_dma_orig_addr; | |||
233 | static unsigned long atari_dma_stram_mask; | 232 | static unsigned long atari_dma_stram_mask; |
234 | #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) | 233 | #define STRAM_ADDR(a) (((a) & atari_dma_stram_mask) == 0) |
235 | /* number of bytes to cut from a transfer to handle NCR overruns */ | 234 | /* number of bytes to cut from a transfer to handle NCR overruns */ |
236 | static int atari_read_overruns = 0; | 235 | static int atari_read_overruns; |
237 | #endif | 236 | #endif |
238 | 237 | ||
239 | static int setup_can_queue = -1; | 238 | static int setup_can_queue = -1; |
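Several hunks above drop explicit '= NULL' / '= 0' initializers from file-scope statics (atari_scsi_host, atari_dma_buffer, atari_read_overruns). That is purely cosmetic: C guarantees that a static object without an initializer is zero-initialized (it ends up in .bss), as this tiny standalone example shows.

#include <stdio.h>

static char *uninitialized_ptr;    /* equivalent to '= NULL' */
static int uninitialized_count;    /* equivalent to '= 0' */

int main(void)
{
    printf("%p %d\n", (void *)uninitialized_ptr, uninitialized_count);
    return 0;
}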
@@ -256,10 +255,10 @@ module_param(setup_hostid, int, 0); | |||
256 | 255 | ||
257 | #if defined(REAL_DMA) | 256 | #if defined(REAL_DMA) |
258 | 257 | ||
259 | static int scsi_dma_is_ignored_buserr( unsigned char dma_stat ) | 258 | static int scsi_dma_is_ignored_buserr(unsigned char dma_stat) |
260 | { | 259 | { |
261 | int i; | 260 | int i; |
262 | unsigned long addr = SCSI_DMA_READ_P( dma_addr ), end_addr; | 261 | unsigned long addr = SCSI_DMA_READ_P(dma_addr), end_addr; |
263 | 262 | ||
264 | if (dma_stat & 0x01) { | 263 | if (dma_stat & 0x01) { |
265 | 264 | ||
@@ -267,15 +266,14 @@ static int scsi_dma_is_ignored_buserr( unsigned char dma_stat ) | |||
267 | * physical memory chunk (DMA prefetch!), but that doesn't hurt. | 266 | * physical memory chunk (DMA prefetch!), but that doesn't hurt. |
268 | * Check for this case: | 267 | * Check for this case: |
269 | */ | 268 | */ |
270 | 269 | ||
271 | for( i = 0; i < m68k_num_memory; ++i ) { | 270 | for (i = 0; i < m68k_num_memory; ++i) { |
272 | end_addr = m68k_memory[i].addr + | 271 | end_addr = m68k_memory[i].addr + m68k_memory[i].size; |
273 | m68k_memory[i].size; | ||
274 | if (end_addr <= addr && addr <= end_addr + 4) | 272 | if (end_addr <= addr && addr <= end_addr + 4) |
275 | return( 1 ); | 273 | return 1; |
276 | } | 274 | } |
277 | } | 275 | } |
278 | return( 0 ); | 276 | return 0; |
279 | } | 277 | } |
280 | 278 | ||
281 | 279 | ||
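scsi_dma_is_ignored_buserr() above treats a bus error as harmless when the faulting DMA address lies at most four bytes past the end of a physical memory chunk, which is the DMA-prefetch case noted in the comment. A standalone sketch of that range test; the chunk table here is invented, whereas the driver walks the m68k_memory[] array.

#include <stdio.h>

struct mem_chunk {
    unsigned long addr, size;
};

static int addr_is_prefetch_overrun(unsigned long addr,
                                    const struct mem_chunk *mem, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        unsigned long end = mem[i].addr + mem[i].size;

        /* DMA prefetch may touch up to 4 bytes past the end of a chunk */
        if (end <= addr && addr <= end + 4)
            return 1;
    }
    return 0;
}

int main(void)
{
    struct mem_chunk mem[] = {
        { 0x00000000UL, 0x00400000UL },
        { 0x01000000UL, 0x00400000UL },
    };

    printf("%d\n", addr_is_prefetch_overrun(0x00400002UL, mem, 2));
    return 0;
}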
@@ -284,28 +282,27 @@ static int scsi_dma_is_ignored_buserr( unsigned char dma_stat ) | |||
284 | * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has | 282 | * end-of-DMA, both SCSI ints are triggered simultaneously, so the NCR int has |
285 | * to clear the DMA int pending bit before it allows other level 6 interrupts. | 283 | * to clear the DMA int pending bit before it allows other level 6 interrupts. |
286 | */ | 284 | */ |
287 | static void scsi_dma_buserr (int irq, void *dummy) | 285 | static void scsi_dma_buserr(int irq, void *dummy) |
288 | { | 286 | { |
289 | unsigned char dma_stat = tt_scsi_dma.dma_ctrl; | 287 | unsigned char dma_stat = tt_scsi_dma.dma_ctrl; |
290 | 288 | ||
291 | /* Don't do anything if a NCR interrupt is pending. Probably it's just | 289 | /* Don't do anything if a NCR interrupt is pending. Probably it's just |
292 | * masked... */ | 290 | * masked... */ |
293 | if (atari_irq_pending( IRQ_TT_MFP_SCSI )) | 291 | if (atari_irq_pending(IRQ_TT_MFP_SCSI)) |
294 | return; | 292 | return; |
295 | 293 | ||
296 | printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n", | 294 | printk("Bad SCSI DMA interrupt! dma_addr=0x%08lx dma_stat=%02x dma_cnt=%08lx\n", |
297 | SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt)); | 295 | SCSI_DMA_READ_P(dma_addr), dma_stat, SCSI_DMA_READ_P(dma_cnt)); |
298 | if (dma_stat & 0x80) { | 296 | if (dma_stat & 0x80) { |
299 | if (!scsi_dma_is_ignored_buserr( dma_stat )) | 297 | if (!scsi_dma_is_ignored_buserr(dma_stat)) |
300 | printk( "SCSI DMA bus error -- bad DMA programming!\n" ); | 298 | printk("SCSI DMA bus error -- bad DMA programming!\n"); |
301 | } | 299 | } else { |
302 | else { | ||
303 | /* Under normal circumstances we never should get to this point, | 300 | /* Under normal circumstances we never should get to this point, |
304 | * since both interrupts are triggered simultaneously and the 5380 | 301 | * since both interrupts are triggered simultaneously and the 5380 |
305 | * int has higher priority. When this irq is handled, that DMA | 302 | * int has higher priority. When this irq is handled, that DMA |
306 | * interrupt is cleared. So a warning message is printed here. | 303 | * interrupt is cleared. So a warning message is printed here. |
307 | */ | 304 | */ |
308 | printk( "SCSI DMA intr ?? -- this shouldn't happen!\n" ); | 305 | printk("SCSI DMA intr ?? -- this shouldn't happen!\n"); |
309 | } | 306 | } |
310 | } | 307 | } |
311 | #endif | 308 | #endif |
@@ -313,7 +310,7 @@ static void scsi_dma_buserr (int irq, void *dummy) | |||
313 | #endif | 310 | #endif |
314 | 311 | ||
315 | 312 | ||
316 | static irqreturn_t scsi_tt_intr (int irq, void *dummy) | 313 | static irqreturn_t scsi_tt_intr(int irq, void *dummy) |
317 | { | 314 | { |
318 | #ifdef REAL_DMA | 315 | #ifdef REAL_DMA |
319 | int dma_stat; | 316 | int dma_stat; |
@@ -327,7 +324,7 @@ static irqreturn_t scsi_tt_intr (int irq, void *dummy) | |||
327 | * is that a bus error occurred... | 324 | * is that a bus error occurred... |
328 | */ | 325 | */ |
329 | if (dma_stat & 0x80) { | 326 | if (dma_stat & 0x80) { |
330 | if (!scsi_dma_is_ignored_buserr( dma_stat )) { | 327 | if (!scsi_dma_is_ignored_buserr(dma_stat)) { |
331 | printk(KERN_ERR "SCSI DMA caused bus error near 0x%08lx\n", | 328 | printk(KERN_ERR "SCSI DMA caused bus error near 0x%08lx\n", |
332 | SCSI_DMA_READ_P(dma_addr)); | 329 | SCSI_DMA_READ_P(dma_addr)); |
333 | printk(KERN_CRIT "SCSI DMA bus error -- bad DMA programming!"); | 330 | printk(KERN_CRIT "SCSI DMA bus error -- bad DMA programming!"); |
@@ -344,8 +341,7 @@ static irqreturn_t scsi_tt_intr (int irq, void *dummy) | |||
344 | * data reg! | 341 | * data reg! |
345 | */ | 342 | */ |
346 | if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { | 343 | if ((dma_stat & 0x02) && !(dma_stat & 0x40)) { |
347 | atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P( dma_addr ) - | 344 | atari_dma_residual = HOSTDATA_DMALEN - (SCSI_DMA_READ_P(dma_addr) - atari_dma_startaddr); |
348 | atari_dma_startaddr); | ||
349 | 345 | ||
350 | DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", | 346 | DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", |
351 | atari_dma_residual); | 347 | atari_dma_residual); |
@@ -353,28 +349,30 @@ static irqreturn_t scsi_tt_intr (int irq, void *dummy) | |||
353 | if ((signed int)atari_dma_residual < 0) | 349 | if ((signed int)atari_dma_residual < 0) |
354 | atari_dma_residual = 0; | 350 | atari_dma_residual = 0; |
355 | if ((dma_stat & 1) == 0) { | 351 | if ((dma_stat & 1) == 0) { |
356 | /* After read operations, we maybe have to | 352 | /* |
357 | transport some rest bytes */ | 353 | * After read operations, we may have to |
354 | * transfer some remaining bytes | ||
355 | */ | ||
358 | atari_scsi_fetch_restbytes(); | 356 | atari_scsi_fetch_restbytes(); |
359 | } | 357 | } else { |
360 | else { | 358 | /* |
361 | /* There seems to be a nasty bug in some SCSI-DMA/NCR | 359 | * There seems to be a nasty bug in some SCSI-DMA/NCR |
362 | combinations: If a target disconnects while a write | 360 | * combinations: If a target disconnects while a write |
363 | operation is going on, the address register of the | 361 | * operation is going on, the address register of the |
364 | DMA may be a few bytes farer than it actually read. | 362 | * DMA may be a few bytes farther than it actually read. |
365 | This is probably due to DMA prefetching and a delay | 363 | * This is probably due to DMA prefetching and a delay |
366 | between DMA and NCR. Experiments showed that the | 364 | * between DMA and NCR. Experiments showed that the |
367 | dma_addr is 9 bytes to high, but this could vary. | 365 | * dma_addr is 9 bytes too high, but this could vary. |
368 | The problem is, that the residual is thus calculated | 366 | * The problem is that the residual is thus calculated |
369 | wrong and the next transfer will start behind where | 367 | * wrong and the next transfer will start behind where |
370 | it should. So we round up the residual to the next | 368 | * it should. So we round up the residual to the next |
371 | multiple of a sector size, if it isn't already a | 369 | * multiple of a sector size, if it isn't already a |
372 | multiple and the originally expected transfer size | 370 | * multiple and the originally expected transfer size |
373 | was. The latter condition is there to ensure that | 371 | * was a multiple. The latter condition is there to ensure that |
374 | the correction is taken only for "real" data | 372 | * the correction is taken only for "real" data |
375 | transfers and not for, e.g., the parameters of some | 373 | * transfers and not for, e.g., the parameters of some |
376 | other command. These shouldn't disconnect anyway. | 374 | * other command. These shouldn't disconnect anyway. |
377 | */ | 375 | */ |
378 | if (atari_dma_residual & 0x1ff) { | 376 | if (atari_dma_residual & 0x1ff) { |
379 | DMA_PRINTK("SCSI DMA: DMA bug corrected, " | 377 | DMA_PRINTK("SCSI DMA: DMA bug corrected, " |
380 | "difference %ld bytes\n", | 378 | "difference %ld bytes\n", |
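The reflowed comment above describes rounding the residual up to the next 512-byte multiple when a target disconnects during a write and the DMA address register has run ahead. The sketch below shows only that rounding step; the driver additionally checks that the originally expected transfer length was itself a 512-byte multiple, which is omitted here, and the function name is invented.

    #include <stdio.h>

    /* Round a DMA residual up to the next 512-byte multiple. */
    static unsigned long round_residual(unsigned long residual)
    {
        if (residual & 0x1ff)
            residual = (residual + 511) & ~0x1ffUL;
        return residual;
    }

    int main(void)
    {
        printf("%lu\n", round_residual(1015)); /* -> 1024 */
        printf("%lu\n", round_residual(1024)); /* already aligned, unchanged */
        return 0;
    }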
@@ -394,18 +392,18 @@ static irqreturn_t scsi_tt_intr (int irq, void *dummy) | |||
394 | } | 392 | } |
395 | 393 | ||
396 | #endif /* REAL_DMA */ | 394 | #endif /* REAL_DMA */ |
397 | 395 | ||
398 | NCR5380_intr (0, 0, 0); | 396 | NCR5380_intr(0, 0); |
399 | 397 | ||
400 | #if 0 | 398 | #if 0 |
401 | /* To be sure the int is not masked */ | 399 | /* To be sure the int is not masked */ |
402 | atari_enable_irq( IRQ_TT_MFP_SCSI ); | 400 | atari_enable_irq(IRQ_TT_MFP_SCSI); |
403 | #endif | 401 | #endif |
404 | return IRQ_HANDLED; | 402 | return IRQ_HANDLED; |
405 | } | 403 | } |
406 | 404 | ||
407 | 405 | ||
408 | static irqreturn_t scsi_falcon_intr (int irq, void *dummy) | 406 | static irqreturn_t scsi_falcon_intr(int irq, void *dummy) |
409 | { | 407 | { |
410 | #ifdef REAL_DMA | 408 | #ifdef REAL_DMA |
411 | int dma_stat; | 409 | int dma_stat; |
@@ -430,7 +428,7 @@ static irqreturn_t scsi_falcon_intr (int irq, void *dummy) | |||
430 | * bytes are stuck in the ST-DMA fifo (there's no way to reach them!) | 428 | * bytes are stuck in the ST-DMA fifo (there's no way to reach them!) |
431 | */ | 429 | */ |
432 | if (atari_dma_active && (dma_stat & 0x02)) { | 430 | if (atari_dma_active && (dma_stat & 0x02)) { |
433 | unsigned long transferred; | 431 | unsigned long transferred; |
434 | 432 | ||
435 | transferred = SCSI_DMA_GETADR() - atari_dma_startaddr; | 433 | transferred = SCSI_DMA_GETADR() - atari_dma_startaddr; |
436 | /* The ST-DMA address is incremented in 2-byte steps, but the | 434 | /* The ST-DMA address is incremented in 2-byte steps, but the |
@@ -445,8 +443,7 @@ static irqreturn_t scsi_falcon_intr (int irq, void *dummy) | |||
445 | atari_dma_residual = HOSTDATA_DMALEN - transferred; | 443 | atari_dma_residual = HOSTDATA_DMALEN - transferred; |
446 | DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", | 444 | DMA_PRINTK("SCSI DMA: There are %ld residual bytes.\n", |
447 | atari_dma_residual); | 445 | atari_dma_residual); |
448 | } | 446 | } else |
449 | else | ||
450 | atari_dma_residual = 0; | 447 | atari_dma_residual = 0; |
451 | atari_dma_active = 0; | 448 | atari_dma_active = 0; |
452 | 449 | ||
@@ -461,13 +458,13 @@ static irqreturn_t scsi_falcon_intr (int irq, void *dummy) | |||
461 | 458 | ||
462 | #endif /* REAL_DMA */ | 459 | #endif /* REAL_DMA */ |
463 | 460 | ||
464 | NCR5380_intr (0, 0, 0); | 461 | NCR5380_intr(0, 0); |
465 | return IRQ_HANDLED; | 462 | return IRQ_HANDLED; |
466 | } | 463 | } |
467 | 464 | ||
468 | 465 | ||
469 | #ifdef REAL_DMA | 466 | #ifdef REAL_DMA |
470 | static void atari_scsi_fetch_restbytes( void ) | 467 | static void atari_scsi_fetch_restbytes(void) |
471 | { | 468 | { |
472 | int nr; | 469 | int nr; |
473 | char *src, *dst; | 470 | char *src, *dst; |
@@ -505,19 +502,17 @@ static int falcon_dont_release = 0; | |||
505 | * again (but others waiting longer more probably will win). | 502 | * again (but others waiting longer more probably will win). |
506 | */ | 503 | */ |
507 | 504 | ||
508 | static void | 505 | static void falcon_release_lock_if_possible(struct NCR5380_hostdata *hostdata) |
509 | falcon_release_lock_if_possible( struct NCR5380_hostdata * hostdata ) | ||
510 | { | 506 | { |
511 | unsigned long flags; | 507 | unsigned long flags; |
512 | 508 | ||
513 | if (IS_A_TT()) return; | 509 | if (IS_A_TT()) |
514 | 510 | return; | |
511 | |||
515 | local_irq_save(flags); | 512 | local_irq_save(flags); |
516 | 513 | ||
517 | if (falcon_got_lock && | 514 | if (falcon_got_lock && !hostdata->disconnected_queue && |
518 | !hostdata->disconnected_queue && | 515 | !hostdata->issue_queue && !hostdata->connected) { |
519 | !hostdata->issue_queue && | ||
520 | !hostdata->connected) { | ||
521 | 516 | ||
522 | if (falcon_dont_release) { | 517 | if (falcon_dont_release) { |
523 | #if 0 | 518 | #if 0 |
@@ -528,7 +523,7 @@ falcon_release_lock_if_possible( struct NCR5380_hostdata * hostdata ) | |||
528 | } | 523 | } |
529 | falcon_got_lock = 0; | 524 | falcon_got_lock = 0; |
530 | stdma_release(); | 525 | stdma_release(); |
531 | wake_up( &falcon_fairness_wait ); | 526 | wake_up(&falcon_fairness_wait); |
532 | } | 527 | } |
533 | 528 | ||
534 | local_irq_restore(flags); | 529 | local_irq_restore(flags); |
@@ -549,31 +544,31 @@ falcon_release_lock_if_possible( struct NCR5380_hostdata * hostdata ) | |||
549 | * Complicated, complicated.... Sigh... | 544 | * Complicated, complicated.... Sigh... |
550 | */ | 545 | */ |
551 | 546 | ||
552 | static void falcon_get_lock( void ) | 547 | static void falcon_get_lock(void) |
553 | { | 548 | { |
554 | unsigned long flags; | 549 | unsigned long flags; |
555 | 550 | ||
556 | if (IS_A_TT()) return; | 551 | if (IS_A_TT()) |
552 | return; | ||
557 | 553 | ||
558 | local_irq_save(flags); | 554 | local_irq_save(flags); |
559 | 555 | ||
560 | while( !in_interrupt() && falcon_got_lock && stdma_others_waiting() ) | 556 | while (!in_irq() && falcon_got_lock && stdma_others_waiting()) |
561 | sleep_on( &falcon_fairness_wait ); | 557 | sleep_on(&falcon_fairness_wait); |
562 | 558 | ||
563 | while (!falcon_got_lock) { | 559 | while (!falcon_got_lock) { |
564 | if (in_interrupt()) | 560 | if (in_irq()) |
565 | panic( "Falcon SCSI hasn't ST-DMA lock in interrupt" ); | 561 | panic("Falcon SCSI hasn't ST-DMA lock in interrupt"); |
566 | if (!falcon_trying_lock) { | 562 | if (!falcon_trying_lock) { |
567 | falcon_trying_lock = 1; | 563 | falcon_trying_lock = 1; |
568 | stdma_lock(scsi_falcon_intr, NULL); | 564 | stdma_lock(scsi_falcon_intr, NULL); |
569 | falcon_got_lock = 1; | 565 | falcon_got_lock = 1; |
570 | falcon_trying_lock = 0; | 566 | falcon_trying_lock = 0; |
571 | wake_up( &falcon_try_wait ); | 567 | wake_up(&falcon_try_wait); |
572 | } | 568 | } else { |
573 | else { | 569 | sleep_on(&falcon_try_wait); |
574 | sleep_on( &falcon_try_wait ); | ||
575 | } | 570 | } |
576 | } | 571 | } |
577 | 572 | ||
578 | local_irq_restore(flags); | 573 | local_irq_restore(flags); |
579 | if (!falcon_got_lock) | 574 | if (!falcon_got_lock) |
@@ -587,18 +582,18 @@ static void falcon_get_lock( void ) | |||
587 | */ | 582 | */ |
588 | 583 | ||
589 | #if 0 | 584 | #if 0 |
590 | int atari_queue_command (Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) | 585 | int atari_queue_command(Scsi_Cmnd *cmd, void (*done)(Scsi_Cmnd *)) |
591 | { | 586 | { |
592 | /* falcon_get_lock(); | 587 | /* falcon_get_lock(); |
593 | * ++guenther: moved to NCR5380_queue_command() to prevent | 588 | * ++guenther: moved to NCR5380_queue_command() to prevent |
594 | * race condition, see there for an explanation. | 589 | * race condition, see there for an explanation. |
595 | */ | 590 | */ |
596 | return( NCR5380_queue_command( cmd, done ) ); | 591 | return NCR5380_queue_command(cmd, done); |
597 | } | 592 | } |
598 | #endif | 593 | #endif |
599 | 594 | ||
600 | 595 | ||
601 | int atari_scsi_detect (struct scsi_host_template *host) | 596 | int atari_scsi_detect(struct scsi_host_template *host) |
602 | { | 597 | { |
603 | static int called = 0; | 598 | static int called = 0; |
604 | struct Scsi_Host *instance; | 599 | struct Scsi_Host *instance; |
@@ -606,7 +601,7 @@ int atari_scsi_detect (struct scsi_host_template *host) | |||
606 | if (!MACH_IS_ATARI || | 601 | if (!MACH_IS_ATARI || |
607 | (!ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(TT_SCSI)) || | 602 | (!ATARIHW_PRESENT(ST_SCSI) && !ATARIHW_PRESENT(TT_SCSI)) || |
608 | called) | 603 | called) |
609 | return( 0 ); | 604 | return 0; |
610 | 605 | ||
611 | host->proc_name = "Atari"; | 606 | host->proc_name = "Atari"; |
612 | 607 | ||
@@ -655,32 +650,33 @@ int atari_scsi_detect (struct scsi_host_template *host) | |||
655 | !ATARIHW_PRESENT(EXTD_DMA) && m68k_num_memory > 1) { | 650 | !ATARIHW_PRESENT(EXTD_DMA) && m68k_num_memory > 1) { |
656 | atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); | 651 | atari_dma_buffer = atari_stram_alloc(STRAM_BUFFER_SIZE, "SCSI"); |
657 | if (!atari_dma_buffer) { | 652 | if (!atari_dma_buffer) { |
658 | printk( KERN_ERR "atari_scsi_detect: can't allocate ST-RAM " | 653 | printk(KERN_ERR "atari_scsi_detect: can't allocate ST-RAM " |
659 | "double buffer\n" ); | 654 | "double buffer\n"); |
660 | return( 0 ); | 655 | return 0; |
661 | } | 656 | } |
662 | atari_dma_phys_buffer = virt_to_phys( atari_dma_buffer ); | 657 | atari_dma_phys_buffer = virt_to_phys(atari_dma_buffer); |
663 | atari_dma_orig_addr = 0; | 658 | atari_dma_orig_addr = 0; |
664 | } | 659 | } |
665 | #endif | 660 | #endif |
666 | instance = scsi_register (host, sizeof (struct NCR5380_hostdata)); | 661 | instance = scsi_register(host, sizeof(struct NCR5380_hostdata)); |
667 | if(instance == NULL) | 662 | if (instance == NULL) { |
668 | { | ||
669 | atari_stram_free(atari_dma_buffer); | 663 | atari_stram_free(atari_dma_buffer); |
670 | atari_dma_buffer = 0; | 664 | atari_dma_buffer = 0; |
671 | return 0; | 665 | return 0; |
672 | } | 666 | } |
673 | atari_scsi_host = instance; | 667 | atari_scsi_host = instance; |
674 | /* Set irq to 0, to avoid that the mid-level code disables our interrupt | 668 | /* |
675 | * during queue_command calls. This is completely unnecessary, and even | 669 | * Set irq to 0, to prevent the mid-level code from disabling our interrupt |
676 | * worse causes bad problems on the Falcon, where the int is shared with | 670 | * during queue_command calls. This is completely unnecessary, and even |
677 | * IDE and floppy! */ | 671 | * worse, causes bad problems on the Falcon, where the int is shared with |
672 | * IDE and floppy! | ||
673 | */ | ||
678 | instance->irq = 0; | 674 | instance->irq = 0; |
679 | 675 | ||
680 | #ifdef CONFIG_ATARI_SCSI_RESET_BOOT | 676 | #ifdef CONFIG_ATARI_SCSI_RESET_BOOT |
681 | atari_scsi_reset_boot(); | 677 | atari_scsi_reset_boot(); |
682 | #endif | 678 | #endif |
683 | NCR5380_init (instance, 0); | 679 | NCR5380_init(instance, 0); |
684 | 680 | ||
685 | if (IS_A_TT()) { | 681 | if (IS_A_TT()) { |
686 | 682 | ||
@@ -727,11 +723,10 @@ int atari_scsi_detect (struct scsi_host_template *host) | |||
727 | * the rest data bug is fixed, this can be lowered to 1. | 723 | * the rest data bug is fixed, this can be lowered to 1. |
728 | */ | 724 | */ |
729 | atari_read_overruns = 4; | 725 | atari_read_overruns = 4; |
730 | } | 726 | } |
731 | #endif /*REAL_DMA*/ | 727 | #endif /*REAL_DMA*/ |
732 | } | 728 | } else { /* ! IS_A_TT */ |
733 | else { /* ! IS_A_TT */ | 729 | |
734 | |||
735 | /* Nothing to do for the interrupt: the ST-DMA is initialized | 730 | /* Nothing to do for the interrupt: the ST-DMA is initialized |
736 | * already by atari_init_INTS() | 731 | * already by atari_init_INTS() |
737 | */ | 732 | */ |
@@ -756,23 +751,21 @@ int atari_scsi_detect (struct scsi_host_template *host) | |||
756 | setup_use_tagged_queuing ? "yes" : "no", | 751 | setup_use_tagged_queuing ? "yes" : "no", |
757 | #endif | 752 | #endif |
758 | instance->hostt->this_id ); | 753 | instance->hostt->this_id ); |
759 | NCR5380_print_options (instance); | 754 | NCR5380_print_options(instance); |
760 | printk ("\n"); | 755 | printk("\n"); |
761 | 756 | ||
762 | called = 1; | 757 | called = 1; |
763 | return( 1 ); | 758 | return 1; |
764 | } | 759 | } |
765 | 760 | ||
766 | #ifdef MODULE | 761 | int atari_scsi_release(struct Scsi_Host *sh) |
767 | int atari_scsi_release (struct Scsi_Host *sh) | ||
768 | { | 762 | { |
769 | if (IS_A_TT()) | 763 | if (IS_A_TT()) |
770 | free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr); | 764 | free_irq(IRQ_TT_MFP_SCSI, scsi_tt_intr); |
771 | if (atari_dma_buffer) | 765 | if (atari_dma_buffer) |
772 | atari_stram_free (atari_dma_buffer); | 766 | atari_stram_free(atari_dma_buffer); |
773 | return 1; | 767 | return 1; |
774 | } | 768 | } |
775 | #endif | ||
776 | 769 | ||
777 | void __init atari_scsi_setup(char *str, int *ints) | 770 | void __init atari_scsi_setup(char *str, int *ints) |
778 | { | 771 | { |
@@ -781,9 +774,9 @@ void __init atari_scsi_setup(char *str, int *ints) | |||
781 | * Defaults depend on TT or Falcon, hostid determined at run time. | 774 | * Defaults depend on TT or Falcon, hostid determined at run time. |
782 | * Negative values mean don't change. | 775 | * Negative values mean don't change. |
783 | */ | 776 | */ |
784 | 777 | ||
785 | if (ints[0] < 1) { | 778 | if (ints[0] < 1) { |
786 | printk( "atari_scsi_setup: no arguments!\n" ); | 779 | printk("atari_scsi_setup: no arguments!\n"); |
787 | return; | 780 | return; |
788 | } | 781 | } |
789 | 782 | ||
@@ -809,7 +802,7 @@ void __init atari_scsi_setup(char *str, int *ints) | |||
809 | if (ints[4] >= 0 && ints[4] <= 7) | 802 | if (ints[4] >= 0 && ints[4] <= 7) |
810 | setup_hostid = ints[4]; | 803 | setup_hostid = ints[4]; |
811 | else if (ints[4] > 7) | 804 | else if (ints[4] > 7) |
812 | printk( "atari_scsi_setup: invalid host ID %d !\n", ints[4] ); | 805 | printk("atari_scsi_setup: invalid host ID %d !\n", ints[4]); |
813 | } | 806 | } |
814 | #ifdef SUPPORT_TAGS | 807 | #ifdef SUPPORT_TAGS |
815 | if (ints[0] >= 5) { | 808 | if (ints[0] >= 5) { |
@@ -821,7 +814,7 @@ void __init atari_scsi_setup(char *str, int *ints) | |||
821 | 814 | ||
822 | int atari_scsi_bus_reset(Scsi_Cmnd *cmd) | 815 | int atari_scsi_bus_reset(Scsi_Cmnd *cmd) |
823 | { | 816 | { |
824 | int rv; | 817 | int rv; |
825 | struct NCR5380_hostdata *hostdata = | 818 | struct NCR5380_hostdata *hostdata = |
826 | (struct NCR5380_hostdata *)cmd->device->host->hostdata; | 819 | (struct NCR5380_hostdata *)cmd->device->host->hostdata; |
827 | 820 | ||
@@ -831,13 +824,12 @@ int atari_scsi_bus_reset(Scsi_Cmnd *cmd) | |||
831 | */ | 824 | */ |
832 | /* And abort a maybe active DMA transfer */ | 825 | /* And abort a maybe active DMA transfer */ |
833 | if (IS_A_TT()) { | 826 | if (IS_A_TT()) { |
834 | atari_turnoff_irq( IRQ_TT_MFP_SCSI ); | 827 | atari_turnoff_irq(IRQ_TT_MFP_SCSI); |
835 | #ifdef REAL_DMA | 828 | #ifdef REAL_DMA |
836 | tt_scsi_dma.dma_ctrl = 0; | 829 | tt_scsi_dma.dma_ctrl = 0; |
837 | #endif /* REAL_DMA */ | 830 | #endif /* REAL_DMA */ |
838 | } | 831 | } else { |
839 | else { | 832 | atari_turnoff_irq(IRQ_MFP_FSCSI); |
840 | atari_turnoff_irq( IRQ_MFP_FSCSI ); | ||
841 | #ifdef REAL_DMA | 833 | #ifdef REAL_DMA |
842 | st_dma.dma_mode_status = 0x90; | 834 | st_dma.dma_mode_status = 0x90; |
843 | atari_dma_active = 0; | 835 | atari_dma_active = 0; |
@@ -849,52 +841,51 @@ int atari_scsi_bus_reset(Scsi_Cmnd *cmd) | |||
849 | 841 | ||
850 | /* Re-enable ints */ | 842 | /* Re-enable ints */ |
851 | if (IS_A_TT()) { | 843 | if (IS_A_TT()) { |
852 | atari_turnon_irq( IRQ_TT_MFP_SCSI ); | 844 | atari_turnon_irq(IRQ_TT_MFP_SCSI); |
853 | } | 845 | } else { |
854 | else { | 846 | atari_turnon_irq(IRQ_MFP_FSCSI); |
855 | atari_turnon_irq( IRQ_MFP_FSCSI ); | ||
856 | } | 847 | } |
857 | if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS) | 848 | if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS) |
858 | falcon_release_lock_if_possible(hostdata); | 849 | falcon_release_lock_if_possible(hostdata); |
859 | 850 | ||
860 | return( rv ); | 851 | return rv; |
861 | } | 852 | } |
862 | 853 | ||
863 | 854 | ||
864 | #ifdef CONFIG_ATARI_SCSI_RESET_BOOT | 855 | #ifdef CONFIG_ATARI_SCSI_RESET_BOOT |
865 | static void __init atari_scsi_reset_boot(void) | 856 | static void __init atari_scsi_reset_boot(void) |
866 | { | 857 | { |
867 | unsigned long end; | 858 | unsigned long end; |
868 | 859 | ||
869 | /* | 860 | /* |
870 | * Do a SCSI reset to clean up the bus during initialization. No messing | 861 | * Do a SCSI reset to clean up the bus during initialization. No messing |
871 | * with the queues, interrupts, or locks necessary here. | 862 | * with the queues, interrupts, or locks necessary here. |
872 | */ | 863 | */ |
873 | 864 | ||
874 | printk( "Atari SCSI: resetting the SCSI bus..." ); | 865 | printk("Atari SCSI: resetting the SCSI bus..."); |
875 | 866 | ||
876 | /* get in phase */ | 867 | /* get in phase */ |
877 | NCR5380_write( TARGET_COMMAND_REG, | 868 | NCR5380_write(TARGET_COMMAND_REG, |
878 | PHASE_SR_TO_TCR( NCR5380_read(STATUS_REG) )); | 869 | PHASE_SR_TO_TCR(NCR5380_read(STATUS_REG))); |
879 | 870 | ||
880 | /* assert RST */ | 871 | /* assert RST */ |
881 | NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST ); | 872 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE | ICR_ASSERT_RST); |
882 | /* The min. reset hold time is 25us, so 40us should be enough */ | 873 | /* The min. reset hold time is 25us, so 40us should be enough */ |
883 | udelay( 50 ); | 874 | udelay(50); |
884 | /* reset RST and interrupt */ | 875 | /* reset RST and interrupt */ |
885 | NCR5380_write( INITIATOR_COMMAND_REG, ICR_BASE ); | 876 | NCR5380_write(INITIATOR_COMMAND_REG, ICR_BASE); |
886 | NCR5380_read( RESET_PARITY_INTERRUPT_REG ); | 877 | NCR5380_read(RESET_PARITY_INTERRUPT_REG); |
887 | 878 | ||
888 | end = jiffies + AFTER_RESET_DELAY; | 879 | end = jiffies + AFTER_RESET_DELAY; |
889 | while (time_before(jiffies, end)) | 880 | while (time_before(jiffies, end)) |
890 | barrier(); | 881 | barrier(); |
891 | 882 | ||
892 | printk( " done\n" ); | 883 | printk(" done\n"); |
893 | } | 884 | } |
894 | #endif | 885 | #endif |
895 | 886 | ||
896 | 887 | ||
897 | const char * atari_scsi_info (struct Scsi_Host *host) | 888 | const char *atari_scsi_info(struct Scsi_Host *host) |
898 | { | 889 | { |
899 | /* atari_scsi_detect() is verbose enough... */ | 890 | /* atari_scsi_detect() is verbose enough... */ |
900 | static const char string[] = "Atari native SCSI"; | 891 | static const char string[] = "Atari native SCSI"; |
@@ -904,10 +895,10 @@ const char * atari_scsi_info (struct Scsi_Host *host) | |||
904 | 895 | ||
905 | #if defined(REAL_DMA) | 896 | #if defined(REAL_DMA) |
906 | 897 | ||
907 | unsigned long atari_scsi_dma_setup( struct Scsi_Host *instance, void *data, | 898 | unsigned long atari_scsi_dma_setup(struct Scsi_Host *instance, void *data, |
908 | unsigned long count, int dir ) | 899 | unsigned long count, int dir) |
909 | { | 900 | { |
910 | unsigned long addr = virt_to_phys( data ); | 901 | unsigned long addr = virt_to_phys(data); |
911 | 902 | ||
912 | DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " | 903 | DMA_PRINTK("scsi%d: setting up dma, data = %p, phys = %lx, count = %ld, " |
913 | "dir = %d\n", instance->host_no, data, addr, count, dir); | 904 | "dir = %d\n", instance->host_no, data, addr, count, dir); |
@@ -919,38 +910,37 @@ unsigned long atari_scsi_dma_setup( struct Scsi_Host *instance, void *data, | |||
919 | * wanted address. | 910 | * wanted address. |
920 | */ | 911 | */ |
921 | if (dir) | 912 | if (dir) |
922 | memcpy( atari_dma_buffer, data, count ); | 913 | memcpy(atari_dma_buffer, data, count); |
923 | else | 914 | else |
924 | atari_dma_orig_addr = data; | 915 | atari_dma_orig_addr = data; |
925 | addr = atari_dma_phys_buffer; | 916 | addr = atari_dma_phys_buffer; |
926 | } | 917 | } |
927 | 918 | ||
928 | atari_dma_startaddr = addr; /* Needed for calculating residual later. */ | 919 | atari_dma_startaddr = addr; /* Needed for calculating residual later. */ |
929 | 920 | ||
930 | /* Cache cleanup stuff: On writes, push any dirty cache out before sending | 921 | /* Cache cleanup stuff: On writes, push any dirty cache out before sending |
931 | * it to the peripheral. (Must be done before DMA setup, since at least | 922 | * it to the peripheral. (Must be done before DMA setup, since at least |
932 | * the ST-DMA begins to fill internal buffers right after setup. For | 923 | * the ST-DMA begins to fill internal buffers right after setup. For |
933 | * reads, invalidate any cache, may be altered after DMA without CPU | 924 | * reads, invalidate any cache, may be altered after DMA without CPU |
934 | * knowledge. | 925 | * knowledge. |
935 | * | 926 | * |
936 | * ++roman: For the Medusa, there's no need at all for that cache stuff, | 927 | * ++roman: For the Medusa, there's no need at all for that cache stuff, |
937 | * because the hardware does bus snooping (fine!). | 928 | * because the hardware does bus snooping (fine!). |
938 | */ | 929 | */ |
939 | dma_cache_maintenance( addr, count, dir ); | 930 | dma_cache_maintenance(addr, count, dir); |
940 | 931 | ||
941 | if (count == 0) | 932 | if (count == 0) |
942 | printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n"); | 933 | printk(KERN_NOTICE "SCSI warning: DMA programmed for 0 bytes !\n"); |
943 | 934 | ||
944 | if (IS_A_TT()) { | 935 | if (IS_A_TT()) { |
945 | tt_scsi_dma.dma_ctrl = dir; | 936 | tt_scsi_dma.dma_ctrl = dir; |
946 | SCSI_DMA_WRITE_P( dma_addr, addr ); | 937 | SCSI_DMA_WRITE_P(dma_addr, addr); |
947 | SCSI_DMA_WRITE_P( dma_cnt, count ); | 938 | SCSI_DMA_WRITE_P(dma_cnt, count); |
948 | tt_scsi_dma.dma_ctrl = dir | 2; | 939 | tt_scsi_dma.dma_ctrl = dir | 2; |
949 | } | 940 | } else { /* ! IS_A_TT */ |
950 | else { /* ! IS_A_TT */ | 941 | |
951 | |||
952 | /* set address */ | 942 | /* set address */ |
953 | SCSI_DMA_SETADR( addr ); | 943 | SCSI_DMA_SETADR(addr); |
954 | 944 | ||
955 | /* toggle direction bit to clear FIFO and set DMA direction */ | 945 | /* toggle direction bit to clear FIFO and set DMA direction */ |
956 | dir <<= 8; | 946 | dir <<= 8; |
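The atari_scsi_dma_setup() hunks above stage transfers through the ST-RAM "dribble" buffer whenever the caller's buffer is not DMA-reachable: writes are copied into the buffer before the transfer, reads record the original address so the data can be copied back afterwards. Below is a self-contained sketch of that decision; the helper names, buffer size and the in_stram flag are made up and stand in for atari_dma_buffer, STRAM_BUFFER_SIZE and the STRAM_ADDR() test.

    #include <string.h>
    #include <stdio.h>

    #define STRAM_BUFFER_SIZE 4096                 /* hypothetical size */
    static unsigned char dribble[STRAM_BUFFER_SIZE];
    static void *dma_orig_addr;                    /* copy-back target for reads */

    static void *dma_setup(void *data, unsigned long count, int write, int in_stram)
    {
        if (in_stram)
            return data;                           /* DMA straight from/to the buffer */
        if (write)
            memcpy(dribble, data, count);          /* stage outgoing data now */
        else
            dma_orig_addr = data;                  /* remember where to copy back */
        return dribble;
    }

    int main(void)
    {
        char buf[64] = "payload";
        void *addr = dma_setup(buf, sizeof(buf), 1, 0);

        printf("DMA uses %s buffer\n", addr == dribble ? "the dribble" : "the original");
        return 0;
    }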
@@ -968,13 +958,13 @@ unsigned long atari_scsi_dma_setup( struct Scsi_Host *instance, void *data, | |||
968 | atari_dma_active = 1; | 958 | atari_dma_active = 1; |
969 | } | 959 | } |
970 | 960 | ||
971 | return( count ); | 961 | return count; |
972 | } | 962 | } |
973 | 963 | ||
974 | 964 | ||
975 | static long atari_scsi_dma_residual( struct Scsi_Host *instance ) | 965 | static long atari_scsi_dma_residual(struct Scsi_Host *instance) |
976 | { | 966 | { |
977 | return( atari_dma_residual ); | 967 | return atari_dma_residual; |
978 | } | 968 | } |
979 | 969 | ||
980 | 970 | ||
@@ -982,13 +972,13 @@ static long atari_scsi_dma_residual( struct Scsi_Host *instance ) | |||
982 | #define CMD_SURELY_BYTE_MODE 1 | 972 | #define CMD_SURELY_BYTE_MODE 1 |
983 | #define CMD_MODE_UNKNOWN 2 | 973 | #define CMD_MODE_UNKNOWN 2 |
984 | 974 | ||
985 | static int falcon_classify_cmd( Scsi_Cmnd *cmd ) | 975 | static int falcon_classify_cmd(Scsi_Cmnd *cmd) |
986 | { | 976 | { |
987 | unsigned char opcode = cmd->cmnd[0]; | 977 | unsigned char opcode = cmd->cmnd[0]; |
988 | 978 | ||
989 | if (opcode == READ_DEFECT_DATA || opcode == READ_LONG || | 979 | if (opcode == READ_DEFECT_DATA || opcode == READ_LONG || |
990 | opcode == READ_BUFFER) | 980 | opcode == READ_BUFFER) |
991 | return( CMD_SURELY_BYTE_MODE ); | 981 | return CMD_SURELY_BYTE_MODE; |
992 | else if (opcode == READ_6 || opcode == READ_10 || | 982 | else if (opcode == READ_6 || opcode == READ_10 || |
993 | opcode == 0xa8 /* READ_12 */ || opcode == READ_REVERSE || | 983 | opcode == 0xa8 /* READ_12 */ || opcode == READ_REVERSE || |
994 | opcode == RECOVER_BUFFERED_DATA) { | 984 | opcode == RECOVER_BUFFERED_DATA) { |
@@ -996,12 +986,11 @@ static int falcon_classify_cmd( Scsi_Cmnd *cmd ) | |||
996 | * needed here: The transfer is block-mode only if the 'fixed' bit is | 986 | * needed here: The transfer is block-mode only if the 'fixed' bit is |
997 | * set! */ | 987 | * set! */ |
998 | if (cmd->device->type == TYPE_TAPE && !(cmd->cmnd[1] & 1)) | 988 | if (cmd->device->type == TYPE_TAPE && !(cmd->cmnd[1] & 1)) |
999 | return( CMD_SURELY_BYTE_MODE ); | 989 | return CMD_SURELY_BYTE_MODE; |
1000 | else | 990 | else |
1001 | return( CMD_SURELY_BLOCK_MODE ); | 991 | return CMD_SURELY_BLOCK_MODE; |
1002 | } | 992 | } else |
1003 | else | 993 | return CMD_MODE_UNKNOWN; |
1004 | return( CMD_MODE_UNKNOWN ); | ||
1005 | } | 994 | } |
1006 | 995 | ||
1007 | 996 | ||
@@ -1014,19 +1003,18 @@ static int falcon_classify_cmd( Scsi_Cmnd *cmd ) | |||
1014 | * the overrun problem, so this question is academic :-) | 1003 | * the overrun problem, so this question is academic :-) |
1015 | */ | 1004 | */ |
1016 | 1005 | ||
1017 | static unsigned long atari_dma_xfer_len( unsigned long wanted_len, | 1006 | static unsigned long atari_dma_xfer_len(unsigned long wanted_len, |
1018 | Scsi_Cmnd *cmd, | 1007 | Scsi_Cmnd *cmd, int write_flag) |
1019 | int write_flag ) | ||
1020 | { | 1008 | { |
1021 | unsigned long possible_len, limit; | 1009 | unsigned long possible_len, limit; |
1022 | #ifndef CONFIG_TT_DMA_EMUL | 1010 | #ifndef CONFIG_TT_DMA_EMUL |
1023 | if (MACH_IS_HADES) | 1011 | if (MACH_IS_HADES) |
1024 | /* Hades has no SCSI DMA at all :-( Always force use of PIO */ | 1012 | /* Hades has no SCSI DMA at all :-( Always force use of PIO */ |
1025 | return( 0 ); | 1013 | return 0; |
1026 | #endif | 1014 | #endif |
1027 | if (IS_A_TT()) | 1015 | if (IS_A_TT()) |
1028 | /* TT SCSI DMA can transfer arbitrary #bytes */ | 1016 | /* TT SCSI DMA can transfer arbitrary #bytes */ |
1029 | return( wanted_len ); | 1017 | return wanted_len; |
1030 | 1018 | ||
1031 | /* ST DMA chip is stupid -- only multiples of 512 bytes! (and max. | 1019 | /* ST DMA chip is stupid -- only multiples of 512 bytes! (and max. |
1032 | * 255*512 bytes, but this should be enough) | 1020 | * 255*512 bytes, but this should be enough) |
@@ -1062,8 +1050,7 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len, | |||
1062 | * this). | 1050 | * this). |
1063 | */ | 1051 | */ |
1064 | possible_len = wanted_len; | 1052 | possible_len = wanted_len; |
1065 | } | 1053 | } else { |
1066 | else { | ||
1067 | /* Read operations: if the wanted transfer length is not a multiple of | 1054 | /* Read operations: if the wanted transfer length is not a multiple of |
1068 | * 512, we cannot use DMA, since the ST-DMA cannot split transfers | 1055 | * 512, we cannot use DMA, since the ST-DMA cannot split transfers |
1069 | * (no interrupt on DMA finished!) | 1056 | * (no interrupt on DMA finished!) |
@@ -1073,15 +1060,15 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len, | |||
1073 | else { | 1060 | else { |
1074 | /* Now classify the command (see above) and decide whether it is | 1061 | /* Now classify the command (see above) and decide whether it is |
1075 | * allowed to do DMA at all */ | 1062 | * allowed to do DMA at all */ |
1076 | switch( falcon_classify_cmd( cmd )) { | 1063 | switch (falcon_classify_cmd(cmd)) { |
1077 | case CMD_SURELY_BLOCK_MODE: | 1064 | case CMD_SURELY_BLOCK_MODE: |
1078 | possible_len = wanted_len; | 1065 | possible_len = wanted_len; |
1079 | break; | 1066 | break; |
1080 | case CMD_SURELY_BYTE_MODE: | 1067 | case CMD_SURELY_BYTE_MODE: |
1081 | possible_len = 0; /* DMA prohibited */ | 1068 | possible_len = 0; /* DMA prohibited */ |
1082 | break; | 1069 | break; |
1083 | case CMD_MODE_UNKNOWN: | 1070 | case CMD_MODE_UNKNOWN: |
1084 | default: | 1071 | default: |
1085 | /* For unknown commands assume block transfers if the transfer | 1072 | /* For unknown commands assume block transfers if the transfer |
1086 | * size/allocation length is >= 1024 */ | 1073 | * size/allocation length is >= 1024 */ |
1087 | possible_len = (wanted_len < 1024) ? 0 : wanted_len; | 1074 | possible_len = (wanted_len < 1024) ? 0 : wanted_len; |
@@ -1089,9 +1076,9 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len, | |||
1089 | } | 1076 | } |
1090 | } | 1077 | } |
1091 | } | 1078 | } |
1092 | 1079 | ||
1093 | /* Last step: apply the hard limit on DMA transfers */ | 1080 | /* Last step: apply the hard limit on DMA transfers */ |
1094 | limit = (atari_dma_buffer && !STRAM_ADDR( virt_to_phys(cmd->SCp.ptr) )) ? | 1081 | limit = (atari_dma_buffer && !STRAM_ADDR(virt_to_phys(cmd->SCp.ptr))) ? |
1095 | STRAM_BUFFER_SIZE : 255*512; | 1082 | STRAM_BUFFER_SIZE : 255*512; |
1096 | if (possible_len > limit) | 1083 | if (possible_len > limit) |
1097 | possible_len = limit; | 1084 | possible_len = limit; |
@@ -1100,7 +1087,7 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len, | |||
1100 | DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes " | 1087 | DMA_PRINTK("Sorry, must cut DMA transfer size to %ld bytes " |
1101 | "instead of %ld\n", possible_len, wanted_len); | 1088 | "instead of %ld\n", possible_len, wanted_len); |
1102 | 1089 | ||
1103 | return( possible_len ); | 1090 | return possible_len; |
1104 | } | 1091 | } |
1105 | 1092 | ||
1106 | 1093 | ||
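atari_dma_xfer_len() above trims Falcon transfers to what the ST-DMA can handle: reads that are not a multiple of 512 bytes fall back to PIO, and every transfer is capped at 255*512 bytes or the bounce-buffer size. The simplified sketch below shows only that clamping; it ignores the command classification and the Hades/TT special cases visible in the hunks, and the function and parameter names are invented.

    #include <stdio.h>

    static unsigned long xfer_len(unsigned long wanted, int write, unsigned long limit)
    {
        unsigned long possible = wanted;

        if (!write && (wanted & 0x1ff))    /* reads: ST-DMA cannot split transfers */
            possible = 0;                  /* fall back to PIO */
        if (possible > limit)
            possible = limit;
        return possible;
    }

    int main(void)
    {
        printf("%lu\n", xfer_len(1024, 0, 255 * 512));   /* 1024: DMA is fine        */
        printf("%lu\n", xfer_len(1000, 0, 255 * 512));   /* 0: odd-sized read -> PIO */
        printf("%lu\n", xfer_len(200000, 1, 255 * 512)); /* clamped to 130560        */
        return 0;
    }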
@@ -1114,23 +1101,23 @@ static unsigned long atari_dma_xfer_len( unsigned long wanted_len, | |||
1114 | * NCR5380_write call these functions via function pointers. | 1101 | * NCR5380_write call these functions via function pointers. |
1115 | */ | 1102 | */ |
1116 | 1103 | ||
1117 | static unsigned char atari_scsi_tt_reg_read( unsigned char reg ) | 1104 | static unsigned char atari_scsi_tt_reg_read(unsigned char reg) |
1118 | { | 1105 | { |
1119 | return( tt_scsi_regp[reg * 2] ); | 1106 | return tt_scsi_regp[reg * 2]; |
1120 | } | 1107 | } |
1121 | 1108 | ||
1122 | static void atari_scsi_tt_reg_write( unsigned char reg, unsigned char value ) | 1109 | static void atari_scsi_tt_reg_write(unsigned char reg, unsigned char value) |
1123 | { | 1110 | { |
1124 | tt_scsi_regp[reg * 2] = value; | 1111 | tt_scsi_regp[reg * 2] = value; |
1125 | } | 1112 | } |
1126 | 1113 | ||
1127 | static unsigned char atari_scsi_falcon_reg_read( unsigned char reg ) | 1114 | static unsigned char atari_scsi_falcon_reg_read(unsigned char reg) |
1128 | { | 1115 | { |
1129 | dma_wd.dma_mode_status= (u_short)(0x88 + reg); | 1116 | dma_wd.dma_mode_status= (u_short)(0x88 + reg); |
1130 | return( (u_char)dma_wd.fdc_acces_seccount ); | 1117 | return (u_char)dma_wd.fdc_acces_seccount; |
1131 | } | 1118 | } |
1132 | 1119 | ||
1133 | static void atari_scsi_falcon_reg_write( unsigned char reg, unsigned char value ) | 1120 | static void atari_scsi_falcon_reg_write(unsigned char reg, unsigned char value) |
1134 | { | 1121 | { |
1135 | dma_wd.dma_mode_status = (u_short)(0x88 + reg); | 1122 | dma_wd.dma_mode_status = (u_short)(0x88 + reg); |
1136 | dma_wd.fdc_acces_seccount = (u_short)value; | 1123 | dma_wd.fdc_acces_seccount = (u_short)value; |
diff --git a/drivers/scsi/atari_scsi.h b/drivers/scsi/atari_scsi.h index f917bdd09b41..efadb8d567c2 100644 --- a/drivers/scsi/atari_scsi.h +++ b/drivers/scsi/atari_scsi.h | |||
@@ -21,11 +21,7 @@ | |||
21 | int atari_scsi_detect (struct scsi_host_template *); | 21 | int atari_scsi_detect (struct scsi_host_template *); |
22 | const char *atari_scsi_info (struct Scsi_Host *); | 22 | const char *atari_scsi_info (struct Scsi_Host *); |
23 | int atari_scsi_reset (Scsi_Cmnd *, unsigned int); | 23 | int atari_scsi_reset (Scsi_Cmnd *, unsigned int); |
24 | #ifdef MODULE | ||
25 | int atari_scsi_release (struct Scsi_Host *); | 24 | int atari_scsi_release (struct Scsi_Host *); |
26 | #else | ||
27 | #define atari_scsi_release NULL | ||
28 | #endif | ||
29 | 25 | ||
30 | /* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher | 26 | /* The values for CMD_PER_LUN and CAN_QUEUE are somehow arbitrary. Higher |
31 | * values should work, too; try it! (but cmd_per_lun costs memory!) */ | 27 | * values should work, too; try it! (but cmd_per_lun costs memory!) */ |
@@ -63,6 +59,32 @@ int atari_scsi_release (struct Scsi_Host *); | |||
63 | #define NCR5380_dma_xfer_len(i,cmd,phase) \ | 59 | #define NCR5380_dma_xfer_len(i,cmd,phase) \ |
64 | atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1) | 60 | atari_dma_xfer_len(cmd->SCp.this_residual,cmd,((phase) & SR_IO) ? 0 : 1) |
65 | 61 | ||
62 | /* former generic SCSI error handling stuff */ | ||
63 | |||
64 | #define SCSI_ABORT_SNOOZE 0 | ||
65 | #define SCSI_ABORT_SUCCESS 1 | ||
66 | #define SCSI_ABORT_PENDING 2 | ||
67 | #define SCSI_ABORT_BUSY 3 | ||
68 | #define SCSI_ABORT_NOT_RUNNING 4 | ||
69 | #define SCSI_ABORT_ERROR 5 | ||
70 | |||
71 | #define SCSI_RESET_SNOOZE 0 | ||
72 | #define SCSI_RESET_PUNT 1 | ||
73 | #define SCSI_RESET_SUCCESS 2 | ||
74 | #define SCSI_RESET_PENDING 3 | ||
75 | #define SCSI_RESET_WAKEUP 4 | ||
76 | #define SCSI_RESET_NOT_RUNNING 5 | ||
77 | #define SCSI_RESET_ERROR 6 | ||
78 | |||
79 | #define SCSI_RESET_SYNCHRONOUS 0x01 | ||
80 | #define SCSI_RESET_ASYNCHRONOUS 0x02 | ||
81 | #define SCSI_RESET_SUGGEST_BUS_RESET 0x04 | ||
82 | #define SCSI_RESET_SUGGEST_HOST_RESET 0x08 | ||
83 | |||
84 | #define SCSI_RESET_BUS_RESET 0x100 | ||
85 | #define SCSI_RESET_HOST_RESET 0x200 | ||
86 | #define SCSI_RESET_ACTION 0xff | ||
87 | |||
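These constants reproduce the old mid-layer reset codes locally, so atari_scsi_bus_reset() can keep masking its return value with SCSI_RESET_ACTION, as seen earlier in the .c hunk. A tiny sketch of that interpretation, using only values defined above (the composite rv value is invented):

    #include <stdio.h>

    #define SCSI_RESET_SUCCESS   2
    #define SCSI_RESET_BUS_RESET 0x100
    #define SCSI_RESET_ACTION    0xff

    int main(void)
    {
        int rv = SCSI_RESET_BUS_RESET | SCSI_RESET_SUCCESS;

        if ((rv & SCSI_RESET_ACTION) == SCSI_RESET_SUCCESS)
            printf("reset succeeded, ST-DMA lock may be released\n");
        return 0;
    }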
66 | /* Debugging printk definitions: | 88 | /* Debugging printk definitions: |
67 | * | 89 | * |
68 | * ARB -> arbitration | 90 | * ARB -> arbitration |
@@ -91,144 +113,58 @@ int atari_scsi_release (struct Scsi_Host *); | |||
91 | * | 113 | * |
92 | */ | 114 | */ |
93 | 115 | ||
94 | #if NDEBUG & NDEBUG_ARBITRATION | 116 | #define dprint(flg, format...) \ |
117 | ({ \ | ||
118 | if (NDEBUG & (flg)) \ | ||
119 | printk(KERN_DEBUG format); \ | ||
120 | }) | ||
121 | |||
95 | #define ARB_PRINTK(format, args...) \ | 122 | #define ARB_PRINTK(format, args...) \ |
96 | printk(KERN_DEBUG format , ## args) | 123 | dprint(NDEBUG_ARBITRATION, format , ## args) |
97 | #else | ||
98 | #define ARB_PRINTK(format, args...) | ||
99 | #endif | ||
100 | #if NDEBUG & NDEBUG_AUTOSENSE | ||
101 | #define ASEN_PRINTK(format, args...) \ | 124 | #define ASEN_PRINTK(format, args...) \ |
102 | printk(KERN_DEBUG format , ## args) | 125 | dprint(NDEBUG_AUTOSENSE, format , ## args) |
103 | #else | ||
104 | #define ASEN_PRINTK(format, args...) | ||
105 | #endif | ||
106 | #if NDEBUG & NDEBUG_DMA | ||
107 | #define DMA_PRINTK(format, args...) \ | 126 | #define DMA_PRINTK(format, args...) \ |
108 | printk(KERN_DEBUG format , ## args) | 127 | dprint(NDEBUG_DMA, format , ## args) |
109 | #else | ||
110 | #define DMA_PRINTK(format, args...) | ||
111 | #endif | ||
112 | #if NDEBUG & NDEBUG_HANDSHAKE | ||
113 | #define HSH_PRINTK(format, args...) \ | 128 | #define HSH_PRINTK(format, args...) \ |
114 | printk(KERN_DEBUG format , ## args) | 129 | dprint(NDEBUG_HANDSHAKE, format , ## args) |
115 | #else | ||
116 | #define HSH_PRINTK(format, args...) | ||
117 | #endif | ||
118 | #if NDEBUG & NDEBUG_INFORMATION | ||
119 | #define INF_PRINTK(format, args...) \ | 130 | #define INF_PRINTK(format, args...) \ |
120 | printk(KERN_DEBUG format , ## args) | 131 | dprint(NDEBUG_INFORMATION, format , ## args) |
121 | #else | ||
122 | #define INF_PRINTK(format, args...) | ||
123 | #endif | ||
124 | #if NDEBUG & NDEBUG_INIT | ||
125 | #define INI_PRINTK(format, args...) \ | 132 | #define INI_PRINTK(format, args...) \ |
126 | printk(KERN_DEBUG format , ## args) | 133 | dprint(NDEBUG_INIT, format , ## args) |
127 | #else | ||
128 | #define INI_PRINTK(format, args...) | ||
129 | #endif | ||
130 | #if NDEBUG & NDEBUG_INTR | ||
131 | #define INT_PRINTK(format, args...) \ | 134 | #define INT_PRINTK(format, args...) \ |
132 | printk(KERN_DEBUG format , ## args) | 135 | dprint(NDEBUG_INTR, format , ## args) |
133 | #else | ||
134 | #define INT_PRINTK(format, args...) | ||
135 | #endif | ||
136 | #if NDEBUG & NDEBUG_LINKED | ||
137 | #define LNK_PRINTK(format, args...) \ | 136 | #define LNK_PRINTK(format, args...) \ |
138 | printk(KERN_DEBUG format , ## args) | 137 | dprint(NDEBUG_LINKED, format , ## args) |
139 | #else | ||
140 | #define LNK_PRINTK(format, args...) | ||
141 | #endif | ||
142 | #if NDEBUG & NDEBUG_MAIN | ||
143 | #define MAIN_PRINTK(format, args...) \ | 138 | #define MAIN_PRINTK(format, args...) \ |
144 | printk(KERN_DEBUG format , ## args) | 139 | dprint(NDEBUG_MAIN, format , ## args) |
145 | #else | ||
146 | #define MAIN_PRINTK(format, args...) | ||
147 | #endif | ||
148 | #if NDEBUG & NDEBUG_NO_DATAOUT | ||
149 | #define NDAT_PRINTK(format, args...) \ | 140 | #define NDAT_PRINTK(format, args...) \ |
150 | printk(KERN_DEBUG format , ## args) | 141 | dprint(NDEBUG_NO_DATAOUT, format , ## args) |
151 | #else | ||
152 | #define NDAT_PRINTK(format, args...) | ||
153 | #endif | ||
154 | #if NDEBUG & NDEBUG_NO_WRITE | ||
155 | #define NWR_PRINTK(format, args...) \ | 142 | #define NWR_PRINTK(format, args...) \ |
156 | printk(KERN_DEBUG format , ## args) | 143 | dprint(NDEBUG_NO_WRITE, format , ## args) |
157 | #else | ||
158 | #define NWR_PRINTK(format, args...) | ||
159 | #endif | ||
160 | #if NDEBUG & NDEBUG_PIO | ||
161 | #define PIO_PRINTK(format, args...) \ | 144 | #define PIO_PRINTK(format, args...) \ |
162 | printk(KERN_DEBUG format , ## args) | 145 | dprint(NDEBUG_PIO, format , ## args) |
163 | #else | ||
164 | #define PIO_PRINTK(format, args...) | ||
165 | #endif | ||
166 | #if NDEBUG & NDEBUG_PSEUDO_DMA | ||
167 | #define PDMA_PRINTK(format, args...) \ | 146 | #define PDMA_PRINTK(format, args...) \ |
168 | printk(KERN_DEBUG format , ## args) | 147 | dprint(NDEBUG_PSEUDO_DMA, format , ## args) |
169 | #else | ||
170 | #define PDMA_PRINTK(format, args...) | ||
171 | #endif | ||
172 | #if NDEBUG & NDEBUG_QUEUES | ||
173 | #define QU_PRINTK(format, args...) \ | 148 | #define QU_PRINTK(format, args...) \ |
174 | printk(KERN_DEBUG format , ## args) | 149 | dprint(NDEBUG_QUEUES, format , ## args) |
175 | #else | ||
176 | #define QU_PRINTK(format, args...) | ||
177 | #endif | ||
178 | #if NDEBUG & NDEBUG_RESELECTION | ||
179 | #define RSL_PRINTK(format, args...) \ | 150 | #define RSL_PRINTK(format, args...) \ |
180 | printk(KERN_DEBUG format , ## args) | 151 | dprint(NDEBUG_RESELECTION, format , ## args) |
181 | #else | ||
182 | #define RSL_PRINTK(format, args...) | ||
183 | #endif | ||
184 | #if NDEBUG & NDEBUG_SELECTION | ||
185 | #define SEL_PRINTK(format, args...) \ | 152 | #define SEL_PRINTK(format, args...) \ |
186 | printk(KERN_DEBUG format , ## args) | 153 | dprint(NDEBUG_SELECTION, format , ## args) |
187 | #else | ||
188 | #define SEL_PRINTK(format, args...) | ||
189 | #endif | ||
190 | #if NDEBUG & NDEBUG_USLEEP | ||
191 | #define USL_PRINTK(format, args...) \ | 154 | #define USL_PRINTK(format, args...) \ |
192 | printk(KERN_DEBUG format , ## args) | 155 | dprint(NDEBUG_USLEEP, format , ## args) |
193 | #else | ||
194 | #define USL_PRINTK(format, args...) | ||
195 | #endif | ||
196 | #if NDEBUG & NDEBUG_LAST_BYTE_SENT | ||
197 | #define LBS_PRINTK(format, args...) \ | 156 | #define LBS_PRINTK(format, args...) \ |
198 | printk(KERN_DEBUG format , ## args) | 157 | dprint(NDEBUG_LAST_BYTE_SENT, format , ## args) |
199 | #else | ||
200 | #define LBS_PRINTK(format, args...) | ||
201 | #endif | ||
202 | #if NDEBUG & NDEBUG_RESTART_SELECT | ||
203 | #define RSS_PRINTK(format, args...) \ | 158 | #define RSS_PRINTK(format, args...) \ |
204 | printk(KERN_DEBUG format , ## args) | 159 | dprint(NDEBUG_RESTART_SELECT, format , ## args) |
205 | #else | ||
206 | #define RSS_PRINTK(format, args...) | ||
207 | #endif | ||
208 | #if NDEBUG & NDEBUG_EXTENDED | ||
209 | #define EXT_PRINTK(format, args...) \ | 160 | #define EXT_PRINTK(format, args...) \ |
210 | printk(KERN_DEBUG format , ## args) | 161 | dprint(NDEBUG_EXTENDED, format , ## args) |
211 | #else | ||
212 | #define EXT_PRINTK(format, args...) | ||
213 | #endif | ||
214 | #if NDEBUG & NDEBUG_ABORT | ||
215 | #define ABRT_PRINTK(format, args...) \ | 162 | #define ABRT_PRINTK(format, args...) \ |
216 | printk(KERN_DEBUG format , ## args) | 163 | dprint(NDEBUG_ABORT, format , ## args) |
217 | #else | ||
218 | #define ABRT_PRINTK(format, args...) | ||
219 | #endif | ||
220 | #if NDEBUG & NDEBUG_TAGS | ||
221 | #define TAG_PRINTK(format, args...) \ | 164 | #define TAG_PRINTK(format, args...) \ |
222 | printk(KERN_DEBUG format , ## args) | 165 | dprint(NDEBUG_TAGS, format , ## args) |
223 | #else | ||
224 | #define TAG_PRINTK(format, args...) | ||
225 | #endif | ||
226 | #if NDEBUG & NDEBUG_MERGING | ||
227 | #define MER_PRINTK(format, args...) \ | 166 | #define MER_PRINTK(format, args...) \ |
228 | printk(KERN_DEBUG format , ## args) | 167 | dprint(NDEBUG_MERGING, format , ## args) |
229 | #else | ||
230 | #define MER_PRINTK(format, args...) | ||
231 | #endif | ||
232 | 168 | ||
233 | /* conditional macros for NCR5380_print_{,phase,status} */ | 169 | /* conditional macros for NCR5380_print_{,phase,status} */ |
234 | 170 | ||
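The new dprint() macro above replaces one #if/#else pair per debug category with a single compile-time bitmask test against NDEBUG. Below is a standalone sketch of the same pattern; the flag values are invented, do { } while (0) stands in for the GCC statement-expression used in the header, and only two of the *_PRINTK wrappers are shown.

    #include <stdio.h>

    #define NDEBUG_DMA   0x01
    #define NDEBUG_INTR  0x02
    #define NDEBUG       (NDEBUG_DMA)          /* enable DMA messages only */

    #define dprint(flg, fmt, ...)                         \
        do {                                              \
            if (NDEBUG & (flg))                           \
                printf("debug: " fmt, ##__VA_ARGS__);     \
        } while (0)

    #define DMA_PRINTK(fmt, ...) dprint(NDEBUG_DMA,  fmt, ##__VA_ARGS__)
    #define INT_PRINTK(fmt, ...) dprint(NDEBUG_INTR, fmt, ##__VA_ARGS__)

    int main(void)
    {
        DMA_PRINTK("residual %d bytes\n", 9);  /* printed: the DMA flag is set        */
        INT_PRINTK("spurious irq\n");          /* not printed: constant-false branch  */
        return 0;
    }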
diff --git a/drivers/scsi/esp.c b/drivers/scsi/esp.c deleted file mode 100644 index 2c2fe80bc42a..000000000000 --- a/drivers/scsi/esp.c +++ /dev/null | |||
@@ -1,4394 +0,0 @@ | |||
1 | /* esp.c: ESP Sun SCSI driver. | ||
2 | * | ||
3 | * Copyright (C) 1995, 1998, 2006 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | /* TODO: | ||
7 | * | ||
8 | * 1) Maybe disable parity checking in config register one for SCSI1 | ||
9 | * targets. (Gilmore says parity error on the SBus can lock up | ||
10 | * old sun4c's) | ||
11 | * 2) Add support for DMA2 pipelining. | ||
12 | * 3) Add tagged queueing. | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/string.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/blkdev.h> | ||
21 | #include <linux/proc_fs.h> | ||
22 | #include <linux/stat.h> | ||
23 | #include <linux/init.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/module.h> | ||
27 | |||
28 | #include "esp.h" | ||
29 | |||
30 | #include <asm/sbus.h> | ||
31 | #include <asm/dma.h> | ||
32 | #include <asm/system.h> | ||
33 | #include <asm/ptrace.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/oplib.h> | ||
36 | #include <asm/io.h> | ||
37 | #include <asm/irq.h> | ||
38 | #ifndef __sparc_v9__ | ||
39 | #include <asm/machines.h> | ||
40 | #include <asm/idprom.h> | ||
41 | #endif | ||
42 | |||
43 | #include <scsi/scsi.h> | ||
44 | #include <scsi/scsi_cmnd.h> | ||
45 | #include <scsi/scsi_device.h> | ||
46 | #include <scsi/scsi_eh.h> | ||
47 | #include <scsi/scsi_host.h> | ||
48 | #include <scsi/scsi_tcq.h> | ||
49 | |||
50 | #define DRV_VERSION "1.101" | ||
51 | |||
52 | #define DEBUG_ESP | ||
53 | /* #define DEBUG_ESP_HME */ | ||
54 | /* #define DEBUG_ESP_DATA */ | ||
55 | /* #define DEBUG_ESP_QUEUE */ | ||
56 | /* #define DEBUG_ESP_DISCONNECT */ | ||
57 | /* #define DEBUG_ESP_STATUS */ | ||
58 | /* #define DEBUG_ESP_PHASES */ | ||
59 | /* #define DEBUG_ESP_WORKBUS */ | ||
60 | /* #define DEBUG_STATE_MACHINE */ | ||
61 | /* #define DEBUG_ESP_CMDS */ | ||
62 | /* #define DEBUG_ESP_IRQS */ | ||
63 | /* #define DEBUG_SDTR */ | ||
64 | /* #define DEBUG_ESP_SG */ | ||
65 | |||
66 | /* Use the following to sprinkle debugging messages in a way which | ||
67 | * suits you if combinations of the above become too verbose when | ||
68 | * trying to track down a specific problem. | ||
69 | */ | ||
70 | /* #define DEBUG_ESP_MISC */ | ||
71 | |||
72 | #if defined(DEBUG_ESP) | ||
73 | #define ESPLOG(foo) printk foo | ||
74 | #else | ||
75 | #define ESPLOG(foo) | ||
76 | #endif /* (DEBUG_ESP) */ | ||
77 | |||
78 | #if defined(DEBUG_ESP_HME) | ||
79 | #define ESPHME(foo) printk foo | ||
80 | #else | ||
81 | #define ESPHME(foo) | ||
82 | #endif | ||
83 | |||
84 | #if defined(DEBUG_ESP_DATA) | ||
85 | #define ESPDATA(foo) printk foo | ||
86 | #else | ||
87 | #define ESPDATA(foo) | ||
88 | #endif | ||
89 | |||
90 | #if defined(DEBUG_ESP_QUEUE) | ||
91 | #define ESPQUEUE(foo) printk foo | ||
92 | #else | ||
93 | #define ESPQUEUE(foo) | ||
94 | #endif | ||
95 | |||
96 | #if defined(DEBUG_ESP_DISCONNECT) | ||
97 | #define ESPDISC(foo) printk foo | ||
98 | #else | ||
99 | #define ESPDISC(foo) | ||
100 | #endif | ||
101 | |||
102 | #if defined(DEBUG_ESP_STATUS) | ||
103 | #define ESPSTAT(foo) printk foo | ||
104 | #else | ||
105 | #define ESPSTAT(foo) | ||
106 | #endif | ||
107 | |||
108 | #if defined(DEBUG_ESP_PHASES) | ||
109 | #define ESPPHASE(foo) printk foo | ||
110 | #else | ||
111 | #define ESPPHASE(foo) | ||
112 | #endif | ||
113 | |||
114 | #if defined(DEBUG_ESP_WORKBUS) | ||
115 | #define ESPBUS(foo) printk foo | ||
116 | #else | ||
117 | #define ESPBUS(foo) | ||
118 | #endif | ||
119 | |||
120 | #if defined(DEBUG_ESP_IRQS) | ||
121 | #define ESPIRQ(foo) printk foo | ||
122 | #else | ||
123 | #define ESPIRQ(foo) | ||
124 | #endif | ||
125 | |||
126 | #if defined(DEBUG_SDTR) | ||
127 | #define ESPSDTR(foo) printk foo | ||
128 | #else | ||
129 | #define ESPSDTR(foo) | ||
130 | #endif | ||
131 | |||
132 | #if defined(DEBUG_ESP_MISC) | ||
133 | #define ESPMISC(foo) printk foo | ||
134 | #else | ||
135 | #define ESPMISC(foo) | ||
136 | #endif | ||
137 | |||
138 | /* Command phase enumeration. */ | ||
139 | enum { | ||
140 | not_issued = 0x00, /* Still in the issue_SC queue. */ | ||
141 | |||
142 | /* Various forms of selecting a target. */ | ||
143 | #define in_slct_mask 0x10 | ||
144 | in_slct_norm = 0x10, /* ESP is arbitrating, normal selection */ | ||
145 | in_slct_stop = 0x11, /* ESP will select, then stop with IRQ */ | ||
146 | in_slct_msg = 0x12, /* select, then send a message */ | ||
147 | in_slct_tag = 0x13, /* select and send tagged queue msg */ | ||
148 | in_slct_sneg = 0x14, /* select and acquire sync capabilities */ | ||
149 | |||
150 | /* Any post selection activity. */ | ||
151 | #define in_phases_mask 0x20 | ||
152 | in_datain = 0x20, /* Data is transferring from the bus */ | ||
153 | in_dataout = 0x21, /* Data is transferring to the bus */ | ||
154 | in_data_done = 0x22, /* Last DMA data operation done (maybe) */ | ||
155 | in_msgin = 0x23, /* Eating message from target */ | ||
156 | in_msgincont = 0x24, /* Eating more msg bytes from target */ | ||
157 | in_msgindone = 0x25, /* Decide what to do with what we got */ | ||
158 | in_msgout = 0x26, /* Sending message to target */ | ||
159 | in_msgoutdone = 0x27, /* Done sending msg out */ | ||
160 | in_cmdbegin = 0x28, /* Sending cmd after abnormal selection */ | ||
161 | in_cmdend = 0x29, /* Done sending slow cmd */ | ||
162 | in_status = 0x2a, /* Was in status phase, finishing cmd */ | ||
163 | in_freeing = 0x2b, /* freeing the bus for cmd cmplt or disc */ | ||
164 | in_the_dark = 0x2c, /* Don't know what bus phase we are in */ | ||
165 | |||
166 | /* Special states, ie. not normal bus transitions... */ | ||
167 | #define in_spec_mask 0x80 | ||
168 | in_abortone = 0x80, /* Aborting one command currently */ | ||
169 | in_abortall = 0x81, /* Blowing away all commands we have */ | ||
170 | in_resetdev = 0x82, /* SCSI target reset in progress */ | ||
171 | in_resetbus = 0x83, /* SCSI bus reset in progress */ | ||
172 | in_tgterror = 0x84, /* Target did something stupid */ | ||
173 | }; | ||
174 | |||
175 | enum { | ||
176 | /* Zero has special meaning, see skipahead[12]. */ | ||
177 | /*0*/ do_never, | ||
178 | |||
179 | /*1*/ do_phase_determine, | ||
180 | /*2*/ do_reset_bus, | ||
181 | /*3*/ do_reset_complete, | ||
182 | /*4*/ do_work_bus, | ||
183 | /*5*/ do_intr_end | ||
184 | }; | ||
185 | |||
186 | /* Forward declarations. */ | ||
187 | static irqreturn_t esp_intr(int irq, void *dev_id); | ||
188 | |||
189 | /* Debugging routines */ | ||
190 | struct esp_cmdstrings { | ||
191 | u8 cmdchar; | ||
192 | char *text; | ||
193 | } esp_cmd_strings[] = { | ||
194 | /* Miscellaneous */ | ||
195 | { ESP_CMD_NULL, "ESP_NOP", }, | ||
196 | { ESP_CMD_FLUSH, "FIFO_FLUSH", }, | ||
197 | { ESP_CMD_RC, "RSTESP", }, | ||
198 | { ESP_CMD_RS, "RSTSCSI", }, | ||
199 | /* Disconnected State Group */ | ||
200 | { ESP_CMD_RSEL, "RESLCTSEQ", }, | ||
201 | { ESP_CMD_SEL, "SLCTNATN", }, | ||
202 | { ESP_CMD_SELA, "SLCTATN", }, | ||
203 | { ESP_CMD_SELAS, "SLCTATNSTOP", }, | ||
204 | { ESP_CMD_ESEL, "ENSLCTRESEL", }, | ||
205 | { ESP_CMD_DSEL, "DISSELRESEL", }, | ||
206 | { ESP_CMD_SA3, "SLCTATN3", }, | ||
207 | { ESP_CMD_RSEL3, "RESLCTSEQ", }, | ||
208 | /* Target State Group */ | ||
209 | { ESP_CMD_SMSG, "SNDMSG", }, | ||
210 | { ESP_CMD_SSTAT, "SNDSTATUS", }, | ||
211 | { ESP_CMD_SDATA, "SNDDATA", }, | ||
212 | { ESP_CMD_DSEQ, "DISCSEQ", }, | ||
213 | { ESP_CMD_TSEQ, "TERMSEQ", }, | ||
214 | { ESP_CMD_TCCSEQ, "TRGTCMDCOMPSEQ", }, | ||
215 | { ESP_CMD_DCNCT, "DISC", }, | ||
216 | { ESP_CMD_RMSG, "RCVMSG", }, | ||
217 | { ESP_CMD_RCMD, "RCVCMD", }, | ||
218 | { ESP_CMD_RDATA, "RCVDATA", }, | ||
219 | { ESP_CMD_RCSEQ, "RCVCMDSEQ", }, | ||
220 | /* Initiator State Group */ | ||
221 | { ESP_CMD_TI, "TRANSINFO", }, | ||
222 | { ESP_CMD_ICCSEQ, "INICMDSEQCOMP", }, | ||
223 | { ESP_CMD_MOK, "MSGACCEPTED", }, | ||
224 | { ESP_CMD_TPAD, "TPAD", }, | ||
225 | { ESP_CMD_SATN, "SATN", }, | ||
226 | { ESP_CMD_RATN, "RATN", }, | ||
227 | }; | ||
228 | #define NUM_ESP_COMMANDS ((sizeof(esp_cmd_strings)) / (sizeof(struct esp_cmdstrings))) | ||
229 | |||
230 | /* Print textual representation of an ESP command */ | ||
231 | static inline void esp_print_cmd(u8 espcmd) | ||
232 | { | ||
233 | u8 dma_bit = espcmd & ESP_CMD_DMA; | ||
234 | int i; | ||
235 | |||
236 | espcmd &= ~dma_bit; | ||
237 | for (i = 0; i < NUM_ESP_COMMANDS; i++) | ||
238 | if (esp_cmd_strings[i].cmdchar == espcmd) | ||
239 | break; | ||
240 | if (i == NUM_ESP_COMMANDS) | ||
241 | printk("ESP_Unknown"); | ||
242 | else | ||
243 | printk("%s%s", esp_cmd_strings[i].text, | ||
244 | ((dma_bit) ? "+DMA" : "")); | ||
245 | } | ||
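esp_print_cmd() above masks off the DMA modifier bit and then linearly searches the command/name table for the remaining opcode. A standalone sketch of that lookup with a two-entry table; the command values and names here are invented and do not match the real ESP_CMD_* encoding.

    #include <stdio.h>

    #define CMD_DMA   0x80
    #define CMD_NOP   0x00
    #define CMD_FLUSH 0x01

    static const struct { unsigned char cmd; const char *text; } names[] = {
        { CMD_NOP,   "ESP_NOP"    },
        { CMD_FLUSH, "FIFO_FLUSH" },
    };

    static void print_cmd(unsigned char cmd)
    {
        unsigned char dma = cmd & CMD_DMA;
        unsigned int i;

        cmd &= ~CMD_DMA;                       /* strip the DMA modifier first */
        for (i = 0; i < sizeof(names) / sizeof(names[0]); i++)
            if (names[i].cmd == cmd) {
                printf("%s%s\n", names[i].text, dma ? "+DMA" : "");
                return;
            }
        printf("Unknown\n");
    }

    int main(void)
    {
        print_cmd(CMD_FLUSH | CMD_DMA);        /* prints "FIFO_FLUSH+DMA" */
        return 0;
    }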
246 | |||
247 | /* Print the status register's value */ | ||
248 | static inline void esp_print_statreg(u8 statreg) | ||
249 | { | ||
250 | u8 phase; | ||
251 | |||
252 | printk("STATUS<"); | ||
253 | phase = statreg & ESP_STAT_PMASK; | ||
254 | printk("%s,", (phase == ESP_DOP ? "DATA-OUT" : | ||
255 | (phase == ESP_DIP ? "DATA-IN" : | ||
256 | (phase == ESP_CMDP ? "COMMAND" : | ||
257 | (phase == ESP_STATP ? "STATUS" : | ||
258 | (phase == ESP_MOP ? "MSG-OUT" : | ||
259 | (phase == ESP_MIP ? "MSG_IN" : | ||
260 | "unknown"))))))); | ||
261 | if (statreg & ESP_STAT_TDONE) | ||
262 | printk("TRANS_DONE,"); | ||
263 | if (statreg & ESP_STAT_TCNT) | ||
264 | printk("TCOUNT_ZERO,"); | ||
265 | if (statreg & ESP_STAT_PERR) | ||
266 | printk("P_ERROR,"); | ||
267 | if (statreg & ESP_STAT_SPAM) | ||
268 | printk("SPAM,"); | ||
269 | if (statreg & ESP_STAT_INTR) | ||
270 | printk("IRQ,"); | ||
271 | printk(">"); | ||
272 | } | ||
273 | |||
274 | /* Print the interrupt register's value */ | ||
275 | static inline void esp_print_ireg(u8 intreg) | ||
276 | { | ||
277 | printk("INTREG< "); | ||
278 | if (intreg & ESP_INTR_S) | ||
279 | printk("SLCT_NATN "); | ||
280 | if (intreg & ESP_INTR_SATN) | ||
281 | printk("SLCT_ATN "); | ||
282 | if (intreg & ESP_INTR_RSEL) | ||
283 | printk("RSLCT "); | ||
284 | if (intreg & ESP_INTR_FDONE) | ||
285 | printk("FDONE "); | ||
286 | if (intreg & ESP_INTR_BSERV) | ||
287 | printk("BSERV "); | ||
288 | if (intreg & ESP_INTR_DC) | ||
289 | printk("DISCNCT "); | ||
290 | if (intreg & ESP_INTR_IC) | ||
291 | printk("ILL_CMD "); | ||
292 | if (intreg & ESP_INTR_SR) | ||
293 | printk("SCSI_BUS_RESET "); | ||
294 | printk(">"); | ||
295 | } | ||
296 | |||
297 | /* Print the sequence step registers contents */ | ||
298 | static inline void esp_print_seqreg(u8 stepreg) | ||
299 | { | ||
300 | stepreg &= ESP_STEP_VBITS; | ||
301 | printk("STEP<%s>", | ||
302 | (stepreg == ESP_STEP_ASEL ? "SLCT_ARB_CMPLT" : | ||
303 | (stepreg == ESP_STEP_SID ? "1BYTE_MSG_SENT" : | ||
304 | (stepreg == ESP_STEP_NCMD ? "NOT_IN_CMD_PHASE" : | ||
305 | (stepreg == ESP_STEP_PPC ? "CMD_BYTES_LOST" : | ||
306 | (stepreg == ESP_STEP_FINI4 ? "CMD_SENT_OK" : | ||
307 | "UNKNOWN")))))); | ||
308 | } | ||
309 | |||
310 | static char *phase_string(int phase) | ||
311 | { | ||
312 | switch (phase) { | ||
313 | case not_issued: | ||
314 | return "UNISSUED"; | ||
315 | case in_slct_norm: | ||
316 | return "SLCTNORM"; | ||
317 | case in_slct_stop: | ||
318 | return "SLCTSTOP"; | ||
319 | case in_slct_msg: | ||
320 | return "SLCTMSG"; | ||
321 | case in_slct_tag: | ||
322 | return "SLCTTAG"; | ||
323 | case in_slct_sneg: | ||
324 | return "SLCTSNEG"; | ||
325 | case in_datain: | ||
326 | return "DATAIN"; | ||
327 | case in_dataout: | ||
328 | return "DATAOUT"; | ||
329 | case in_data_done: | ||
330 | return "DATADONE"; | ||
331 | case in_msgin: | ||
332 | return "MSGIN"; | ||
333 | case in_msgincont: | ||
334 | return "MSGINCONT"; | ||
335 | case in_msgindone: | ||
336 | return "MSGINDONE"; | ||
337 | case in_msgout: | ||
338 | return "MSGOUT"; | ||
339 | case in_msgoutdone: | ||
340 | return "MSGOUTDONE"; | ||
341 | case in_cmdbegin: | ||
342 | return "CMDBEGIN"; | ||
343 | case in_cmdend: | ||
344 | return "CMDEND"; | ||
345 | case in_status: | ||
346 | return "STATUS"; | ||
347 | case in_freeing: | ||
348 | return "FREEING"; | ||
349 | case in_the_dark: | ||
350 | return "CLUELESS"; | ||
351 | case in_abortone: | ||
352 | return "ABORTONE"; | ||
353 | case in_abortall: | ||
354 | return "ABORTALL"; | ||
355 | case in_resetdev: | ||
356 | return "RESETDEV"; | ||
357 | case in_resetbus: | ||
358 | return "RESETBUS"; | ||
359 | case in_tgterror: | ||
360 | return "TGTERROR"; | ||
361 | default: | ||
362 | return "UNKNOWN"; | ||
363 | }; | ||
364 | } | ||
365 | |||
366 | #ifdef DEBUG_STATE_MACHINE | ||
367 | static inline void esp_advance_phase(struct scsi_cmnd *s, int newphase) | ||
368 | { | ||
369 | ESPLOG(("<%s>", phase_string(newphase))); | ||
370 | s->SCp.sent_command = s->SCp.phase; | ||
371 | s->SCp.phase = newphase; | ||
372 | } | ||
373 | #else | ||
374 | #define esp_advance_phase(__s, __newphase) \ | ||
375 | do { (__s)->SCp.sent_command = (__s)->SCp.phase; \ | ||
376 | (__s)->SCp.phase = (__newphase); } while (0) | ||
377 | #endif | ||
378 | |||
379 | #ifdef DEBUG_ESP_CMDS | ||
380 | static inline void esp_cmd(struct esp *esp, u8 cmd) | ||
381 | { | ||
382 | esp->espcmdlog[esp->espcmdent] = cmd; | ||
383 | esp->espcmdent = (esp->espcmdent + 1) & 31; | ||
384 | sbus_writeb(cmd, esp->eregs + ESP_CMD); | ||
385 | } | ||
386 | #else | ||
387 | #define esp_cmd(__esp, __cmd) \ | ||
388 | sbus_writeb((__cmd), ((__esp)->eregs) + ESP_CMD) | ||
389 | #endif | ||
390 | |||
391 | #define ESP_INTSOFF(__dregs) \ | ||
392 | sbus_writel(sbus_readl((__dregs)+DMA_CSR)&~(DMA_INT_ENAB), (__dregs)+DMA_CSR) | ||
393 | #define ESP_INTSON(__dregs) \ | ||
394 | sbus_writel(sbus_readl((__dregs)+DMA_CSR)|DMA_INT_ENAB, (__dregs)+DMA_CSR) | ||
395 | #define ESP_IRQ_P(__dregs) \ | ||
396 | (sbus_readl((__dregs)+DMA_CSR) & (DMA_HNDL_INTR|DMA_HNDL_ERROR)) | ||
397 | |||
398 | /* How we use the various Linux SCSI data structures for operation. | ||
399 | * | ||
400 | * struct scsi_cmnd: | ||
401 | * | ||
402 | * We keep track of the synchronous capabilities of a target | ||
403 | * in the device member, using sync_min_period and | ||
404 | * sync_max_offset. These are the values we directly write | ||
405 | * into the ESP registers while running a command. If offset | ||
406 | * is zero the ESP will use asynchronous transfers. | ||
407 | * If the borken flag is set we assume we shouldn't even bother | ||
408 | * trying to negotiate for synchronous transfer as this target | ||
409 | * is really stupid. If we notice the target is dropping the | ||
410 | * bus, and we have been allowing it to disconnect, we clear | ||
411 | * the disconnect flag. | ||
412 | */ | ||
413 | |||
414 | |||
415 | /* Manipulation of the ESP command queues. Thanks to the aha152x driver | ||
416 | * and its author, Juergen E. Fischer, for the methods used here. | ||
417 | * Note that these are per-ESP queues, not global queues like | ||
418 | * the aha152x driver uses. | ||
419 | */ | ||
420 | static inline void append_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) | ||
421 | { | ||
422 | struct scsi_cmnd *end; | ||
423 | |||
424 | new_SC->host_scribble = (unsigned char *) NULL; | ||
425 | if (!*SC) | ||
426 | *SC = new_SC; | ||
427 | else { | ||
428 | for (end=*SC;end->host_scribble;end=(struct scsi_cmnd *)end->host_scribble) | ||
429 | ; | ||
430 | end->host_scribble = (unsigned char *) new_SC; | ||
431 | } | ||
432 | } | ||
433 | |||
434 | static inline void prepend_SC(struct scsi_cmnd **SC, struct scsi_cmnd *new_SC) | ||
435 | { | ||
436 | new_SC->host_scribble = (unsigned char *) *SC; | ||
437 | *SC = new_SC; | ||
438 | } | ||
439 | |||
440 | static inline struct scsi_cmnd *remove_first_SC(struct scsi_cmnd **SC) | ||
441 | { | ||
442 | struct scsi_cmnd *ptr; | ||
443 | ptr = *SC; | ||
444 | if (ptr) | ||
445 | *SC = (struct scsi_cmnd *) (*SC)->host_scribble; | ||
446 | return ptr; | ||
447 | } | ||
448 | |||
449 | static inline struct scsi_cmnd *remove_SC(struct scsi_cmnd **SC, int target, int lun) | ||
450 | { | ||
451 | struct scsi_cmnd *ptr, *prev; | ||
452 | |||
453 | for (ptr = *SC, prev = NULL; | ||
454 | ptr && ((ptr->device->id != target) || (ptr->device->lun != lun)); | ||
455 | prev = ptr, ptr = (struct scsi_cmnd *) ptr->host_scribble) | ||
456 | ; | ||
457 | if (ptr) { | ||
458 | if (prev) | ||
459 | prev->host_scribble=ptr->host_scribble; | ||
460 | else | ||
461 | *SC=(struct scsi_cmnd *)ptr->host_scribble; | ||
462 | } | ||
463 | return ptr; | ||
464 | } | ||
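These helpers build the per-ESP queues by reusing the command's host_scribble field as a "next" pointer, giving an intrusive singly-linked list with no extra allocation. Below is a minimal user-space sketch of the same technique; struct fake_cmd and its helper names are this sketch's own simplified stand-ins, not the real SCSI midlayer types.

    #include <stdio.h>
    #include <stddef.h>

    /* Simplified stand-in for struct scsi_cmnd: one spare pointer
     * field doubles as the "next" link of the issue queue. */
    struct fake_cmd {
            int target, lun;
            unsigned char *host_scribble;   /* reused as next pointer */
    };

    static void append_cmd(struct fake_cmd **q, struct fake_cmd *new_cmd)
    {
            struct fake_cmd *end;

            new_cmd->host_scribble = NULL;
            if (!*q) {
                    *q = new_cmd;
                    return;
            }
            /* As in append_SC() above, walk to the tail before linking. */
            for (end = *q; end->host_scribble;
                 end = (struct fake_cmd *) end->host_scribble)
                    ;
            end->host_scribble = (unsigned char *) new_cmd;
    }

    static struct fake_cmd *remove_first_cmd(struct fake_cmd **q)
    {
            struct fake_cmd *ptr = *q;

            if (ptr)
                    *q = (struct fake_cmd *) ptr->host_scribble;
            return ptr;
    }

    int main(void)
    {
            struct fake_cmd a = { 1, 0, NULL }, b = { 3, 0, NULL }, *p;
            struct fake_cmd *issue_q = NULL;

            append_cmd(&issue_q, &a);
            append_cmd(&issue_q, &b);
            while ((p = remove_first_cmd(&issue_q)) != NULL)
                    printf("issuing target %d lun %d\n", p->target, p->lun);
            return 0;
    }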
465 | |||
466 | /* Resetting various pieces of the ESP scsi driver chipset/buses. */ | ||
467 | static void esp_reset_dma(struct esp *esp) | ||
468 | { | ||
469 | int can_do_burst16, can_do_burst32, can_do_burst64; | ||
470 | int can_do_sbus64; | ||
471 | u32 tmp; | ||
472 | |||
473 | can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; | ||
474 | can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; | ||
475 | can_do_burst64 = 0; | ||
476 | can_do_sbus64 = 0; | ||
477 | if (sbus_can_dma_64bit(esp->sdev)) | ||
478 | can_do_sbus64 = 1; | ||
479 | if (sbus_can_burst64(esp->sdev)) | ||
480 | can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; | ||
481 | |||
482 | /* Punt the DVMA into a known state. */ | ||
483 | if (esp->dma->revision != dvmahme) { | ||
484 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
485 | sbus_writel(tmp | DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
486 | sbus_writel(tmp & ~DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
487 | } | ||
488 | switch (esp->dma->revision) { | ||
489 | case dvmahme: | ||
490 | /* This is the HME DVMA gate array. */ | ||
491 | |||
492 | sbus_writel(DMA_RESET_FAS366, esp->dregs + DMA_CSR); | ||
493 | sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
494 | |||
495 | esp->prev_hme_dmacsr = (DMA_PARITY_OFF|DMA_2CLKS|DMA_SCSI_DISAB|DMA_INT_ENAB); | ||
496 | esp->prev_hme_dmacsr &= ~(DMA_ENABLE|DMA_ST_WRITE|DMA_BRST_SZ); | ||
497 | |||
498 | if (can_do_burst64) | ||
499 | esp->prev_hme_dmacsr |= DMA_BRST64; | ||
500 | else if (can_do_burst32) | ||
501 | esp->prev_hme_dmacsr |= DMA_BRST32; | ||
502 | |||
503 | if (can_do_sbus64) { | ||
504 | esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; | ||
505 | sbus_set_sbus64(esp->sdev, esp->bursts); | ||
506 | } | ||
507 | |||
508 | /* This chip is horrible. */ | ||
509 | while (sbus_readl(esp->dregs + DMA_CSR) & DMA_PEND_READ) | ||
510 | udelay(1); | ||
511 | |||
512 | sbus_writel(0, esp->dregs + DMA_CSR); | ||
513 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
514 | |||
515 | /* This is necessary to avoid having the SCSI channel | ||
516 | * engine lock up on us. | ||
517 | */ | ||
518 | sbus_writel(0, esp->dregs + DMA_ADDR); | ||
519 | |||
520 | break; | ||
521 | case dvmarev2: | ||
522 | /* This is the gate array found in the sun4m | ||
523 | * NCR SBUS I/O subsystem. | ||
524 | */ | ||
525 | if (esp->erev != esp100) { | ||
526 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
527 | sbus_writel(tmp | DMA_3CLKS, esp->dregs + DMA_CSR); | ||
528 | } | ||
529 | break; | ||
530 | case dvmarev3: | ||
531 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
532 | tmp &= ~DMA_3CLKS; | ||
533 | tmp |= DMA_2CLKS; | ||
534 | if (can_do_burst32) { | ||
535 | tmp &= ~DMA_BRST_SZ; | ||
536 | tmp |= DMA_BRST32; | ||
537 | } | ||
538 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
539 | break; | ||
540 | case dvmaesc1: | ||
541 | /* This is the DMA unit found on SCSI/Ether cards. */ | ||
542 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
543 | tmp |= DMA_ADD_ENABLE; | ||
544 | tmp &= ~DMA_BCNT_ENAB; | ||
545 | if (!can_do_burst32 && can_do_burst16) { | ||
546 | tmp |= DMA_ESC_BURST; | ||
547 | } else { | ||
548 | tmp &= ~(DMA_ESC_BURST); | ||
549 | } | ||
550 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
551 | break; | ||
552 | default: | ||
553 | break; | ||
554 | }; | ||
555 | ESP_INTSON(esp->dregs); | ||
556 | } | ||
557 | |||
558 | /* Reset the ESP chip, _not_ the SCSI bus. */ | ||
559 | static void __init esp_reset_esp(struct esp *esp) | ||
560 | { | ||
561 | u8 family_code, version; | ||
562 | int i; | ||
563 | |||
564 | /* Now reset the ESP chip */ | ||
565 | esp_cmd(esp, ESP_CMD_RC); | ||
566 | esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
567 | esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
568 | |||
569 | /* Reload the configuration registers */ | ||
570 | sbus_writeb(esp->cfact, esp->eregs + ESP_CFACT); | ||
571 | esp->prev_stp = 0; | ||
572 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
573 | esp->prev_soff = 0; | ||
574 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
575 | sbus_writeb(esp->neg_defp, esp->eregs + ESP_TIMEO); | ||
576 | |||
577 | /* This is the only point at which it is reliable to read | ||
578 | * the ID-code for the fast ESP chip variants. | ||
579 | */ | ||
580 | esp->max_period = ((35 * esp->ccycle) / 1000); | ||
581 | if (esp->erev == fast) { | ||
582 | version = sbus_readb(esp->eregs + ESP_UID); | ||
583 | family_code = (version & 0xf8) >> 3; | ||
584 | if (family_code == 0x02) | ||
585 | esp->erev = fas236; | ||
586 | else if (family_code == 0x0a) | ||
587 | esp->erev = fashme; /* Version is usually '5'. */ | ||
588 | else | ||
589 | esp->erev = fas100a; | ||
590 | ESPMISC(("esp%d: FAST chip is %s (family=%d, version=%d)\n", | ||
591 | esp->esp_id, | ||
592 | (esp->erev == fas236) ? "fas236" : | ||
593 | ((esp->erev == fas100a) ? "fas100a" : | ||
594 | "fasHME"), family_code, (version & 7))); | ||
595 | |||
596 | esp->min_period = ((4 * esp->ccycle) / 1000); | ||
597 | } else { | ||
598 | esp->min_period = ((5 * esp->ccycle) / 1000); | ||
599 | } | ||
600 | esp->max_period = (esp->max_period + 3)>>2; | ||
601 | esp->min_period = (esp->min_period + 3)>>2; | ||
602 | |||
603 | sbus_writeb(esp->config1, esp->eregs + ESP_CFG1); | ||
604 | switch (esp->erev) { | ||
605 | case esp100: | ||
606 | /* nothing to do */ | ||
607 | break; | ||
608 | case esp100a: | ||
609 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
610 | break; | ||
611 | case esp236: | ||
612 | /* Slow 236 */ | ||
613 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
614 | esp->prev_cfg3 = esp->config3[0]; | ||
615 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
616 | break; | ||
617 | case fashme: | ||
618 | esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); | ||
619 | /* fallthrough... */ | ||
620 | case fas236: | ||
621 | /* Fast 236 or HME */ | ||
622 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
623 | for (i = 0; i < 16; i++) { | ||
624 | if (esp->erev == fashme) { | ||
625 | u8 cfg3; | ||
626 | |||
627 | cfg3 = ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; | ||
628 | if (esp->scsi_id >= 8) | ||
629 | cfg3 |= ESP_CONFIG3_IDBIT3; | ||
630 | esp->config3[i] |= cfg3; | ||
631 | } else { | ||
632 | esp->config3[i] |= ESP_CONFIG3_FCLK; | ||
633 | } | ||
634 | } | ||
635 | esp->prev_cfg3 = esp->config3[0]; | ||
636 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
637 | if (esp->erev == fashme) { | ||
638 | esp->radelay = 80; | ||
639 | } else { | ||
640 | if (esp->diff) | ||
641 | esp->radelay = 0; | ||
642 | else | ||
643 | esp->radelay = 96; | ||
644 | } | ||
645 | break; | ||
646 | case fas100a: | ||
647 | /* Fast 100a */ | ||
648 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
649 | for (i = 0; i < 16; i++) | ||
650 | esp->config3[i] |= ESP_CONFIG3_FCLOCK; | ||
651 | esp->prev_cfg3 = esp->config3[0]; | ||
652 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
653 | esp->radelay = 32; | ||
654 | break; | ||
655 | default: | ||
656 | panic("esp: what could it be... I wonder..."); | ||
657 | break; | ||
658 | }; | ||
659 | |||
660 | /* Eat any bitrot in the chip */ | ||
661 | sbus_readb(esp->eregs + ESP_INTRPT); | ||
662 | udelay(100); | ||
663 | } | ||
664 | |||
665 | /* This places the ESP into a known state at boot time. */ | ||
666 | static void __init esp_bootup_reset(struct esp *esp) | ||
667 | { | ||
668 | u8 tmp; | ||
669 | |||
670 | /* Reset the DMA */ | ||
671 | esp_reset_dma(esp); | ||
672 | |||
673 | /* Reset the ESP */ | ||
674 | esp_reset_esp(esp); | ||
675 | |||
676 | /* Reset the SCSI bus, but tell ESP not to generate an irq */ | ||
677 | tmp = sbus_readb(esp->eregs + ESP_CFG1); | ||
678 | tmp |= ESP_CONFIG1_SRRDISAB; | ||
679 | sbus_writeb(tmp, esp->eregs + ESP_CFG1); | ||
680 | |||
681 | esp_cmd(esp, ESP_CMD_RS); | ||
682 | udelay(400); | ||
683 | |||
684 | sbus_writeb(esp->config1, esp->eregs + ESP_CFG1); | ||
685 | |||
686 | /* Eat any bitrot in the chip and we are done... */ | ||
687 | sbus_readb(esp->eregs + ESP_INTRPT); | ||
688 | } | ||
689 | |||
690 | static int __init esp_find_dvma(struct esp *esp, struct sbus_dev *dma_sdev) | ||
691 | { | ||
692 | struct sbus_dev *sdev = esp->sdev; | ||
693 | struct sbus_dma *dma; | ||
694 | |||
695 | if (dma_sdev != NULL) { | ||
696 | for_each_dvma(dma) { | ||
697 | if (dma->sdev == dma_sdev) | ||
698 | break; | ||
699 | } | ||
700 | } else { | ||
701 | for_each_dvma(dma) { | ||
702 | /* If allocated already, can't use it. */ | ||
703 | if (dma->allocated) | ||
704 | continue; | ||
705 | |||
706 | if (dma->sdev == NULL) | ||
707 | break; | ||
708 | |||
709 | /* If bus + slot are the same and it has the | ||
710 | * correct OBP name, it's ours. | ||
711 | */ | ||
712 | if (sdev->bus == dma->sdev->bus && | ||
713 | sdev->slot == dma->sdev->slot && | ||
714 | (!strcmp(dma->sdev->prom_name, "dma") || | ||
715 | !strcmp(dma->sdev->prom_name, "espdma"))) | ||
716 | break; | ||
717 | } | ||
718 | } | ||
719 | |||
720 | /* If we don't know how to handle the dvma, | ||
721 | * do not use this device. | ||
722 | */ | ||
723 | if (dma == NULL) { | ||
724 | printk("Cannot find dvma for ESP%d's SCSI\n", esp->esp_id); | ||
725 | return -1; | ||
726 | } | ||
727 | if (dma->allocated) { | ||
728 | printk("esp%d: can't use my espdma\n", esp->esp_id); | ||
729 | return -1; | ||
730 | } | ||
731 | dma->allocated = 1; | ||
732 | esp->dma = dma; | ||
733 | esp->dregs = dma->regs; | ||
734 | |||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | static int __init esp_map_regs(struct esp *esp, int hme) | ||
739 | { | ||
740 | struct sbus_dev *sdev = esp->sdev; | ||
741 | struct resource *res; | ||
742 | |||
743 | /* On HME, two reg sets exist, first is DVMA, | ||
744 | * second is ESP registers. | ||
745 | */ | ||
746 | if (hme) | ||
747 | res = &sdev->resource[1]; | ||
748 | else | ||
749 | res = &sdev->resource[0]; | ||
750 | |||
751 | esp->eregs = sbus_ioremap(res, 0, ESP_REG_SIZE, "ESP Registers"); | ||
752 | |||
753 | if (esp->eregs == 0) | ||
754 | return -1; | ||
755 | return 0; | ||
756 | } | ||
757 | |||
758 | static int __init esp_map_cmdarea(struct esp *esp) | ||
759 | { | ||
760 | struct sbus_dev *sdev = esp->sdev; | ||
761 | |||
762 | esp->esp_command = sbus_alloc_consistent(sdev, 16, | ||
763 | &esp->esp_command_dvma); | ||
764 | if (esp->esp_command == NULL || | ||
765 | esp->esp_command_dvma == 0) | ||
766 | return -1; | ||
767 | return 0; | ||
768 | } | ||
769 | |||
770 | static int __init esp_register_irq(struct esp *esp) | ||
771 | { | ||
772 | esp->ehost->irq = esp->irq = esp->sdev->irqs[0]; | ||
773 | |||
774 | /* We used to try various overly-clever things to | ||
775 | * reduce the interrupt processing overhead on | ||
776 | * sun4c/sun4m when multiple ESP's shared the | ||
777 | * same IRQ. It was too complex and messy to | ||
778 | * sanely maintain. | ||
779 | */ | ||
780 | if (request_irq(esp->ehost->irq, esp_intr, | ||
781 | IRQF_SHARED, "ESP SCSI", esp)) { | ||
782 | printk("esp%d: Cannot acquire irq line\n", | ||
783 | esp->esp_id); | ||
784 | return -1; | ||
785 | } | ||
786 | |||
787 | printk("esp%d: IRQ %d ", esp->esp_id, | ||
788 | esp->ehost->irq); | ||
789 | |||
790 | return 0; | ||
791 | } | ||
792 | |||
793 | static void __init esp_get_scsi_id(struct esp *esp) | ||
794 | { | ||
795 | struct sbus_dev *sdev = esp->sdev; | ||
796 | struct device_node *dp = sdev->ofdev.node; | ||
797 | |||
798 | esp->scsi_id = of_getintprop_default(dp, | ||
799 | "initiator-id", | ||
800 | -1); | ||
801 | if (esp->scsi_id == -1) | ||
802 | esp->scsi_id = of_getintprop_default(dp, | ||
803 | "scsi-initiator-id", | ||
804 | -1); | ||
805 | if (esp->scsi_id == -1) | ||
806 | esp->scsi_id = (sdev->bus == NULL) ? 7 : | ||
807 | of_getintprop_default(sdev->bus->ofdev.node, | ||
808 | "scsi-initiator-id", | ||
809 | 7); | ||
810 | esp->ehost->this_id = esp->scsi_id; | ||
811 | esp->scsi_id_mask = (1 << esp->scsi_id); | ||
812 | |||
813 | } | ||
814 | |||
815 | static void __init esp_get_clock_params(struct esp *esp) | ||
816 | { | ||
817 | struct sbus_dev *sdev = esp->sdev; | ||
818 | int prom_node = esp->prom_node; | ||
819 | int sbus_prom_node; | ||
820 | unsigned int fmhz; | ||
821 | u8 ccf; | ||
822 | |||
823 | if (sdev != NULL && sdev->bus != NULL) | ||
824 | sbus_prom_node = sdev->bus->prom_node; | ||
825 | else | ||
826 | sbus_prom_node = 0; | ||
827 | |||
828 | /* This is getting messy but it has to be done | ||
829 | * correctly or else you get weird behavior all | ||
830 | * over the place. We are basically trying to | ||
831 | * figure out three pieces of information. | ||
832 | * | ||
833 | * a) Clock Conversion Factor | ||
834 | * | ||
835 | * This is a representation of the input | ||
836 | * crystal clock frequency going into the | ||
837 | * ESP on this machine. Any operation whose | ||
838 | * timing is longer than 400ns depends on this | ||
839 | * value being correct. For example, you'll | ||
840 | * get blips for arbitration/selection during | ||
841 | * high load or with multiple targets if this | ||
842 | * is not set correctly. | ||
843 | * | ||
844 | * b) Selection Time-Out | ||
845 | * | ||
846 | * The ESP isn't very bright and will arbitrate | ||
847 | * for the bus and try to select a target | ||
848 | * forever if you let it. This value tells | ||
849 | * the ESP when it has taken too long to | ||
850 | * negotiate and that it should interrupt | ||
851 | * the CPU so we can see what happened. | ||
852 | * The value is computed as follows (from | ||
853 | * NCR/Symbios chip docs). | ||
854 | * | ||
855 | * (Time Out Period) * (Input Clock) | ||
856 | * STO = ---------------------------------- | ||
857 | * (8192) * (Clock Conversion Factor) | ||
858 | * | ||
859 | * You usually want the time out period to be | ||
860 | * around 250ms, I think we'll set it a little | ||
861 | * bit higher to account for fully loaded SCSI | ||
862 | * bus's and slow devices that don't respond so | ||
863 | * quickly to selection attempts. (yeah, I know | ||
864 | * this is out of spec. but there is a lot of | ||
865 | * buggy pieces of firmware out there so bite me) | ||
866 | * | ||
867 | * c) Empirical constants for synchronous offset | ||
868 | * and transfer period register values | ||
869 | * | ||
870 | * This entails the smallest and largest sync | ||
871 | * period we could ever handle on this ESP. | ||
872 | */ | ||
873 | |||
874 | fmhz = prom_getintdefault(prom_node, "clock-frequency", -1); | ||
875 | if (fmhz == -1) | ||
876 | fmhz = (!sbus_prom_node) ? 0 : | ||
877 | prom_getintdefault(sbus_prom_node, "clock-frequency", -1); | ||
878 | |||
879 | if (fmhz <= (5000000)) | ||
880 | ccf = 0; | ||
881 | else | ||
882 | ccf = (((5000000 - 1) + (fmhz))/(5000000)); | ||
883 | |||
884 | if (!ccf || ccf > 8) { | ||
885 | /* If we can't find anything reasonable, | ||
886 | * just assume 20MHZ. This is the clock | ||
887 | * frequency of the older sun4c's where I've | ||
888 | * been unable to find the clock-frequency | ||
889 | * PROM property. All other machines provide | ||
890 | * useful values it seems. | ||
891 | */ | ||
892 | ccf = ESP_CCF_F4; | ||
893 | fmhz = (20000000); | ||
894 | } | ||
895 | |||
896 | if (ccf == (ESP_CCF_F7 + 1)) | ||
897 | esp->cfact = ESP_CCF_F0; | ||
898 | else if (ccf == ESP_CCF_NEVER) | ||
899 | esp->cfact = ESP_CCF_F2; | ||
900 | else | ||
901 | esp->cfact = ccf; | ||
902 | esp->raw_cfact = ccf; | ||
903 | |||
904 | esp->cfreq = fmhz; | ||
905 | esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); | ||
906 | esp->ctick = ESP_TICK(ccf, esp->ccycle); | ||
907 | esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); | ||
908 | esp->sync_defp = SYNC_DEFP_SLOW; | ||
909 | |||
910 | printk("SCSI ID %d Clk %dMHz CCYC=%d CCF=%d TOut %d ", | ||
911 | esp->scsi_id, (fmhz / 1000000), | ||
912 | (int)esp->ccycle, (int)ccf, (int) esp->neg_defp); | ||
913 | } | ||
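As a rough cross-check of the formula quoted in the comment above (a stand-alone sketch only; the driver's ESP_NEG_DEFP macro, register-width clamping and rounding are not reproduced here): the clock conversion factor is just the input clock rounded up to a multiple of 5 MHz, and a 250 ms time-out on the 20 MHz fallback clock works out to CCF = 4 and a selection time-out value of about 152.

    #include <stdio.h>

    /* Ceiling division by 5 MHz, mirroring the ccf computation above. */
    static unsigned int ccf_for(unsigned long fhz)
    {
            if (fhz <= 5000000UL)
                    return 0;
            return (unsigned int)((fhz + 5000000UL - 1) / 5000000UL);
    }

    /* STO = (Time Out Period * Input Clock) / (8192 * CCF),
     * with the time-out period given here in milliseconds. */
    static unsigned long sto_for(unsigned long fhz, unsigned int ccf,
                                 unsigned long timeout_ms)
    {
            return (unsigned long)(((unsigned long long) timeout_ms * fhz) /
                                   (8192ULL * ccf * 1000ULL));
    }

    int main(void)
    {
            unsigned long fhz = 20000000UL;      /* 20 MHz fallback clock */
            unsigned int ccf = ccf_for(fhz);     /* -> 4 */

            printf("CCF=%u STO(250ms)=%lu\n", ccf, sto_for(fhz, ccf, 250));
            return 0;
    }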
914 | |||
915 | static void __init esp_get_bursts(struct esp *esp, struct sbus_dev *dma) | ||
916 | { | ||
917 | struct sbus_dev *sdev = esp->sdev; | ||
918 | u8 bursts; | ||
919 | |||
920 | bursts = prom_getintdefault(esp->prom_node, "burst-sizes", 0xff); | ||
921 | |||
922 | if (dma) { | ||
923 | u8 tmp = prom_getintdefault(dma->prom_node, | ||
924 | "burst-sizes", 0xff); | ||
925 | if (tmp != 0xff) | ||
926 | bursts &= tmp; | ||
927 | } | ||
928 | |||
929 | if (sdev->bus) { | ||
930 | u8 tmp = prom_getintdefault(sdev->bus->prom_node, | ||
931 | "burst-sizes", 0xff); | ||
932 | if (tmp != 0xff) | ||
933 | bursts &= tmp; | ||
934 | } | ||
935 | |||
936 | if (bursts == 0xff || | ||
937 | (bursts & DMA_BURST16) == 0 || | ||
938 | (bursts & DMA_BURST32) == 0) | ||
939 | bursts = (DMA_BURST32 - 1); | ||
940 | |||
941 | esp->bursts = bursts; | ||
942 | } | ||
943 | |||
944 | static void __init esp_get_revision(struct esp *esp) | ||
945 | { | ||
946 | u8 tmp; | ||
947 | |||
948 | esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); | ||
949 | esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); | ||
950 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
951 | |||
952 | tmp = sbus_readb(esp->eregs + ESP_CFG2); | ||
953 | tmp &= ~ESP_CONFIG2_MAGIC; | ||
954 | if (tmp != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { | ||
955 | /* If what we write to cfg2 does not come back, cfg2 | ||
956 | * is not implemented, therefore this must be a plain | ||
957 | * esp100. | ||
958 | */ | ||
959 | esp->erev = esp100; | ||
960 | printk("NCR53C90(esp100)\n"); | ||
961 | } else { | ||
962 | esp->config2 = 0; | ||
963 | esp->prev_cfg3 = esp->config3[0] = 5; | ||
964 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
965 | sbus_writeb(0, esp->eregs + ESP_CFG3); | ||
966 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
967 | |||
968 | tmp = sbus_readb(esp->eregs + ESP_CFG3); | ||
969 | if (tmp != 5) { | ||
970 | /* The cfg2 register is implemented, however | ||
971 | * cfg3 is not, must be esp100a. | ||
972 | */ | ||
973 | esp->erev = esp100a; | ||
974 | printk("NCR53C90A(esp100a)\n"); | ||
975 | } else { | ||
976 | int target; | ||
977 | |||
978 | for (target = 0; target < 16; target++) | ||
979 | esp->config3[target] = 0; | ||
980 | esp->prev_cfg3 = 0; | ||
981 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
982 | |||
983 | /* All of cfg{1,2,3} implemented, must be one of | ||
984 | * the fas variants, figure out which one. | ||
985 | */ | ||
986 | if (esp->raw_cfact > ESP_CCF_F5) { | ||
987 | esp->erev = fast; | ||
988 | esp->sync_defp = SYNC_DEFP_FAST; | ||
989 | printk("NCR53C9XF(espfast)\n"); | ||
990 | } else { | ||
991 | esp->erev = esp236; | ||
992 | printk("NCR53C9x(esp236)\n"); | ||
993 | } | ||
994 | esp->config2 = 0; | ||
995 | sbus_writeb(esp->config2, esp->eregs + ESP_CFG2); | ||
996 | } | ||
997 | } | ||
998 | } | ||
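The revision probe above boils down to a write-then-read-back test: a value written to CFG2 (and then CFG3) either comes back, proving the register exists, or it does not, pinning the chip to an older variant. Here is a small user-space model of that decision ladder; the fake register file is purely illustrative, with negative entries standing for "register not implemented".

    #include <stdio.h>

    /* Fake register file: a negative entry means the register is not
     * implemented, so writes are dropped and reads return junk (0). */
    static int fake_regs[3];

    static void reg_write(int idx, int val)
    {
            if (fake_regs[idx] >= 0)
                    fake_regs[idx] = val;
    }

    static int reg_read(int idx)
    {
            return fake_regs[idx] < 0 ? 0 : fake_regs[idx];
    }

    static const char *probe(void)
    {
            reg_write(1, 0x12);             /* "cfg2" */
            if (reg_read(1) != 0x12)
                    return "esp100";        /* cfg2 not implemented */
            reg_write(2, 5);                /* "cfg3" */
            if (reg_read(2) != 5)
                    return "esp100a";       /* cfg2 yes, cfg3 no */
            return "esp236/fas";            /* all three implemented */
    }

    int main(void)
    {
            fake_regs[0] = 0; fake_regs[1] = -1; fake_regs[2] = -1;
            printf("%s\n", probe());        /* esp100 */
            fake_regs[1] = 0;
            printf("%s\n", probe());        /* esp100a */
            fake_regs[2] = 0;
            printf("%s\n", probe());        /* esp236/fas */
            return 0;
    }

The real code additionally masks off ESP_CONFIG2_MAGIC before comparing, and uses the raw clock conversion factor to tell the fast variants apart from a plain esp236.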
999 | |||
1000 | static void __init esp_init_swstate(struct esp *esp) | ||
1001 | { | ||
1002 | int i; | ||
1003 | |||
1004 | /* Command queues... */ | ||
1005 | esp->current_SC = NULL; | ||
1006 | esp->disconnected_SC = NULL; | ||
1007 | esp->issue_SC = NULL; | ||
1008 | |||
1009 | /* Target and current command state... */ | ||
1010 | esp->targets_present = 0; | ||
1011 | esp->resetting_bus = 0; | ||
1012 | esp->snip = 0; | ||
1013 | |||
1014 | init_waitqueue_head(&esp->reset_queue); | ||
1015 | |||
1016 | /* Debugging... */ | ||
1017 | for(i = 0; i < 32; i++) | ||
1018 | esp->espcmdlog[i] = 0; | ||
1019 | esp->espcmdent = 0; | ||
1020 | |||
1021 | /* MSG phase state... */ | ||
1022 | for(i = 0; i < 16; i++) { | ||
1023 | esp->cur_msgout[i] = 0; | ||
1024 | esp->cur_msgin[i] = 0; | ||
1025 | } | ||
1026 | esp->prevmsgout = esp->prevmsgin = 0; | ||
1027 | esp->msgout_len = esp->msgin_len = 0; | ||
1028 | |||
1029 | /* Clear the one-behind caches to hold unmatchable values. */ | ||
1030 | esp->prev_soff = esp->prev_stp = esp->prev_cfg3 = 0xff; | ||
1031 | esp->prev_hme_dmacsr = 0xffffffff; | ||
1032 | } | ||
1033 | |||
1034 | static int __init detect_one_esp(struct scsi_host_template *tpnt, | ||
1035 | struct device *dev, | ||
1036 | struct sbus_dev *esp_dev, | ||
1037 | struct sbus_dev *espdma, | ||
1038 | struct sbus_bus *sbus, | ||
1039 | int hme) | ||
1040 | { | ||
1041 | static int instance; | ||
1042 | struct Scsi_Host *esp_host = scsi_host_alloc(tpnt, sizeof(struct esp)); | ||
1043 | struct esp *esp; | ||
1044 | |||
1045 | if (!esp_host) | ||
1046 | return -ENOMEM; | ||
1047 | |||
1048 | if (hme) | ||
1049 | esp_host->max_id = 16; | ||
1050 | esp = (struct esp *) esp_host->hostdata; | ||
1051 | esp->ehost = esp_host; | ||
1052 | esp->sdev = esp_dev; | ||
1053 | esp->esp_id = instance; | ||
1054 | esp->prom_node = esp_dev->prom_node; | ||
1055 | prom_getstring(esp->prom_node, "name", esp->prom_name, | ||
1056 | sizeof(esp->prom_name)); | ||
1057 | |||
1058 | if (esp_find_dvma(esp, espdma) < 0) | ||
1059 | goto fail_unlink; | ||
1060 | if (esp_map_regs(esp, hme) < 0) { | ||
1061 | printk("ESP registers unmappable"); | ||
1062 | goto fail_dvma_release; | ||
1063 | } | ||
1064 | if (esp_map_cmdarea(esp) < 0) { | ||
1065 | printk("ESP DVMA transport area unmappable"); | ||
1066 | goto fail_unmap_regs; | ||
1067 | } | ||
1068 | if (esp_register_irq(esp) < 0) | ||
1069 | goto fail_unmap_cmdarea; | ||
1070 | |||
1071 | esp_get_scsi_id(esp); | ||
1072 | |||
1073 | esp->diff = prom_getbool(esp->prom_node, "differential"); | ||
1074 | if (esp->diff) | ||
1075 | printk("Differential "); | ||
1076 | |||
1077 | esp_get_clock_params(esp); | ||
1078 | esp_get_bursts(esp, espdma); | ||
1079 | esp_get_revision(esp); | ||
1080 | esp_init_swstate(esp); | ||
1081 | |||
1082 | esp_bootup_reset(esp); | ||
1083 | |||
1084 | if (scsi_add_host(esp_host, dev)) | ||
1085 | goto fail_free_irq; | ||
1086 | |||
1087 | dev_set_drvdata(&esp_dev->ofdev.dev, esp); | ||
1088 | |||
1089 | scsi_scan_host(esp_host); | ||
1090 | instance++; | ||
1091 | |||
1092 | return 0; | ||
1093 | |||
1094 | fail_free_irq: | ||
1095 | free_irq(esp->ehost->irq, esp); | ||
1096 | |||
1097 | fail_unmap_cmdarea: | ||
1098 | sbus_free_consistent(esp->sdev, 16, | ||
1099 | (void *) esp->esp_command, | ||
1100 | esp->esp_command_dvma); | ||
1101 | |||
1102 | fail_unmap_regs: | ||
1103 | sbus_iounmap(esp->eregs, ESP_REG_SIZE); | ||
1104 | |||
1105 | fail_dvma_release: | ||
1106 | esp->dma->allocated = 0; | ||
1107 | |||
1108 | fail_unlink: | ||
1109 | scsi_host_put(esp_host); | ||
1110 | return -1; | ||
1111 | } | ||
1112 | |||
1113 | /* Detecting ESP chips on the machine. This is the simple and easy | ||
1114 | * version. | ||
1115 | */ | ||
1116 | static int __devexit esp_remove_common(struct esp *esp) | ||
1117 | { | ||
1118 | unsigned int irq = esp->ehost->irq; | ||
1119 | |||
1120 | scsi_remove_host(esp->ehost); | ||
1121 | |||
1122 | ESP_INTSOFF(esp->dregs); | ||
1123 | #if 0 | ||
1124 | esp_reset_dma(esp); | ||
1125 | esp_reset_esp(esp); | ||
1126 | #endif | ||
1127 | |||
1128 | free_irq(irq, esp); | ||
1129 | sbus_free_consistent(esp->sdev, 16, | ||
1130 | (void *) esp->esp_command, esp->esp_command_dvma); | ||
1131 | sbus_iounmap(esp->eregs, ESP_REG_SIZE); | ||
1132 | esp->dma->allocated = 0; | ||
1133 | |||
1134 | scsi_host_put(esp->ehost); | ||
1135 | |||
1136 | return 0; | ||
1137 | } | ||
1138 | |||
1139 | |||
1140 | #ifdef CONFIG_SUN4 | ||
1141 | |||
1142 | #include <asm/sun4paddr.h> | ||
1143 | |||
1144 | static struct sbus_dev sun4_esp_dev; | ||
1145 | |||
1146 | static int __init esp_sun4_probe(struct scsi_host_template *tpnt) | ||
1147 | { | ||
1148 | if (sun4_esp_physaddr) { | ||
1149 | memset(&sun4_esp_dev, 0, sizeof(sun4_esp_dev)); | ||
1150 | sun4_esp_dev.reg_addrs[0].phys_addr = sun4_esp_physaddr; | ||
1151 | sun4_esp_dev.irqs[0] = 4; | ||
1152 | sun4_esp_dev.resource[0].start = sun4_esp_physaddr; | ||
1153 | sun4_esp_dev.resource[0].end = | ||
1154 | sun4_esp_physaddr + ESP_REG_SIZE - 1; | ||
1155 | sun4_esp_dev.resource[0].flags = IORESOURCE_IO; | ||
1156 | |||
1157 | return detect_one_esp(tpnt, NULL, | ||
1158 | &sun4_esp_dev, NULL, NULL, 0); | ||
1159 | } | ||
1160 | return 0; | ||
1161 | } | ||
1162 | |||
1163 | static int __devexit esp_sun4_remove(void) | ||
1164 | { | ||
1165 | struct of_device *dev = &sun4_esp_dev.ofdev; | ||
1166 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
1167 | |||
1168 | return esp_remove_common(esp); | ||
1169 | } | ||
1170 | |||
1171 | #else /* !CONFIG_SUN4 */ | ||
1172 | |||
1173 | static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) | ||
1174 | { | ||
1175 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | ||
1176 | struct device_node *dp = dev->node; | ||
1177 | struct sbus_dev *dma_sdev = NULL; | ||
1178 | int hme = 0; | ||
1179 | |||
1180 | if (dp->parent && | ||
1181 | (!strcmp(dp->parent->name, "espdma") || | ||
1182 | !strcmp(dp->parent->name, "dma"))) | ||
1183 | dma_sdev = sdev->parent; | ||
1184 | else if (!strcmp(dp->name, "SUNW,fas")) { | ||
1185 | dma_sdev = sdev; | ||
1186 | hme = 1; | ||
1187 | } | ||
1188 | |||
1189 | return detect_one_esp(match->data, &dev->dev, | ||
1190 | sdev, dma_sdev, sdev->bus, hme); | ||
1191 | } | ||
1192 | |||
1193 | static int __devexit esp_sbus_remove(struct of_device *dev) | ||
1194 | { | ||
1195 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
1196 | |||
1197 | return esp_remove_common(esp); | ||
1198 | } | ||
1199 | |||
1200 | #endif /* !CONFIG_SUN4 */ | ||
1201 | |||
1202 | /* The info function will return whatever useful | ||
1203 | * information the developer sees fit to provide. If not | ||
1204 | * provided, the name field will be used instead. | ||
1205 | */ | ||
1206 | static const char *esp_info(struct Scsi_Host *host) | ||
1207 | { | ||
1208 | struct esp *esp; | ||
1209 | |||
1210 | esp = (struct esp *) host->hostdata; | ||
1211 | switch (esp->erev) { | ||
1212 | case esp100: | ||
1213 | return "Sparc ESP100 (NCR53C90)"; | ||
1214 | case esp100a: | ||
1215 | return "Sparc ESP100A (NCR53C90A)"; | ||
1216 | case esp236: | ||
1217 | return "Sparc ESP236"; | ||
1218 | case fas236: | ||
1219 | return "Sparc ESP236-FAST"; | ||
1220 | case fashme: | ||
1221 | return "Sparc ESP366-HME"; | ||
1222 | case fas100a: | ||
1223 | return "Sparc ESP100A-FAST"; | ||
1224 | default: | ||
1225 | return "Bogon ESP revision"; | ||
1226 | }; | ||
1227 | } | ||
1228 | |||
1229 | /* From Wolfgang Stanglmeier's NCR scsi driver. */ | ||
1230 | struct info_str | ||
1231 | { | ||
1232 | char *buffer; | ||
1233 | int length; | ||
1234 | int offset; | ||
1235 | int pos; | ||
1236 | }; | ||
1237 | |||
1238 | static void copy_mem_info(struct info_str *info, char *data, int len) | ||
1239 | { | ||
1240 | if (info->pos + len > info->length) | ||
1241 | len = info->length - info->pos; | ||
1242 | |||
1243 | if (info->pos + len < info->offset) { | ||
1244 | info->pos += len; | ||
1245 | return; | ||
1246 | } | ||
1247 | if (info->pos < info->offset) { | ||
1248 | data += (info->offset - info->pos); | ||
1249 | len -= (info->offset - info->pos); | ||
1250 | } | ||
1251 | |||
1252 | if (len > 0) { | ||
1253 | memcpy(info->buffer + info->pos, data, len); | ||
1254 | info->pos += len; | ||
1255 | } | ||
1256 | } | ||
1257 | |||
1258 | static int copy_info(struct info_str *info, char *fmt, ...) | ||
1259 | { | ||
1260 | va_list args; | ||
1261 | char buf[81]; | ||
1262 | int len; | ||
1263 | |||
1264 | va_start(args, fmt); | ||
1265 | len = vsnprintf(buf, sizeof(buf), fmt, args); | ||
1266 | va_end(args); | ||
1267 | |||
1268 | copy_mem_info(info, buf, len); | ||
1269 | return len; | ||
1270 | } | ||
1271 | |||
1272 | static int esp_host_info(struct esp *esp, char *ptr, off_t offset, int len) | ||
1273 | { | ||
1274 | struct scsi_device *sdev; | ||
1275 | struct info_str info; | ||
1276 | int i; | ||
1277 | |||
1278 | info.buffer = ptr; | ||
1279 | info.length = len; | ||
1280 | info.offset = offset; | ||
1281 | info.pos = 0; | ||
1282 | |||
1283 | copy_info(&info, "Sparc ESP Host Adapter:\n"); | ||
1284 | copy_info(&info, "\tPROM node\t\t%08x\n", (unsigned int) esp->prom_node); | ||
1285 | copy_info(&info, "\tPROM name\t\t%s\n", esp->prom_name); | ||
1286 | copy_info(&info, "\tESP Model\t\t"); | ||
1287 | switch (esp->erev) { | ||
1288 | case esp100: | ||
1289 | copy_info(&info, "ESP100\n"); | ||
1290 | break; | ||
1291 | case esp100a: | ||
1292 | copy_info(&info, "ESP100A\n"); | ||
1293 | break; | ||
1294 | case esp236: | ||
1295 | copy_info(&info, "ESP236\n"); | ||
1296 | break; | ||
1297 | case fas236: | ||
1298 | copy_info(&info, "FAS236\n"); | ||
1299 | break; | ||
1300 | case fas100a: | ||
1301 | copy_info(&info, "FAS100A\n"); | ||
1302 | break; | ||
1303 | case fast: | ||
1304 | copy_info(&info, "FAST\n"); | ||
1305 | break; | ||
1306 | case fashme: | ||
1307 | copy_info(&info, "Happy Meal FAS\n"); | ||
1308 | break; | ||
1309 | case espunknown: | ||
1310 | default: | ||
1311 | copy_info(&info, "Unknown!\n"); | ||
1312 | break; | ||
1313 | }; | ||
1314 | copy_info(&info, "\tDMA Revision\t\t"); | ||
1315 | switch (esp->dma->revision) { | ||
1316 | case dvmarev0: | ||
1317 | copy_info(&info, "Rev 0\n"); | ||
1318 | break; | ||
1319 | case dvmaesc1: | ||
1320 | copy_info(&info, "ESC Rev 1\n"); | ||
1321 | break; | ||
1322 | case dvmarev1: | ||
1323 | copy_info(&info, "Rev 1\n"); | ||
1324 | break; | ||
1325 | case dvmarev2: | ||
1326 | copy_info(&info, "Rev 2\n"); | ||
1327 | break; | ||
1328 | case dvmarev3: | ||
1329 | copy_info(&info, "Rev 3\n"); | ||
1330 | break; | ||
1331 | case dvmarevplus: | ||
1332 | copy_info(&info, "Rev 1+\n"); | ||
1333 | break; | ||
1334 | case dvmahme: | ||
1335 | copy_info(&info, "Rev HME/FAS\n"); | ||
1336 | break; | ||
1337 | default: | ||
1338 | copy_info(&info, "Unknown!\n"); | ||
1339 | break; | ||
1340 | }; | ||
1341 | copy_info(&info, "\tLive Targets\t\t[ "); | ||
1342 | for (i = 0; i < 15; i++) { | ||
1343 | if (esp->targets_present & (1 << i)) | ||
1344 | copy_info(&info, "%d ", i); | ||
1345 | } | ||
1346 | copy_info(&info, "]\n\n"); | ||
1347 | |||
1348 | /* Now describe the state of each existing target. */ | ||
1349 | copy_info(&info, "Target #\tconfig3\t\tSync Capabilities\tDisconnect\tWide\n"); | ||
1350 | |||
1351 | shost_for_each_device(sdev, esp->ehost) { | ||
1352 | struct esp_device *esp_dev = sdev->hostdata; | ||
1353 | uint id = sdev->id; | ||
1354 | |||
1355 | if (!(esp->targets_present & (1 << id))) | ||
1356 | continue; | ||
1357 | |||
1358 | copy_info(&info, "%d\t\t", id); | ||
1359 | copy_info(&info, "%08x\t", esp->config3[id]); | ||
1360 | copy_info(&info, "[%02x,%02x]\t\t\t", | ||
1361 | esp_dev->sync_max_offset, | ||
1362 | esp_dev->sync_min_period); | ||
1363 | copy_info(&info, "%s\t\t", | ||
1364 | esp_dev->disconnect ? "yes" : "no"); | ||
1365 | copy_info(&info, "%s\n", | ||
1366 | (esp->config3[id] & ESP_CONFIG3_EWIDE) ? "yes" : "no"); | ||
1367 | } | ||
1368 | return info.pos > info.offset? info.pos - info.offset : 0; | ||
1369 | } | ||
1370 | |||
1371 | /* ESP proc filesystem code. */ | ||
1372 | static int esp_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, | ||
1373 | int length, int inout) | ||
1374 | { | ||
1375 | struct esp *esp = (struct esp *) host->hostdata; | ||
1376 | |||
1377 | if (inout) | ||
1378 | return -EINVAL; /* not yet */ | ||
1379 | |||
1380 | if (start) | ||
1381 | *start = buffer; | ||
1382 | |||
1383 | return esp_host_info(esp, buffer, offset, length); | ||
1384 | } | ||
1385 | |||
1386 | static void esp_get_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | ||
1387 | { | ||
1388 | if (sp->use_sg == 0) { | ||
1389 | sp->SCp.this_residual = sp->request_bufflen; | ||
1390 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; | ||
1391 | sp->SCp.buffers_residual = 0; | ||
1392 | if (sp->request_bufflen) { | ||
1393 | sp->SCp.have_data_in = sbus_map_single(esp->sdev, sp->SCp.buffer, | ||
1394 | sp->SCp.this_residual, | ||
1395 | sp->sc_data_direction); | ||
1396 | sp->SCp.ptr = (char *) ((unsigned long)sp->SCp.have_data_in); | ||
1397 | } else { | ||
1398 | sp->SCp.ptr = NULL; | ||
1399 | } | ||
1400 | } else { | ||
1401 | sp->SCp.buffer = (struct scatterlist *) sp->request_buffer; | ||
1402 | sp->SCp.buffers_residual = sbus_map_sg(esp->sdev, | ||
1403 | sp->SCp.buffer, | ||
1404 | sp->use_sg, | ||
1405 | sp->sc_data_direction); | ||
1406 | sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer); | ||
1407 | sp->SCp.ptr = (char *) ((unsigned long)sg_dma_address(sp->SCp.buffer)); | ||
1408 | } | ||
1409 | } | ||
1410 | |||
1411 | static void esp_release_dmabufs(struct esp *esp, struct scsi_cmnd *sp) | ||
1412 | { | ||
1413 | if (sp->use_sg) { | ||
1414 | sbus_unmap_sg(esp->sdev, sp->request_buffer, sp->use_sg, | ||
1415 | sp->sc_data_direction); | ||
1416 | } else if (sp->request_bufflen) { | ||
1417 | sbus_unmap_single(esp->sdev, | ||
1418 | sp->SCp.have_data_in, | ||
1419 | sp->request_bufflen, | ||
1420 | sp->sc_data_direction); | ||
1421 | } | ||
1422 | } | ||
1423 | |||
1424 | static void esp_restore_pointers(struct esp *esp, struct scsi_cmnd *sp) | ||
1425 | { | ||
1426 | struct esp_pointers *ep = &esp->data_pointers[sp->device->id]; | ||
1427 | |||
1428 | sp->SCp.ptr = ep->saved_ptr; | ||
1429 | sp->SCp.buffer = ep->saved_buffer; | ||
1430 | sp->SCp.this_residual = ep->saved_this_residual; | ||
1431 | sp->SCp.buffers_residual = ep->saved_buffers_residual; | ||
1432 | } | ||
1433 | |||
1434 | static void esp_save_pointers(struct esp *esp, struct scsi_cmnd *sp) | ||
1435 | { | ||
1436 | struct esp_pointers *ep = &esp->data_pointers[sp->device->id]; | ||
1437 | |||
1438 | ep->saved_ptr = sp->SCp.ptr; | ||
1439 | ep->saved_buffer = sp->SCp.buffer; | ||
1440 | ep->saved_this_residual = sp->SCp.this_residual; | ||
1441 | ep->saved_buffers_residual = sp->SCp.buffers_residual; | ||
1442 | } | ||
1443 | |||
1444 | /* Some rules: | ||
1445 | * | ||
1446 | * 1) Never ever panic while something is live on the bus. | ||
1447 | * If there is to be any chance of syncing the disks this | ||
1448 | * rule is to be obeyed. | ||
1449 | * | ||
1450 | * 2) Any target that causes a foul condition will no longer | ||
1451 | * have synchronous transfers done to it, no questions | ||
1452 | * asked. | ||
1453 | * | ||
1454 | * 3) Keep register accesses to a minimum. Think about some | ||
1455 | * day when we have Xbus machines this is running on and | ||
1456 | * the ESP chip is on the other end of the machine on a | ||
1457 | * different board from the cpu where this is running. | ||
1458 | */ | ||
1459 | |||
1460 | /* Fire off a command. We assume the bus is free and that the only | ||
1461 | * case where we could see an interrupt is where we have disconnected | ||
1462 | * commands active and they are trying to reselect us. | ||
1463 | */ | ||
1464 | static inline void esp_check_cmd(struct esp *esp, struct scsi_cmnd *sp) | ||
1465 | { | ||
1466 | switch (sp->cmd_len) { | ||
1467 | case 6: | ||
1468 | case 10: | ||
1469 | case 12: | ||
1470 | esp->esp_slowcmd = 0; | ||
1471 | break; | ||
1472 | |||
1473 | default: | ||
1474 | esp->esp_slowcmd = 1; | ||
1475 | esp->esp_scmdleft = sp->cmd_len; | ||
1476 | esp->esp_scmdp = &sp->cmnd[0]; | ||
1477 | break; | ||
1478 | }; | ||
1479 | } | ||
1480 | |||
1481 | static inline void build_sync_nego_msg(struct esp *esp, int period, int offset) | ||
1482 | { | ||
1483 | esp->cur_msgout[0] = EXTENDED_MESSAGE; | ||
1484 | esp->cur_msgout[1] = 3; | ||
1485 | esp->cur_msgout[2] = EXTENDED_SDTR; | ||
1486 | esp->cur_msgout[3] = period; | ||
1487 | esp->cur_msgout[4] = offset; | ||
1488 | esp->msgout_len = 5; | ||
1489 | } | ||
1490 | |||
1491 | /* SIZE is in bits, currently HME only supports 16 bit wide transfers. */ | ||
1492 | static inline void build_wide_nego_msg(struct esp *esp, int size) | ||
1493 | { | ||
1494 | esp->cur_msgout[0] = EXTENDED_MESSAGE; | ||
1495 | esp->cur_msgout[1] = 2; | ||
1496 | esp->cur_msgout[2] = EXTENDED_WDTR; | ||
1497 | switch (size) { | ||
1498 | case 32: | ||
1499 | esp->cur_msgout[3] = 2; | ||
1500 | break; | ||
1501 | case 16: | ||
1502 | esp->cur_msgout[3] = 1; | ||
1503 | break; | ||
1504 | case 8: | ||
1505 | default: | ||
1506 | esp->cur_msgout[3] = 0; | ||
1507 | break; | ||
1508 | }; | ||
1509 | |||
1510 | esp->msgout_len = 4; | ||
1511 | } | ||
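On the wire these two helpers produce standard SCSI-2 extended messages, and esp_exec_cmd() below prefixes every selection with an IDENTIFY byte. The sketch below prints the resulting byte sequences; the numeric defines mirror the usual <scsi/scsi.h> values, and the period/offset bytes are placeholders rather than the driver's actual sync_defp.

    #include <stdio.h>

    /* Numeric stand-ins for the kernel's message constants. */
    #define EXTENDED_MESSAGE        0x01
    #define EXTENDED_SDTR           0x01    /* sync data transfer request */
    #define EXTENDED_WDTR           0x03    /* wide data transfer request */
    #define IDENTIFY(disc, lun)     (0x80 | ((disc) ? 0x40 : 0) | ((lun) & 7))

    static void dump(const char *tag, const unsigned char *msg, int len)
    {
            int i;

            printf("%-4s:", tag);
            for (i = 0; i < len; i++)
                    printf(" %02x", msg[i]);
            printf("\n");
    }

    int main(void)
    {
            /* Layout produced by build_sync_nego_msg(esp, period, offset);
             * period=12 and offset=15 are example values only. */
            unsigned char sdtr[] = { EXTENDED_MESSAGE, 3, EXTENDED_SDTR, 12, 15 };
            /* Layout produced by build_wide_nego_msg(esp, 16): width exponent 1. */
            unsigned char wdtr[] = { EXTENDED_MESSAGE, 2, EXTENDED_WDTR, 1 };

            dump("SDTR", sdtr, (int) sizeof(sdtr));
            dump("WDTR", wdtr, (int) sizeof(wdtr));
            printf("IDENTIFY(1,0) = %02x\n", IDENTIFY(1, 0));
            return 0;
    }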
1512 | |||
1513 | static void esp_exec_cmd(struct esp *esp) | ||
1514 | { | ||
1515 | struct scsi_cmnd *SCptr; | ||
1516 | struct scsi_device *SDptr; | ||
1517 | struct esp_device *esp_dev; | ||
1518 | volatile u8 *cmdp = esp->esp_command; | ||
1519 | u8 the_esp_command; | ||
1520 | int lun, target; | ||
1521 | int i; | ||
1522 | |||
1523 | /* Hold off if we have disconnected commands and | ||
1524 | * an IRQ is showing... | ||
1525 | */ | ||
1526 | if (esp->disconnected_SC && ESP_IRQ_P(esp->dregs)) | ||
1527 | return; | ||
1528 | |||
1529 | /* Grab first member of the issue queue. */ | ||
1530 | SCptr = esp->current_SC = remove_first_SC(&esp->issue_SC); | ||
1531 | |||
1532 | /* Safe to panic here because current_SC is null. */ | ||
1533 | if (!SCptr) | ||
1534 | panic("esp: esp_exec_cmd and issue queue is NULL"); | ||
1535 | |||
1536 | SDptr = SCptr->device; | ||
1537 | esp_dev = SDptr->hostdata; | ||
1538 | lun = SCptr->device->lun; | ||
1539 | target = SCptr->device->id; | ||
1540 | |||
1541 | esp->snip = 0; | ||
1542 | esp->msgout_len = 0; | ||
1543 | |||
1544 | /* Send it out whole, or piece by piece? The ESP | ||
1545 | * only knows how to automatically send out 6, 10, | ||
1546 | * and 12 byte commands. I used to think that the | ||
1547 | * Linux SCSI code would never throw anything other | ||
1548 | * than that to us, but then again there is the | ||
1549 | * SCSI generic driver which can send us anything. | ||
1550 | */ | ||
1551 | esp_check_cmd(esp, SCptr); | ||
1552 | |||
1553 | /* If arbitration/selection is successful, the ESP will leave | ||
1554 | * ATN asserted, causing the target to go into message out | ||
1555 | * phase. The ESP will feed the target the identify and then | ||
1556 | * the target can only legally go to one of command, | ||
1557 | * datain/out, status, or message in phase, or stay in message | ||
1558 | * out phase (should we be trying to send a sync negotiation | ||
1559 | * message after the identify). It is not allowed to drop | ||
1560 | * BSY, but some buggy targets do and we check for this | ||
1561 | * condition in the selection complete code. Most of the time | ||
1562 | * we'll make the command bytes available to the ESP and it | ||
1563 | * will not interrupt us until it finishes command phase, we | ||
1564 | * cannot do this for command sizes the ESP does not | ||
1565 | * understand and in this case we'll get interrupted right | ||
1566 | * when the target goes into command phase. | ||
1567 | * | ||
1568 | * It is absolutely _illegal_ in the presence of SCSI-2 devices | ||
1569 | * to use the ESP select w/o ATN command. When SCSI-2 devices are | ||
1570 | * present on the bus we _must_ always go straight to message out | ||
1571 | * phase with an identify message for the target. Since selection | ||
1572 | * without ATN was merely an option in SCSI-1, doing SCSI-2 | ||
1573 | * selections should not confuse SCSI-1 devices, we hope. | ||
1574 | */ | ||
1575 | |||
1576 | if (esp_dev->sync) { | ||
1577 | /* this target's sync is known */ | ||
1578 | #ifndef __sparc_v9__ | ||
1579 | do_sync_known: | ||
1580 | #endif | ||
1581 | if (esp_dev->disconnect) | ||
1582 | *cmdp++ = IDENTIFY(1, lun); | ||
1583 | else | ||
1584 | *cmdp++ = IDENTIFY(0, lun); | ||
1585 | |||
1586 | if (esp->esp_slowcmd) { | ||
1587 | the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); | ||
1588 | esp_advance_phase(SCptr, in_slct_stop); | ||
1589 | } else { | ||
1590 | the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); | ||
1591 | esp_advance_phase(SCptr, in_slct_norm); | ||
1592 | } | ||
1593 | } else if (!(esp->targets_present & (1<<target)) || !(esp_dev->disconnect)) { | ||
1594 | /* After the bootup SCSI code sends both the | ||
1595 | * TEST_UNIT_READY and INQUIRY commands we want | ||
1596 | * to at least attempt allowing the device to | ||
1597 | * disconnect. | ||
1598 | */ | ||
1599 | ESPMISC(("esp: Selecting device for first time. target=%d " | ||
1600 | "lun=%d\n", target, SCptr->device->lun)); | ||
1601 | if (!SDptr->borken && !esp_dev->disconnect) | ||
1602 | esp_dev->disconnect = 1; | ||
1603 | |||
1604 | *cmdp++ = IDENTIFY(0, lun); | ||
1605 | esp->prevmsgout = NOP; | ||
1606 | esp_advance_phase(SCptr, in_slct_norm); | ||
1607 | the_esp_command = (ESP_CMD_SELA | ESP_CMD_DMA); | ||
1608 | |||
1609 | /* Take no chances... */ | ||
1610 | esp_dev->sync_max_offset = 0; | ||
1611 | esp_dev->sync_min_period = 0; | ||
1612 | } else { | ||
1613 | /* Sorry, I have had way too many problems with | ||
1614 | * various CDROM devices on ESP. -DaveM | ||
1615 | */ | ||
1616 | int cdrom_hwbug_wkaround = 0; | ||
1617 | |||
1618 | #ifndef __sparc_v9__ | ||
1619 | /* Never allow disconnects or synchronous transfers on | ||
1620 | * SparcStation1 and SparcStation1+. Allowing those | ||
1621 | * to be enabled seems to lockup the machine completely. | ||
1622 | */ | ||
1623 | if ((idprom->id_machtype == (SM_SUN4C | SM_4C_SS1)) || | ||
1624 | (idprom->id_machtype == (SM_SUN4C | SM_4C_SS1PLUS))) { | ||
1625 | /* But we are nice and allow tapes and removable | ||
1626 | * disks (but not CDROMs) to disconnect. | ||
1627 | */ | ||
1628 | if(SDptr->type == TYPE_TAPE || | ||
1629 | (SDptr->type != TYPE_ROM && SDptr->removable)) | ||
1630 | esp_dev->disconnect = 1; | ||
1631 | else | ||
1632 | esp_dev->disconnect = 0; | ||
1633 | esp_dev->sync_max_offset = 0; | ||
1634 | esp_dev->sync_min_period = 0; | ||
1635 | esp_dev->sync = 1; | ||
1636 | esp->snip = 0; | ||
1637 | goto do_sync_known; | ||
1638 | } | ||
1639 | #endif /* !(__sparc_v9__) */ | ||
1640 | |||
1641 | /* We've talked to this guy before, | ||
1642 | * but never negotiated. Let's try; we | ||
1643 | * need to attempt WIDE first, before | ||
1644 | * sync nego, as per the SCSI-2 standard. | ||
1645 | */ | ||
1646 | if (esp->erev == fashme && !esp_dev->wide) { | ||
1647 | if (!SDptr->borken && | ||
1648 | SDptr->type != TYPE_ROM && | ||
1649 | SDptr->removable == 0) { | ||
1650 | build_wide_nego_msg(esp, 16); | ||
1651 | esp_dev->wide = 1; | ||
1652 | esp->wnip = 1; | ||
1653 | goto after_nego_msg_built; | ||
1654 | } else { | ||
1655 | esp_dev->wide = 1; | ||
1656 | /* Fall through and try sync. */ | ||
1657 | } | ||
1658 | } | ||
1659 | |||
1660 | if (!SDptr->borken) { | ||
1661 | if (SDptr->type == TYPE_ROM) { | ||
1662 | /* Nice try sucker... */ | ||
1663 | ESPMISC(("esp%d: Disabling sync for buggy " | ||
1664 | "CDROM.\n", esp->esp_id)); | ||
1665 | cdrom_hwbug_wkaround = 1; | ||
1666 | build_sync_nego_msg(esp, 0, 0); | ||
1667 | } else if (SDptr->removable != 0) { | ||
1668 | ESPMISC(("esp%d: Not negotiating sync/wide but " | ||
1669 | "allowing disconnect for removable media.\n", | ||
1670 | esp->esp_id)); | ||
1671 | build_sync_nego_msg(esp, 0, 0); | ||
1672 | } else { | ||
1673 | build_sync_nego_msg(esp, esp->sync_defp, 15); | ||
1674 | } | ||
1675 | } else { | ||
1676 | build_sync_nego_msg(esp, 0, 0); | ||
1677 | } | ||
1678 | esp_dev->sync = 1; | ||
1679 | esp->snip = 1; | ||
1680 | |||
1681 | after_nego_msg_built: | ||
1682 | /* A fix for broken SCSI1 targets, when they disconnect | ||
1683 | * they lock up the bus and confuse ESP. So disallow | ||
1684 | * disconnects for SCSI1 targets for now until we | ||
1685 | * find a better fix. | ||
1686 | * | ||
1687 | * Addendum: This is funny, I figured out what was going | ||
1688 | * on. The blotzed SCSI1 target would disconnect, | ||
1689 | * one of the other SCSI2 targets or both would be | ||
1690 | * disconnected as well. The SCSI1 target would | ||
1691 | * stay disconnected long enough that we start | ||
1692 | * up a command on one of the SCSI2 targets. As | ||
1693 | * the ESP is arbitrating for the bus the SCSI1 | ||
1694 | * target begins to arbitrate as well to reselect | ||
1695 | * the ESP. The SCSI1 target refuses to drop its | ||
1696 | * ID bit on the data bus even though the ESP is | ||
1697 | * at ID 7 and is the obvious winner for any | ||
1698 | * arbitration. The ESP is a poor sport and refuses | ||
1699 | * to lose arbitration, it will continue indefinitely | ||
1700 | * trying to arbitrate for the bus and can only be | ||
1701 | * stopped via a chip reset or SCSI bus reset. | ||
1702 | * Therefore _no_ disconnects for SCSI1 targets | ||
1703 | * thank you very much. ;-) | ||
1704 | */ | ||
1705 | if(((SDptr->scsi_level < 3) && | ||
1706 | (SDptr->type != TYPE_TAPE) && | ||
1707 | SDptr->removable == 0) || | ||
1708 | cdrom_hwbug_wkaround || SDptr->borken) { | ||
1709 | ESPMISC((KERN_INFO "esp%d: Disabling DISCONNECT for target %d " | ||
1710 | "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); | ||
1711 | esp_dev->disconnect = 0; | ||
1712 | *cmdp++ = IDENTIFY(0, lun); | ||
1713 | } else { | ||
1714 | *cmdp++ = IDENTIFY(1, lun); | ||
1715 | } | ||
1716 | |||
1717 | /* ESP fifo is only so big... | ||
1718 | * Make this look like a slow command. | ||
1719 | */ | ||
1720 | esp->esp_slowcmd = 1; | ||
1721 | esp->esp_scmdleft = SCptr->cmd_len; | ||
1722 | esp->esp_scmdp = &SCptr->cmnd[0]; | ||
1723 | |||
1724 | the_esp_command = (ESP_CMD_SELAS | ESP_CMD_DMA); | ||
1725 | esp_advance_phase(SCptr, in_slct_msg); | ||
1726 | } | ||
1727 | |||
1728 | if (!esp->esp_slowcmd) | ||
1729 | for (i = 0; i < SCptr->cmd_len; i++) | ||
1730 | *cmdp++ = SCptr->cmnd[i]; | ||
1731 | |||
1732 | /* HME sucks... */ | ||
1733 | if (esp->erev == fashme) | ||
1734 | sbus_writeb((target & 0xf) | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT), | ||
1735 | esp->eregs + ESP_BUSID); | ||
1736 | else | ||
1737 | sbus_writeb(target & 7, esp->eregs + ESP_BUSID); | ||
1738 | if (esp->prev_soff != esp_dev->sync_max_offset || | ||
1739 | esp->prev_stp != esp_dev->sync_min_period || | ||
1740 | (esp->erev > esp100a && | ||
1741 | esp->prev_cfg3 != esp->config3[target])) { | ||
1742 | esp->prev_soff = esp_dev->sync_max_offset; | ||
1743 | esp->prev_stp = esp_dev->sync_min_period; | ||
1744 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
1745 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
1746 | if (esp->erev > esp100a) { | ||
1747 | esp->prev_cfg3 = esp->config3[target]; | ||
1748 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
1749 | } | ||
1750 | } | ||
1751 | i = (cmdp - esp->esp_command); | ||
1752 | |||
1753 | if (esp->erev == fashme) { | ||
1754 | esp_cmd(esp, ESP_CMD_FLUSH); /* Grrr! */ | ||
1755 | |||
1756 | /* Set up the DMA and HME counters */ | ||
1757 | sbus_writeb(i, esp->eregs + ESP_TCLOW); | ||
1758 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
1759 | sbus_writeb(0, esp->eregs + FAS_RLO); | ||
1760 | sbus_writeb(0, esp->eregs + FAS_RHI); | ||
1761 | esp_cmd(esp, the_esp_command); | ||
1762 | |||
1763 | /* Talk about touchy hardware... */ | ||
1764 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
1765 | (DMA_SCSI_DISAB | DMA_ENABLE)) & | ||
1766 | ~(DMA_ST_WRITE)); | ||
1767 | sbus_writel(16, esp->dregs + DMA_COUNT); | ||
1768 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
1769 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
1770 | } else { | ||
1771 | u32 tmp; | ||
1772 | |||
1773 | /* Set up the DMA and ESP counters */ | ||
1774 | sbus_writeb(i, esp->eregs + ESP_TCLOW); | ||
1775 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
1776 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
1777 | tmp &= ~DMA_ST_WRITE; | ||
1778 | tmp |= DMA_ENABLE; | ||
1779 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
1780 | if (esp->dma->revision == dvmaesc1) { | ||
1781 | if (i) /* Workaround ESC gate array SBUS rerun bug. */ | ||
1782 | sbus_writel(PAGE_SIZE, esp->dregs + DMA_COUNT); | ||
1783 | } | ||
1784 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
1785 | |||
1786 | /* Tell ESP to "go". */ | ||
1787 | esp_cmd(esp, the_esp_command); | ||
1788 | } | ||
1789 | } | ||
1790 | |||
1791 | /* Queue a SCSI command delivered from the mid-level Linux SCSI code. */ | ||
1792 | static int esp_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *)) | ||
1793 | { | ||
1794 | struct esp *esp; | ||
1795 | |||
1796 | /* Set up func ptr and initial driver cmd-phase. */ | ||
1797 | SCpnt->scsi_done = done; | ||
1798 | SCpnt->SCp.phase = not_issued; | ||
1799 | |||
1800 | /* We use the scratch area. */ | ||
1801 | ESPQUEUE(("esp_queue: target=%d lun=%d ", SCpnt->device->id, SCpnt->device->lun)); | ||
1802 | ESPDISC(("N<%02x,%02x>", SCpnt->device->id, SCpnt->device->lun)); | ||
1803 | |||
1804 | esp = (struct esp *) SCpnt->device->host->hostdata; | ||
1805 | esp_get_dmabufs(esp, SCpnt); | ||
1806 | esp_save_pointers(esp, SCpnt); /* FIXME for tag queueing */ | ||
1807 | |||
1808 | SCpnt->SCp.Status = CHECK_CONDITION; | ||
1809 | SCpnt->SCp.Message = 0xff; | ||
1810 | SCpnt->SCp.sent_command = 0; | ||
1811 | |||
1812 | /* Place into our queue. */ | ||
1813 | if (SCpnt->cmnd[0] == REQUEST_SENSE) { | ||
1814 | ESPQUEUE(("RQSENSE\n")); | ||
1815 | prepend_SC(&esp->issue_SC, SCpnt); | ||
1816 | } else { | ||
1817 | ESPQUEUE(("\n")); | ||
1818 | append_SC(&esp->issue_SC, SCpnt); | ||
1819 | } | ||
1820 | |||
1821 | /* Run it now if we can. */ | ||
1822 | if (!esp->current_SC && !esp->resetting_bus) | ||
1823 | esp_exec_cmd(esp); | ||
1824 | |||
1825 | return 0; | ||
1826 | } | ||
1827 | |||
1828 | /* Dump driver state. */ | ||
1829 | static void esp_dump_cmd(struct scsi_cmnd *SCptr) | ||
1830 | { | ||
1831 | ESPLOG(("[tgt<%02x> lun<%02x> " | ||
1832 | "pphase<%s> cphase<%s>]", | ||
1833 | SCptr->device->id, SCptr->device->lun, | ||
1834 | phase_string(SCptr->SCp.sent_command), | ||
1835 | phase_string(SCptr->SCp.phase))); | ||
1836 | } | ||
1837 | |||
1838 | static void esp_dump_state(struct esp *esp) | ||
1839 | { | ||
1840 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
1841 | #ifdef DEBUG_ESP_CMDS | ||
1842 | int i; | ||
1843 | #endif | ||
1844 | |||
1845 | ESPLOG(("esp%d: dumping state\n", esp->esp_id)); | ||
1846 | ESPLOG(("esp%d: dma -- cond_reg<%08x> addr<%08x>\n", | ||
1847 | esp->esp_id, | ||
1848 | sbus_readl(esp->dregs + DMA_CSR), | ||
1849 | sbus_readl(esp->dregs + DMA_ADDR))); | ||
1850 | ESPLOG(("esp%d: SW [sreg<%02x> sstep<%02x> ireg<%02x>]\n", | ||
1851 | esp->esp_id, esp->sreg, esp->seqreg, esp->ireg)); | ||
1852 | ESPLOG(("esp%d: HW reread [sreg<%02x> sstep<%02x> ireg<%02x>]\n", | ||
1853 | esp->esp_id, | ||
1854 | sbus_readb(esp->eregs + ESP_STATUS), | ||
1855 | sbus_readb(esp->eregs + ESP_SSTEP), | ||
1856 | sbus_readb(esp->eregs + ESP_INTRPT))); | ||
1857 | #ifdef DEBUG_ESP_CMDS | ||
1858 | printk("esp%d: last ESP cmds [", esp->esp_id); | ||
1859 | i = (esp->espcmdent - 1) & 31; | ||
1860 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1861 | i = (i - 1) & 31; | ||
1862 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1863 | i = (i - 1) & 31; | ||
1864 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1865 | i = (i - 1) & 31; | ||
1866 | printk("<"); esp_print_cmd(esp->espcmdlog[i]); printk(">"); | ||
1867 | printk("]\n"); | ||
1868 | #endif /* (DEBUG_ESP_CMDS) */ | ||
1869 | |||
1870 | if (SCptr) { | ||
1871 | ESPLOG(("esp%d: current command ", esp->esp_id)); | ||
1872 | esp_dump_cmd(SCptr); | ||
1873 | } | ||
1874 | ESPLOG(("\n")); | ||
1875 | SCptr = esp->disconnected_SC; | ||
1876 | ESPLOG(("esp%d: disconnected ", esp->esp_id)); | ||
1877 | while (SCptr) { | ||
1878 | esp_dump_cmd(SCptr); | ||
1879 | SCptr = (struct scsi_cmnd *) SCptr->host_scribble; | ||
1880 | } | ||
1881 | ESPLOG(("\n")); | ||
1882 | } | ||
1883 | |||
1884 | /* Abort a command. The host_lock is acquired by the caller. */ | ||
1885 | static int esp_abort(struct scsi_cmnd *SCptr) | ||
1886 | { | ||
1887 | struct esp *esp = (struct esp *) SCptr->device->host->hostdata; | ||
1888 | int don; | ||
1889 | |||
1890 | ESPLOG(("esp%d: Aborting command\n", esp->esp_id)); | ||
1891 | esp_dump_state(esp); | ||
1892 | |||
1893 | /* Wheee, if this is the current command on the bus, the | ||
1894 | * best we can do is assert ATN and wait for msgout phase. | ||
1895 | * This should even fix a hung SCSI bus when we lose state | ||
1896 | * in the driver and time out, because the eventual phase change | ||
1897 | * will cause the ESP to (eventually) give an interrupt. | ||
1898 | */ | ||
1899 | if (esp->current_SC == SCptr) { | ||
1900 | esp->cur_msgout[0] = ABORT; | ||
1901 | esp->msgout_len = 1; | ||
1902 | esp->msgout_ctr = 0; | ||
1903 | esp_cmd(esp, ESP_CMD_SATN); | ||
1904 | return SUCCESS; | ||
1905 | } | ||
1906 | |||
1907 | /* If it is still in the issue queue then we can safely | ||
1908 | * call the completion routine and report abort success. | ||
1909 | */ | ||
1910 | don = (sbus_readl(esp->dregs + DMA_CSR) & DMA_INT_ENAB); | ||
1911 | if (don) { | ||
1912 | ESP_INTSOFF(esp->dregs); | ||
1913 | } | ||
1914 | if (esp->issue_SC) { | ||
1915 | struct scsi_cmnd **prev, *this; | ||
1916 | for (prev = (&esp->issue_SC), this = esp->issue_SC; | ||
1917 | this != NULL; | ||
1918 | prev = (struct scsi_cmnd **) &(this->host_scribble), | ||
1919 | this = (struct scsi_cmnd *) this->host_scribble) { | ||
1920 | |||
1921 | if (this == SCptr) { | ||
1922 | *prev = (struct scsi_cmnd *) this->host_scribble; | ||
1923 | this->host_scribble = NULL; | ||
1924 | |||
1925 | esp_release_dmabufs(esp, this); | ||
1926 | this->result = DID_ABORT << 16; | ||
1927 | this->scsi_done(this); | ||
1928 | |||
1929 | if (don) | ||
1930 | ESP_INTSON(esp->dregs); | ||
1931 | |||
1932 | return SUCCESS; | ||
1933 | } | ||
1934 | } | ||
1935 | } | ||
1936 | |||
1937 | /* Yuck, the command to abort is disconnected; it is not | ||
1938 | * worth trying to abort it now if something else is live | ||
1939 | * on the bus at this time. So, we let the SCSI code wait | ||
1940 | * a little bit and try again later. | ||
1941 | */ | ||
1942 | if (esp->current_SC) { | ||
1943 | if (don) | ||
1944 | ESP_INTSON(esp->dregs); | ||
1945 | return FAILED; | ||
1946 | } | ||
1947 | |||
1948 | /* It's disconnected; we would have to reconnect to re-establish | ||
1949 | * the nexus and tell the device to abort. However, we really | ||
1950 | * cannot 'reconnect' per se. Don't try to be fancy, just | ||
1951 | * indicate failure, which causes our caller to reset the whole | ||
1952 | * bus. | ||
1953 | */ | ||
1954 | |||
1955 | if (don) | ||
1956 | ESP_INTSON(esp->dregs); | ||
1957 | |||
1958 | return FAILED; | ||
1959 | } | ||
1960 | |||
1961 | /* We've sent ESP_CMD_RS to the ESP, and the interrupt has just | ||
1962 | * arrived indicating the end of the SCSI bus reset. Our job | ||
1963 | * is to clean out the command queues and begin re-execution | ||
1964 | * of SCSI commands once more. | ||
1965 | */ | ||
1966 | static int esp_finish_reset(struct esp *esp) | ||
1967 | { | ||
1968 | struct scsi_cmnd *sp = esp->current_SC; | ||
1969 | |||
1970 | /* Clean up currently executing command, if any. */ | ||
1971 | if (sp != NULL) { | ||
1972 | esp->current_SC = NULL; | ||
1973 | |||
1974 | esp_release_dmabufs(esp, sp); | ||
1975 | sp->result = (DID_RESET << 16); | ||
1976 | |||
1977 | sp->scsi_done(sp); | ||
1978 | } | ||
1979 | |||
1980 | /* Clean up the disconnected queue; its commands have been invalidated | ||
1981 | * by the bus reset. | ||
1982 | */ | ||
1983 | if (esp->disconnected_SC) { | ||
1984 | while ((sp = remove_first_SC(&esp->disconnected_SC)) != NULL) { | ||
1985 | esp_release_dmabufs(esp, sp); | ||
1986 | sp->result = (DID_RESET << 16); | ||
1987 | |||
1988 | sp->scsi_done(sp); | ||
1989 | } | ||
1990 | } | ||
1991 | |||
1992 | /* SCSI bus reset is complete. */ | ||
1993 | esp->resetting_bus = 0; | ||
1994 | wake_up(&esp->reset_queue); | ||
1995 | |||
1996 | /* Ok, now it is safe to get commands going once more. */ | ||
1997 | if (esp->issue_SC) | ||
1998 | esp_exec_cmd(esp); | ||
1999 | |||
2000 | return do_intr_end; | ||
2001 | } | ||
2002 | |||
2003 | static int esp_do_resetbus(struct esp *esp) | ||
2004 | { | ||
2005 | ESPLOG(("esp%d: Resetting scsi bus\n", esp->esp_id)); | ||
2006 | esp->resetting_bus = 1; | ||
2007 | esp_cmd(esp, ESP_CMD_RS); | ||
2008 | |||
2009 | return do_intr_end; | ||
2010 | } | ||
2011 | |||
2012 | /* Reset ESP chip, reset hanging bus, then kill active and | ||
2013 | * disconnected commands for targets without soft reset. | ||
2014 | * | ||
2015 | * The host_lock is acquired by the caller. | ||
2016 | */ | ||
2017 | static int esp_reset(struct scsi_cmnd *SCptr) | ||
2018 | { | ||
2019 | struct esp *esp = (struct esp *) SCptr->device->host->hostdata; | ||
2020 | |||
2021 | spin_lock_irq(esp->ehost->host_lock); | ||
2022 | (void) esp_do_resetbus(esp); | ||
2023 | spin_unlock_irq(esp->ehost->host_lock); | ||
2024 | |||
2025 | wait_event(esp->reset_queue, (esp->resetting_bus == 0)); | ||
2026 | |||
2027 | return SUCCESS; | ||
2028 | } | ||
2029 | |||
2030 | /* Internal ESP done function. */ | ||
2031 | static void esp_done(struct esp *esp, int error) | ||
2032 | { | ||
2033 | struct scsi_cmnd *done_SC = esp->current_SC; | ||
2034 | |||
2035 | esp->current_SC = NULL; | ||
2036 | |||
2037 | esp_release_dmabufs(esp, done_SC); | ||
2038 | done_SC->result = error; | ||
2039 | |||
2040 | done_SC->scsi_done(done_SC); | ||
2041 | |||
2042 | /* Bus is free, issue any commands in the queue. */ | ||
2043 | if (esp->issue_SC && !esp->current_SC) | ||
2044 | esp_exec_cmd(esp); | ||
2045 | |||
2046 | } | ||
2047 | |||
2048 | /* Wheee, ESP interrupt engine. */ | ||
2049 | |||
2050 | /* Forward declarations. */ | ||
2051 | static int esp_do_phase_determine(struct esp *esp); | ||
2052 | static int esp_do_data_finale(struct esp *esp); | ||
2053 | static int esp_select_complete(struct esp *esp); | ||
2054 | static int esp_do_status(struct esp *esp); | ||
2055 | static int esp_do_msgin(struct esp *esp); | ||
2056 | static int esp_do_msgindone(struct esp *esp); | ||
2057 | static int esp_do_msgout(struct esp *esp); | ||
2058 | static int esp_do_cmdbegin(struct esp *esp); | ||
2059 | |||
2060 | #define sreg_datainp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DIP) | ||
2061 | #define sreg_dataoutp(__sreg) (((__sreg) & ESP_STAT_PMASK) == ESP_DOP) | ||
2062 | |||
2063 | /* Read any bytes found in the FAS366 fifo, storing them into | ||
2064 | * the ESP driver software state structure. | ||
2065 | */ | ||
2066 | static void hme_fifo_read(struct esp *esp) | ||
2067 | { | ||
2068 | u8 count = 0; | ||
2069 | u8 status = esp->sreg; | ||
2070 | |||
2071 | /* We cannot safely frob the fifo in the following cases, but | ||
2072 | * we must always read the fifo when the reselect interrupt | ||
2073 | * is pending. | ||
2074 | */ | ||
2075 | if (((esp->ireg & ESP_INTR_RSEL) == 0) && | ||
2076 | (sreg_datainp(status) || | ||
2077 | sreg_dataoutp(status) || | ||
2078 | (esp->current_SC && | ||
2079 | esp->current_SC->SCp.phase == in_data_done))) { | ||
2080 | ESPHME(("<wkaround_skipped>")); | ||
2081 | } else { | ||
2082 | unsigned long fcnt = sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES; | ||
2083 | |||
2084 | /* The HME stores bytes in multiples of 2 in the fifo. */ | ||
2085 | ESPHME(("hme_fifo[fcnt=%d", (int)fcnt)); | ||
2086 | while (fcnt) { | ||
2087 | esp->hme_fifo_workaround_buffer[count++] = | ||
2088 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2089 | esp->hme_fifo_workaround_buffer[count++] = | ||
2090 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2091 | ESPHME(("<%02x,%02x>", esp->hme_fifo_workaround_buffer[count-2], esp->hme_fifo_workaround_buffer[count-1])); | ||
2092 | fcnt--; | ||
2093 | } | ||
2094 | if (sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_F1BYTE) { | ||
2095 | ESPHME(("<poke_byte>")); | ||
2096 | sbus_writeb(0, esp->eregs + ESP_FDATA); | ||
2097 | esp->hme_fifo_workaround_buffer[count++] = | ||
2098 | sbus_readb(esp->eregs + ESP_FDATA); | ||
2099 | ESPHME(("<%02x,0x00>", esp->hme_fifo_workaround_buffer[count-1])); | ||
2100 | ESPHME(("CMD_FLUSH")); | ||
2101 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2102 | } else { | ||
2103 | ESPHME(("no_xtra_byte")); | ||
2104 | } | ||
2105 | } | ||
2106 | ESPHME(("wkarnd_cnt=%d]", (int)count)); | ||
2107 | esp->hme_fifo_workaround_count = count; | ||
2108 | } | ||
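As a rough standalone illustration of the byte accounting hme_fifo_read() does above, here is a plain C sketch with no hardware access; the "3 latched pairs plus one extra byte" snapshot is a hypothetical example, not driver state.

#include <stdio.h>

int main(void)
{
        /* Hypothetical FAS366 snapshot: the fifo flags report 3 latched
         * 16-bit pairs and the "one extra byte" status bit is set, so the
         * workaround would recover 2*3 + 1 = 7 bytes.
         */
        int fcnt = 3;
        int extra_byte = 1;
        int recovered = 2 * fcnt + (extra_byte ? 1 : 0);

        printf("fifo bytes recovered: %d\n", recovered);
        return 0;
}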
2109 | |||
2110 | static inline void hme_fifo_push(struct esp *esp, u8 *bytes, u8 count) | ||
2111 | { | ||
2112 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2113 | while (count) { | ||
2114 | u8 tmp = *bytes++; | ||
2115 | sbus_writeb(tmp, esp->eregs + ESP_FDATA); | ||
2116 | sbus_writeb(0, esp->eregs + ESP_FDATA); | ||
2117 | count--; | ||
2118 | } | ||
2119 | } | ||
2120 | |||
2121 | /* We try to avoid some interrupts by jumping ahead and seeing if the ESP | ||
2122 | * has gotten far enough yet. Hence the following. | ||
2123 | */ | ||
2124 | static inline int skipahead1(struct esp *esp, struct scsi_cmnd *scp, | ||
2125 | int prev_phase, int new_phase) | ||
2126 | { | ||
2127 | if (scp->SCp.sent_command != prev_phase) | ||
2128 | return 0; | ||
2129 | if (ESP_IRQ_P(esp->dregs)) { | ||
2130 | /* Yes, we are able to save an interrupt. */ | ||
2131 | if (esp->erev == fashme) | ||
2132 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
2133 | esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR)); | ||
2134 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2135 | if (esp->erev == fashme) { | ||
2136 | /* This chip is really losing. */ | ||
2137 | ESPHME(("HME[")); | ||
2138 | /* Must latch fifo before reading the interrupt | ||
2139 | * register else garbage ends up in the FIFO | ||
2140 | * which confuses the driver utterly. | ||
2141 | * Happy Meal indeed.... | ||
2142 | */ | ||
2143 | ESPHME(("fifo_workaround]")); | ||
2144 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2145 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2146 | hme_fifo_read(esp); | ||
2147 | } | ||
2148 | if (!(esp->ireg & ESP_INTR_SR)) | ||
2149 | return 0; | ||
2150 | else | ||
2151 | return do_reset_complete; | ||
2152 | } | ||
2153 | /* Ho hum, target is taking forever... */ | ||
2154 | scp->SCp.sent_command = new_phase; /* so we don't recurse... */ | ||
2155 | return do_intr_end; | ||
2156 | } | ||
2157 | |||
2158 | static inline int skipahead2(struct esp *esp, struct scsi_cmnd *scp, | ||
2159 | int prev_phase1, int prev_phase2, int new_phase) | ||
2160 | { | ||
2161 | if (scp->SCp.sent_command != prev_phase1 && | ||
2162 | scp->SCp.sent_command != prev_phase2) | ||
2163 | return 0; | ||
2164 | if (ESP_IRQ_P(esp->dregs)) { | ||
2165 | /* Yes, we are able to save an interrupt. */ | ||
2166 | if (esp->erev == fashme) | ||
2167 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
2168 | esp->sreg = (sbus_readb(esp->eregs + ESP_STATUS) & ~(ESP_STAT_INTR)); | ||
2169 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2170 | if (esp->erev == fashme) { | ||
2171 | /* This chip is really losing. */ | ||
2172 | ESPHME(("HME[")); | ||
2173 | |||
2174 | /* Must latch fifo before reading the interrupt | ||
2175 | * register else garbage ends up in the FIFO | ||
2176 | * which confuses the driver utterly. | ||
2177 | * Happy Meal indeed.... | ||
2178 | */ | ||
2179 | ESPHME(("fifo_workaround]")); | ||
2180 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2181 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2182 | hme_fifo_read(esp); | ||
2183 | } | ||
2184 | if (!(esp->ireg & ESP_INTR_SR)) | ||
2185 | return 0; | ||
2186 | else | ||
2187 | return do_reset_complete; | ||
2188 | } | ||
2189 | /* Ho hum, target is taking forever... */ | ||
2190 | scp->SCp.sent_command = new_phase; /* so we don't recurse... */ | ||
2191 | return do_intr_end; | ||
2192 | } | ||
2193 | |||
2194 | /* Now some dma helpers. */ | ||
2195 | static void dma_setup(struct esp *esp, __u32 addr, int count, int write) | ||
2196 | { | ||
2197 | u32 nreg = sbus_readl(esp->dregs + DMA_CSR); | ||
2198 | |||
2199 | if (write) | ||
2200 | nreg |= DMA_ST_WRITE; | ||
2201 | else | ||
2202 | nreg &= ~(DMA_ST_WRITE); | ||
2203 | nreg |= DMA_ENABLE; | ||
2204 | sbus_writel(nreg, esp->dregs + DMA_CSR); | ||
2205 | if (esp->dma->revision == dvmaesc1) { | ||
2206 | /* This ESC gate array sucks! */ | ||
2207 | __u32 src = addr; | ||
2208 | __u32 dest = src + count; | ||
2209 | |||
2210 | if (dest & (PAGE_SIZE - 1)) | ||
2211 | count = PAGE_ALIGN(count); | ||
2212 | sbus_writel(count, esp->dregs + DMA_COUNT); | ||
2213 | } | ||
2214 | sbus_writel(addr, esp->dregs + DMA_ADDR); | ||
2215 | } | ||
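For the dvmaesc1 quirk handled in dma_setup() above, a small standalone sketch of the count rounding; the 4 KB page size, the addresses, and the EX_* names are illustrative assumptions only.

#include <stdio.h>

#define EX_PAGE_SIZE 4096UL     /* assumed page size for illustration */
#define EX_PAGE_ALIGN(x) (((x) + EX_PAGE_SIZE - 1) & ~(EX_PAGE_SIZE - 1))

int main(void)
{
        /* Hypothetical transfer: 6000 bytes whose end address is not
         * page aligned, so the count programmed into DMA_COUNT gets
         * rounded up to a whole number of pages.
         */
        unsigned long addr = 0x80001000UL;
        unsigned long count = 6000;

        if ((addr + count) & (EX_PAGE_SIZE - 1))
                count = EX_PAGE_ALIGN(count);
        printf("programmed count: %lu\n", count);       /* prints 8192 */
        return 0;
}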
2216 | |||
2217 | static void dma_drain(struct esp *esp) | ||
2218 | { | ||
2219 | u32 tmp; | ||
2220 | |||
2221 | if (esp->dma->revision == dvmahme) | ||
2222 | return; | ||
2223 | if ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_FIFO_ISDRAIN) { | ||
2224 | switch (esp->dma->revision) { | ||
2225 | default: | ||
2226 | tmp |= DMA_FIFO_STDRAIN; | ||
2227 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2228 | |||
2229 | case dvmarev3: | ||
2230 | case dvmaesc1: | ||
2231 | while (sbus_readl(esp->dregs + DMA_CSR) & DMA_FIFO_ISDRAIN) | ||
2232 | udelay(1); | ||
2233 | }; | ||
2234 | } | ||
2235 | } | ||
2236 | |||
2237 | static void dma_invalidate(struct esp *esp) | ||
2238 | { | ||
2239 | u32 tmp; | ||
2240 | |||
2241 | if (esp->dma->revision == dvmahme) { | ||
2242 | sbus_writel(DMA_RST_SCSI, esp->dregs + DMA_CSR); | ||
2243 | |||
2244 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
2245 | (DMA_PARITY_OFF | DMA_2CLKS | | ||
2246 | DMA_SCSI_DISAB | DMA_INT_ENAB)) & | ||
2247 | ~(DMA_ST_WRITE | DMA_ENABLE)); | ||
2248 | |||
2249 | sbus_writel(0, esp->dregs + DMA_CSR); | ||
2250 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
2251 | |||
2252 | /* This is necessary to avoid having the SCSI channel | ||
2253 | * engine lock up on us. | ||
2254 | */ | ||
2255 | sbus_writel(0, esp->dregs + DMA_ADDR); | ||
2256 | } else { | ||
2257 | while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ) | ||
2258 | udelay(1); | ||
2259 | |||
2260 | tmp &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); | ||
2261 | tmp |= DMA_FIFO_INV; | ||
2262 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2263 | tmp &= ~DMA_FIFO_INV; | ||
2264 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
2265 | } | ||
2266 | } | ||
2267 | |||
2268 | static inline void dma_flashclear(struct esp *esp) | ||
2269 | { | ||
2270 | dma_drain(esp); | ||
2271 | dma_invalidate(esp); | ||
2272 | } | ||
2273 | |||
2274 | static int dma_can_transfer(struct esp *esp, struct scsi_cmnd *sp) | ||
2275 | { | ||
2276 | __u32 base, end, sz; | ||
2277 | |||
2278 | if (esp->dma->revision == dvmarev3) { | ||
2279 | sz = sp->SCp.this_residual; | ||
2280 | if (sz > 0x1000000) | ||
2281 | sz = 0x1000000; | ||
2282 | } else { | ||
2283 | base = ((__u32)((unsigned long)sp->SCp.ptr)); | ||
2284 | base &= (0x1000000 - 1); | ||
2285 | end = (base + sp->SCp.this_residual); | ||
2286 | if (end > 0x1000000) | ||
2287 | end = 0x1000000; | ||
2288 | sz = (end - base); | ||
2289 | } | ||
2290 | return sz; | ||
2291 | } | ||
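A standalone sketch of the 16 MB boundary clamp that dma_can_transfer() applies on non-dvmarev3 engines; the base address and residual below are made-up example values.

#include <stdio.h>

int main(void)
{
        /* Hypothetical buffer: an 8 KB residual that starts 4 KB below
         * a 16 MB boundary, mirroring the non-dvmarev3 path above.
         */
        unsigned int base = (0x1000000 - 0x1000) & (0x1000000 - 1);
        unsigned int residual = 0x2000;
        unsigned int end = base + residual;

        if (end > 0x1000000)
                end = 0x1000000;        /* clamp at the 16 MB boundary */
        printf("can transfer %u of %u bytes\n", end - base, residual);
        return 0;
}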
2292 | |||
2293 | /* Misc. esp helper macros. */ | ||
2294 | #define esp_setcount(__eregs, __cnt, __hme) \ | ||
2295 | sbus_writeb(((__cnt)&0xff), (__eregs) + ESP_TCLOW); \ | ||
2296 | sbus_writeb((((__cnt)>>8)&0xff), (__eregs) + ESP_TCMED); \ | ||
2297 | if (__hme) { \ | ||
2298 | sbus_writeb((((__cnt)>>16)&0xff), (__eregs) + FAS_RLO); \ | ||
2299 | sbus_writeb(0, (__eregs) + FAS_RHI); \ | ||
2300 | } | ||
2301 | |||
2302 | #define esp_getcount(__eregs, __hme) \ | ||
2303 | ((sbus_readb((__eregs) + ESP_TCLOW)&0xff) | \ | ||
2304 | ((sbus_readb((__eregs) + ESP_TCMED)&0xff) << 8) | \ | ||
2305 | ((__hme) ? sbus_readb((__eregs) + FAS_RLO) << 16 : 0)) | ||
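A minimal standalone sketch of the byte splitting these two helpers perform, assuming only standard C; the register names in the comments refer to the macros above, and the count value is arbitrary.

#include <stdio.h>

int main(void)
{
        unsigned int count = 0x012345;                  /* example 24-bit count */
        unsigned char tclow = count & 0xff;             /* written to ESP_TCLOW */
        unsigned char tcmed = (count >> 8) & 0xff;      /* written to ESP_TCMED */
        unsigned char rlo = (count >> 16) & 0xff;       /* FAS_RLO, HME only */
        unsigned int back = tclow | (tcmed << 8) | (rlo << 16);

        printf("split %02x/%02x/%02x, reassembled %06x\n",
               tclow, tcmed, rlo, back);
        return 0;
}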
2306 | |||
2307 | #define fcount(__esp) \ | ||
2308 | (((__esp)->erev == fashme) ? \ | ||
2309 | (__esp)->hme_fifo_workaround_count : \ | ||
2310 | sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_FBYTES) | ||
2311 | |||
2312 | #define fnzero(__esp) \ | ||
2313 | (((__esp)->erev == fashme) ? 0 : \ | ||
2314 | sbus_readb(((__esp)->eregs) + ESP_FFLAGS) & ESP_FF_ONOTZERO) | ||
2315 | |||
2316 | /* XXX speculative nops unnecessary when continuing amidst a data phase | ||
2317 | * XXX even on esp100!!! another case of flooding the bus with I/O reg | ||
2318 | * XXX writes... | ||
2319 | */ | ||
2320 | #define esp_maybe_nop(__esp) \ | ||
2321 | if ((__esp)->erev == esp100) \ | ||
2322 | esp_cmd((__esp), ESP_CMD_NULL) | ||
2323 | |||
2324 | #define sreg_to_dataphase(__sreg) \ | ||
2325 | ((((__sreg) & ESP_STAT_PMASK) == ESP_DOP) ? in_dataout : in_datain) | ||
2326 | |||
2327 | /* The ESP100, when in synchronous data phase, can mistake a long final | ||
2328 | * REQ pulse from the target for an extra byte and place whatever is on | ||
2329 | * the data lines into the fifo. For now, we will assume when this | ||
2330 | * happens that the target is a bit quirky and we don't want to | ||
2331 | * be talking synchronously to it anyway. Regardless, we need to | ||
2332 | * tell the ESP to eat the extraneous byte so that we can proceed | ||
2333 | * to the next phase. | ||
2334 | */ | ||
2335 | static int esp100_sync_hwbug(struct esp *esp, struct scsi_cmnd *sp, int fifocnt) | ||
2336 | { | ||
2337 | /* Do not touch this piece of code. */ | ||
2338 | if ((!(esp->erev == esp100)) || | ||
2339 | (!(sreg_datainp((esp->sreg = sbus_readb(esp->eregs + ESP_STATUS))) && | ||
2340 | !fifocnt) && | ||
2341 | !(sreg_dataoutp(esp->sreg) && !fnzero(esp)))) { | ||
2342 | if (sp->SCp.phase == in_dataout) | ||
2343 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2344 | return 0; | ||
2345 | } else { | ||
2346 | /* Async mode for this guy. */ | ||
2347 | build_sync_nego_msg(esp, 0, 0); | ||
2348 | |||
2349 | /* Ack the bogus byte, but set ATN first. */ | ||
2350 | esp_cmd(esp, ESP_CMD_SATN); | ||
2351 | esp_cmd(esp, ESP_CMD_MOK); | ||
2352 | return 1; | ||
2353 | } | ||
2354 | } | ||
2355 | |||
2356 | /* This closes the window during a selection with a reselect pending. Because | ||
2357 | * we use DMA for the selection process, the FIFO should hold the correct | ||
2358 | * contents if we get reselected during this process, so we just need to | ||
2359 | * ack the possible illegal cmd interrupt pending on the esp100. | ||
2360 | */ | ||
2361 | static inline int esp100_reconnect_hwbug(struct esp *esp) | ||
2362 | { | ||
2363 | u8 tmp; | ||
2364 | |||
2365 | if (esp->erev != esp100) | ||
2366 | return 0; | ||
2367 | tmp = sbus_readb(esp->eregs + ESP_INTRPT); | ||
2368 | if (tmp & ESP_INTR_SR) | ||
2369 | return 1; | ||
2370 | return 0; | ||
2371 | } | ||
2372 | |||
2373 | /* This verifies the BUSID bits during a reselection so that we know which | ||
2374 | * target is talking to us. | ||
2375 | */ | ||
2376 | static inline int reconnect_target(struct esp *esp) | ||
2377 | { | ||
2378 | int it, me = esp->scsi_id_mask, targ = 0; | ||
2379 | |||
2380 | if (2 != fcount(esp)) | ||
2381 | return -1; | ||
2382 | if (esp->erev == fashme) { | ||
2383 | /* HME does not latch its own BUS ID bits during | ||
2384 | * a reselection. Also the target number is given | ||
2385 | * as an unsigned char, not as a sole bit number | ||
2386 | * like the other ESPs do. | ||
2387 | * Happy Meal indeed.... | ||
2388 | */ | ||
2389 | targ = esp->hme_fifo_workaround_buffer[0]; | ||
2390 | } else { | ||
2391 | it = sbus_readb(esp->eregs + ESP_FDATA); | ||
2392 | if (!(it & me)) | ||
2393 | return -1; | ||
2394 | it &= ~me; | ||
2395 | if (it & (it - 1)) | ||
2396 | return -1; | ||
2397 | while (!(it & 1)) | ||
2398 | targ++, it >>= 1; | ||
2399 | } | ||
2400 | return targ; | ||
2401 | } | ||
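A standalone sketch of the BUSID bit decoding reconnect_target() does for the non-HME case; the fifo byte and our own initiator ID below are hypothetical.

#include <stdio.h>

int main(void)
{
        unsigned char me = 1 << 7;      /* assume our initiator ID is 7 */
        unsigned char it = 0x88;        /* fifo byte: our bit plus the target's bit */
        int targ = 0;

        if (!(it & me)) {
                printf("bogus reselection byte\n");
                return 1;
        }
        it &= ~me;
        if (it & (it - 1)) {            /* more than one target bit set */
                printf("bogus reselection byte\n");
                return 1;
        }
        while (!(it & 1)) {
                targ++;
                it >>= 1;
        }
        printf("reselected by target %d\n", targ);      /* prints 3 */
        return 0;
}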
2402 | |||
2403 | /* This verifies the identify from the target so that we know which lun is | ||
2404 | * being reconnected. | ||
2405 | */ | ||
2406 | static inline int reconnect_lun(struct esp *esp) | ||
2407 | { | ||
2408 | int lun; | ||
2409 | |||
2410 | if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) | ||
2411 | return -1; | ||
2412 | if (esp->erev == fashme) | ||
2413 | lun = esp->hme_fifo_workaround_buffer[1]; | ||
2414 | else | ||
2415 | lun = sbus_readb(esp->eregs + ESP_FDATA); | ||
2416 | |||
2417 | /* Yes, you read this correctly. We report a lun of zero | ||
2418 | * if we see a parity error. The ESP reports a parity error for | ||
2419 | * the lun byte, and this is the only way to hope to recover | ||
2420 | * because the target is still connected. | ||
2421 | */ | ||
2422 | if (esp->sreg & ESP_STAT_PERR) | ||
2423 | return 0; | ||
2424 | |||
2425 | /* Check for illegal bits being set in the lun. */ | ||
2426 | if ((lun & 0x40) || !(lun & 0x80)) | ||
2427 | return -1; | ||
2428 | |||
2429 | return lun & 7; | ||
2430 | } | ||
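Similarly, a small standalone sketch of the identify-byte sanity check in reconnect_lun(); the byte value is a made-up example.

#include <stdio.h>

int main(void)
{
        /* Hypothetical identify byte from the reconnecting target: bit
         * 0x80 must be set, 0x40 must be clear, low three bits are the lun.
         */
        unsigned char identify = 0x83;

        if ((identify & 0x40) || !(identify & 0x80))
                printf("bad identify message\n");
        else
                printf("reconnect for lun %d\n", identify & 7);  /* lun 3 */
        return 0;
}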
2431 | |||
2432 | /* This puts the driver in a state where it can revitalize a command that | ||
2433 | * is being continued due to reselection. | ||
2434 | */ | ||
2435 | static inline void esp_connect(struct esp *esp, struct scsi_cmnd *sp) | ||
2436 | { | ||
2437 | struct esp_device *esp_dev = sp->device->hostdata; | ||
2438 | |||
2439 | if (esp->prev_soff != esp_dev->sync_max_offset || | ||
2440 | esp->prev_stp != esp_dev->sync_min_period || | ||
2441 | (esp->erev > esp100a && | ||
2442 | esp->prev_cfg3 != esp->config3[sp->device->id])) { | ||
2443 | esp->prev_soff = esp_dev->sync_max_offset; | ||
2444 | esp->prev_stp = esp_dev->sync_min_period; | ||
2445 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
2446 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
2447 | if (esp->erev > esp100a) { | ||
2448 | esp->prev_cfg3 = esp->config3[sp->device->id]; | ||
2449 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
2450 | } | ||
2451 | } | ||
2452 | esp->current_SC = sp; | ||
2453 | } | ||
2454 | |||
2455 | /* This will place the current working command back into the issue queue | ||
2456 | * if we are to receive a reselection amidst a selection attempt. | ||
2457 | */ | ||
2458 | static inline void esp_reconnect(struct esp *esp, struct scsi_cmnd *sp) | ||
2459 | { | ||
2460 | if (!esp->disconnected_SC) | ||
2461 | ESPLOG(("esp%d: Weird, being reselected but disconnected " | ||
2462 | "command queue is empty.\n", esp->esp_id)); | ||
2463 | esp->snip = 0; | ||
2464 | esp->current_SC = NULL; | ||
2465 | sp->SCp.phase = not_issued; | ||
2466 | append_SC(&esp->issue_SC, sp); | ||
2467 | } | ||
2468 | |||
2469 | /* Begin message in phase. */ | ||
2470 | static int esp_do_msgin(struct esp *esp) | ||
2471 | { | ||
2472 | /* Must be very careful with the fifo on the HME */ | ||
2473 | if ((esp->erev != fashme) || | ||
2474 | !(sbus_readb(esp->eregs + ESP_STATUS2) & ESP_STAT2_FEMPTY)) | ||
2475 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2476 | esp_maybe_nop(esp); | ||
2477 | esp_cmd(esp, ESP_CMD_TI); | ||
2478 | esp->msgin_len = 1; | ||
2479 | esp->msgin_ctr = 0; | ||
2480 | esp_advance_phase(esp->current_SC, in_msgindone); | ||
2481 | return do_work_bus; | ||
2482 | } | ||
2483 | |||
2484 | /* This uses various DMA csr fields and the fifo flags count value to | ||
2485 | * determine how many bytes were successfully sent/received by the ESP. | ||
2486 | */ | ||
2487 | static inline int esp_bytes_sent(struct esp *esp, int fifo_count) | ||
2488 | { | ||
2489 | int rval = sbus_readl(esp->dregs + DMA_ADDR) - esp->esp_command_dvma; | ||
2490 | |||
2491 | if (esp->dma->revision == dvmarev1) | ||
2492 | rval -= (4 - ((sbus_readl(esp->dregs + DMA_CSR) & DMA_READ_AHEAD)>>11)); | ||
2493 | return rval - fifo_count; | ||
2494 | } | ||
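A rough standalone illustration of the accounting esp_bytes_sent() performs, using made-up DMA pointer and fifo values (the extra dvmarev1 read-ahead correction is noted but not modelled).

#include <stdio.h>

int main(void)
{
        /* Hypothetical snapshot: the DMA address register has advanced
         * 512 bytes past the command buffer's DVMA base, while 2 bytes
         * are still sitting in the chip's fifo and never hit the bus.
         */
        unsigned long dma_addr = 0x80000200UL;
        unsigned long dvma_base = 0x80000000UL;
        int fifo_count = 2;
        long sent = (long)(dma_addr - dvma_base) - fifo_count;

        printf("bytes actually sent: %ld\n", sent);     /* prints 510 */
        return 0;
}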
2495 | |||
2496 | static inline void advance_sg(struct scsi_cmnd *sp) | ||
2497 | { | ||
2498 | ++sp->SCp.buffer; | ||
2499 | --sp->SCp.buffers_residual; | ||
2500 | sp->SCp.this_residual = sg_dma_len(sp->SCp.buffer); | ||
2501 | sp->SCp.ptr = (char *)((unsigned long)sg_dma_address(sp->SCp.buffer)); | ||
2502 | } | ||
2503 | |||
2504 | /* Please note that the way I've coded these routines is that I _always_ | ||
2505 | * check for a disconnect during any and all information transfer | ||
2506 | * phases. The SCSI standard states that the target _can_ cause a BUS | ||
2507 | * FREE condition by dropping all MSG/CD/IO/BSY signals. Also note | ||
2508 | * that during information transfer phases the target controls every | ||
2509 | * change in phase, the only thing the initiator can do is "ask" for | ||
2510 | * a message out phase by driving ATN true. The target can, and sometimes | ||
2511 | * will, completely ignore this request so we cannot assume anything when | ||
2512 | * we try to force a message out phase to abort/reset a target. Most of | ||
2513 | * the time the target will eventually be nice and go to message out, so | ||
2514 | * we may have to hold on to our state about what we want to tell the target | ||
2515 | * for some period of time. | ||
2516 | */ | ||
2517 | |||
2518 | /* I think I have things working here correctly. Even partial transfers | ||
2519 | * within a buffer or sub-buffer should not upset us at all no matter | ||
2520 | * how bad the target and/or ESP fucks things up. | ||
2521 | */ | ||
2522 | static int esp_do_data(struct esp *esp) | ||
2523 | { | ||
2524 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2525 | int thisphase, hmuch; | ||
2526 | |||
2527 | ESPDATA(("esp_do_data: ")); | ||
2528 | esp_maybe_nop(esp); | ||
2529 | thisphase = sreg_to_dataphase(esp->sreg); | ||
2530 | esp_advance_phase(SCptr, thisphase); | ||
2531 | ESPDATA(("newphase<%s> ", (thisphase == in_datain) ? "DATAIN" : "DATAOUT")); | ||
2532 | hmuch = dma_can_transfer(esp, SCptr); | ||
2533 | if (hmuch > (64 * 1024) && (esp->erev != fashme)) | ||
2534 | hmuch = (64 * 1024); | ||
2535 | ESPDATA(("hmuch<%d> ", hmuch)); | ||
2536 | esp->current_transfer_size = hmuch; | ||
2537 | |||
2538 | if (esp->erev == fashme) { | ||
2539 | u32 tmp = esp->prev_hme_dmacsr; | ||
2540 | |||
2541 | /* Always set the ESP count registers first. */ | ||
2542 | esp_setcount(esp->eregs, hmuch, 1); | ||
2543 | |||
2544 | /* Get the DMA csr computed. */ | ||
2545 | tmp |= (DMA_SCSI_DISAB | DMA_ENABLE); | ||
2546 | if (thisphase == in_datain) | ||
2547 | tmp |= DMA_ST_WRITE; | ||
2548 | else | ||
2549 | tmp &= ~(DMA_ST_WRITE); | ||
2550 | esp->prev_hme_dmacsr = tmp; | ||
2551 | |||
2552 | ESPDATA(("DMA|TI --> do_intr_end\n")); | ||
2553 | if (thisphase == in_datain) { | ||
2554 | sbus_writel(hmuch, esp->dregs + DMA_COUNT); | ||
2555 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2556 | } else { | ||
2557 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2558 | sbus_writel(hmuch, esp->dregs + DMA_COUNT); | ||
2559 | } | ||
2560 | sbus_writel((__u32)((unsigned long)SCptr->SCp.ptr), esp->dregs+DMA_ADDR); | ||
2561 | sbus_writel(esp->prev_hme_dmacsr, esp->dregs + DMA_CSR); | ||
2562 | } else { | ||
2563 | esp_setcount(esp->eregs, hmuch, 0); | ||
2564 | dma_setup(esp, ((__u32)((unsigned long)SCptr->SCp.ptr)), | ||
2565 | hmuch, (thisphase == in_datain)); | ||
2566 | ESPDATA(("DMA|TI --> do_intr_end\n")); | ||
2567 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
2568 | } | ||
2569 | return do_intr_end; | ||
2570 | } | ||
2571 | |||
2572 | /* See how successful the data transfer was. */ | ||
2573 | static int esp_do_data_finale(struct esp *esp) | ||
2574 | { | ||
2575 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2576 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
2577 | int bogus_data = 0, bytes_sent = 0, fifocnt, ecount = 0; | ||
2578 | |||
2579 | ESPDATA(("esp_do_data_finale: ")); | ||
2580 | |||
2581 | if (SCptr->SCp.phase == in_datain) { | ||
2582 | if (esp->sreg & ESP_STAT_PERR) { | ||
2583 | /* Yuck, parity error. The ESP asserts ATN | ||
2584 | * so that we can go to message out phase | ||
2585 | * immediately and inform the target that | ||
2586 | * something bad happened. | ||
2587 | */ | ||
2588 | ESPLOG(("esp%d: data bad parity detected.\n", | ||
2589 | esp->esp_id)); | ||
2590 | esp->cur_msgout[0] = INITIATOR_ERROR; | ||
2591 | esp->msgout_len = 1; | ||
2592 | } | ||
2593 | dma_drain(esp); | ||
2594 | } | ||
2595 | dma_invalidate(esp); | ||
2596 | |||
2597 | /* This could happen for the above parity error case. */ | ||
2598 | if (esp->ireg != ESP_INTR_BSERV) { | ||
2599 | /* Please go to msgout phase, please please please... */ | ||
2600 | ESPLOG(("esp%d: !BSERV after data, probably to msgout\n", | ||
2601 | esp->esp_id)); | ||
2602 | return esp_do_phase_determine(esp); | ||
2603 | } | ||
2604 | |||
2605 | /* Check for partial transfers and other horrible events. | ||
2606 | * Note, here we read the real fifo flags register even | ||
2607 | * on HME broken adapters because we skip the HME fifo | ||
2608 | * workaround code in esp_handle() if we are doing data | ||
2609 | * phase things. We don't want to fuck directly with | ||
2610 | * the fifo like that, especially if doing synchronous | ||
2611 | * transfers! Also, we will need to double the count on | ||
2612 | * HME if we are doing wide transfers, as the HME fifo | ||
2613 | * will move and count 16-bit quantities during wide data. | ||
2614 | * SMCC _and_ Qlogic can both bite me. | ||
2615 | */ | ||
2616 | fifocnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES); | ||
2617 | if (esp->erev != fashme) | ||
2618 | ecount = esp_getcount(esp->eregs, 0); | ||
2619 | bytes_sent = esp->current_transfer_size; | ||
2620 | |||
2621 | ESPDATA(("trans_sz(%d), ", bytes_sent)); | ||
2622 | if (esp->erev == fashme) { | ||
2623 | if (!(esp->sreg & ESP_STAT_TCNT)) { | ||
2624 | ecount = esp_getcount(esp->eregs, 1); | ||
2625 | bytes_sent -= ecount; | ||
2626 | } | ||
2627 | |||
2628 | /* Always subtract any cruft remaining in the FIFO. */ | ||
2629 | if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) | ||
2630 | fifocnt <<= 1; | ||
2631 | if (SCptr->SCp.phase == in_dataout) | ||
2632 | bytes_sent -= fifocnt; | ||
2633 | |||
2634 | /* I have an IBM disk which exhibits the following | ||
2635 | * behavior during writes to it. It disconnects in | ||
2636 | * the middle of a partial transfer: the current sglist | ||
2637 | * buffer is 1024 bytes, and the disk stops the data | ||
2638 | * transfer at 512 bytes. | ||
2639 | * | ||
2640 | * However the FAS366 reports that 32 more bytes were | ||
2641 | * transferred than really were. This is precisely | ||
2642 | * the size of a fully loaded FIFO in wide scsi mode. | ||
2643 | * The FIFO state recorded indicates that it is empty. | ||
2644 | * | ||
2645 | * I have no idea if this is a bug in the FAS366 chip | ||
2646 | * or a bug in the firmware on this IBM disk. In any | ||
2647 | * event the following seems to be a good workaround. -DaveM | ||
2648 | */ | ||
2649 | if (bytes_sent != esp->current_transfer_size && | ||
2650 | SCptr->SCp.phase == in_dataout) { | ||
2651 | int mask = (64 - 1); | ||
2652 | |||
2653 | if ((esp->prev_cfg3 & ESP_CONFIG3_EWIDE) == 0) | ||
2654 | mask >>= 1; | ||
2655 | |||
2656 | if (bytes_sent & mask) | ||
2657 | bytes_sent -= (bytes_sent & mask); | ||
2658 | } | ||
2659 | } else { | ||
2660 | if (!(esp->sreg & ESP_STAT_TCNT)) | ||
2661 | bytes_sent -= ecount; | ||
2662 | if (SCptr->SCp.phase == in_dataout) | ||
2663 | bytes_sent -= fifocnt; | ||
2664 | } | ||
2665 | |||
2666 | ESPDATA(("bytes_sent(%d), ", bytes_sent)); | ||
2667 | |||
2668 | /* If we were in synchronous mode, check for peculiarities. */ | ||
2669 | if (esp->erev == fashme) { | ||
2670 | if (esp_dev->sync_max_offset) { | ||
2671 | if (SCptr->SCp.phase == in_dataout) | ||
2672 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2673 | } else { | ||
2674 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2675 | } | ||
2676 | } else { | ||
2677 | if (esp_dev->sync_max_offset) | ||
2678 | bogus_data = esp100_sync_hwbug(esp, SCptr, fifocnt); | ||
2679 | else | ||
2680 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2681 | } | ||
2682 | |||
2683 | /* Until we are sure of what has happened, we are certainly | ||
2684 | * in the dark. | ||
2685 | */ | ||
2686 | esp_advance_phase(SCptr, in_the_dark); | ||
2687 | |||
2688 | if (bytes_sent < 0) { | ||
2689 | /* I've seen this happen due to lost state in this | ||
2690 | * driver. No idea why it happened, but allowing | ||
2691 | * this value to be negative caused things to | ||
2692 | * lock up. This allows a greater chance of recovery. | ||
2693 | * In fact every time I've seen this, it has been | ||
2694 | * a driver bug without question. | ||
2695 | */ | ||
2696 | ESPLOG(("esp%d: yieee, bytes_sent < 0!\n", esp->esp_id)); | ||
2697 | ESPLOG(("esp%d: csz=%d fifocount=%d ecount=%d\n", | ||
2698 | esp->esp_id, | ||
2699 | esp->current_transfer_size, fifocnt, ecount)); | ||
2700 | ESPLOG(("esp%d: use_sg=%d ptr=%p this_residual=%d\n", | ||
2701 | esp->esp_id, | ||
2702 | SCptr->use_sg, SCptr->SCp.ptr, SCptr->SCp.this_residual)); | ||
2703 | ESPLOG(("esp%d: Forcing async for target %d\n", esp->esp_id, | ||
2704 | SCptr->device->id)); | ||
2705 | SCptr->device->borken = 1; | ||
2706 | esp_dev->sync = 0; | ||
2707 | bytes_sent = 0; | ||
2708 | } | ||
2709 | |||
2710 | /* Update the state of our transfer. */ | ||
2711 | SCptr->SCp.ptr += bytes_sent; | ||
2712 | SCptr->SCp.this_residual -= bytes_sent; | ||
2713 | if (SCptr->SCp.this_residual < 0) { | ||
2714 | /* shit */ | ||
2715 | ESPLOG(("esp%d: Data transfer overrun.\n", esp->esp_id)); | ||
2716 | SCptr->SCp.this_residual = 0; | ||
2717 | } | ||
2718 | |||
2719 | /* Maybe continue. */ | ||
2720 | if (!bogus_data) { | ||
2721 | ESPDATA(("!bogus_data, ")); | ||
2722 | |||
2723 | /* NO MATTER WHAT, we advance the scatterlist; | ||
2724 | * if the target should decide to disconnect | ||
2725 | * in between scatter chunks (which is common) | ||
2726 | * we could otherwise die horribly! I used to have the sg | ||
2727 | * advance occur only if we are going back into | ||
2728 | * (or are staying in) a data phase, you can | ||
2729 | * imagine the hell I went through trying to | ||
2730 | * figure this out. | ||
2731 | */ | ||
2732 | if (SCptr->use_sg && !SCptr->SCp.this_residual) | ||
2733 | advance_sg(SCptr); | ||
2734 | if (sreg_datainp(esp->sreg) || sreg_dataoutp(esp->sreg)) { | ||
2735 | ESPDATA(("to more data\n")); | ||
2736 | return esp_do_data(esp); | ||
2737 | } | ||
2738 | ESPDATA(("to new phase\n")); | ||
2739 | return esp_do_phase_determine(esp); | ||
2740 | } | ||
2741 | /* Bogus data, just wait for next interrupt. */ | ||
2742 | ESPLOG(("esp%d: bogus_data during end of data phase\n", | ||
2743 | esp->esp_id)); | ||
2744 | return do_intr_end; | ||
2745 | } | ||
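A standalone worked example of the FAS366 write workaround arithmetic in esp_do_data_finale() above, with hypothetical numbers (544 bytes claimed on a wide bus).

#include <stdio.h>

int main(void)
{
        /* The chip claims 544 bytes went out on a wide bus, but a full
         * 64-byte fifo never reached the target; round the count down
         * to the nearest fifo-sized multiple, as the workaround does.
         */
        int bytes_sent = 544;
        int wide = 1;                   /* assumed wide transfer */
        int mask = (wide ? 64 : 32) - 1;

        if (bytes_sent & mask)
                bytes_sent -= (bytes_sent & mask);
        printf("trusted byte count: %d\n", bytes_sent); /* prints 512 */
        return 0;
}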
2746 | |||
2747 | /* We received a non-good status return at the end of | ||
2748 | * running a SCSI command. This is used to decide if | ||
2749 | * we should clear our synchronous transfer state for | ||
2750 | * such a device when that happens. | ||
2751 | * | ||
2752 | * The idea is that when spinning up a disk or rewinding | ||
2753 | * a tape, we don't want to go into a loop re-negotiating | ||
2754 | * synchronous capabilities over and over. | ||
2755 | */ | ||
2756 | static int esp_should_clear_sync(struct scsi_cmnd *sp) | ||
2757 | { | ||
2758 | u8 cmd = sp->cmnd[0]; | ||
2759 | |||
2760 | /* These cases are for spinning up a disk and | ||
2761 | * waiting for that spinup to complete. | ||
2762 | */ | ||
2763 | if (cmd == START_STOP) | ||
2764 | return 0; | ||
2765 | |||
2766 | if (cmd == TEST_UNIT_READY) | ||
2767 | return 0; | ||
2768 | |||
2769 | /* One more special case for SCSI tape drives: | ||
2770 | * this is what is used to probe the device for | ||
2771 | * completion of a rewind or tape load operation. | ||
2772 | */ | ||
2773 | if (sp->device->type == TYPE_TAPE) { | ||
2774 | if (cmd == MODE_SENSE) | ||
2775 | return 0; | ||
2776 | } | ||
2777 | |||
2778 | return 1; | ||
2779 | } | ||
2780 | |||
2781 | /* Either a command is completing or a target is dropping off the bus | ||
2782 | * to continue the command in the background so we can do other work. | ||
2783 | */ | ||
2784 | static int esp_do_freebus(struct esp *esp) | ||
2785 | { | ||
2786 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2787 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
2788 | int rval; | ||
2789 | |||
2790 | rval = skipahead2(esp, SCptr, in_status, in_msgindone, in_freeing); | ||
2791 | if (rval) | ||
2792 | return rval; | ||
2793 | if (esp->ireg != ESP_INTR_DC) { | ||
2794 | ESPLOG(("esp%d: Target will not disconnect\n", esp->esp_id)); | ||
2795 | return do_reset_bus; /* target will not drop BSY... */ | ||
2796 | } | ||
2797 | esp->msgout_len = 0; | ||
2798 | esp->prevmsgout = NOP; | ||
2799 | if (esp->prevmsgin == COMMAND_COMPLETE) { | ||
2800 | /* Normal end of nexus. */ | ||
2801 | if (esp->disconnected_SC || (esp->erev == fashme)) | ||
2802 | esp_cmd(esp, ESP_CMD_ESEL); | ||
2803 | |||
2804 | if (SCptr->SCp.Status != GOOD && | ||
2805 | SCptr->SCp.Status != CONDITION_GOOD && | ||
2806 | ((1<<SCptr->device->id) & esp->targets_present) && | ||
2807 | esp_dev->sync && | ||
2808 | esp_dev->sync_max_offset) { | ||
2809 | /* SCSI standard says that the synchronous capabilities | ||
2810 | * should be renegotiated at this point. Most likely | ||
2811 | * we are about to request sense from this target | ||
2812 | * in which case we want to avoid using sync | ||
2813 | * transfers until we are sure of the current target | ||
2814 | * state. | ||
2815 | */ | ||
2816 | ESPMISC(("esp: Status <%d> for target %d lun %d\n", | ||
2817 | SCptr->SCp.Status, SCptr->device->id, SCptr->device->lun)); | ||
2818 | |||
2819 | /* But don't do this when spinning up a disk at | ||
2820 | * boot time while we poll for completion as it | ||
2821 | * fills up the console with messages. Also, tapes | ||
2822 | * can report not ready many times right after | ||
2823 | * loading up a tape. | ||
2824 | */ | ||
2825 | if (esp_should_clear_sync(SCptr) != 0) | ||
2826 | esp_dev->sync = 0; | ||
2827 | } | ||
2828 | ESPDISC(("F<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); | ||
2829 | esp_done(esp, ((SCptr->SCp.Status & 0xff) | | ||
2830 | ((SCptr->SCp.Message & 0xff)<<8) | | ||
2831 | (DID_OK << 16))); | ||
2832 | } else if (esp->prevmsgin == DISCONNECT) { | ||
2833 | /* Normal disconnect. */ | ||
2834 | esp_cmd(esp, ESP_CMD_ESEL); | ||
2835 | ESPDISC(("D<%02x,%02x>", SCptr->device->id, SCptr->device->lun)); | ||
2836 | append_SC(&esp->disconnected_SC, SCptr); | ||
2837 | esp->current_SC = NULL; | ||
2838 | if (esp->issue_SC) | ||
2839 | esp_exec_cmd(esp); | ||
2840 | } else { | ||
2841 | /* Driver bug: we do not expect a disconnect here | ||
2842 | * and should not have advanced the state engine | ||
2843 | * to in_freeing. | ||
2844 | */ | ||
2845 | ESPLOG(("esp%d: last msg not disc and not cmd cmplt.\n", | ||
2846 | esp->esp_id)); | ||
2847 | return do_reset_bus; | ||
2848 | } | ||
2849 | return do_intr_end; | ||
2850 | } | ||
2851 | |||
2852 | /* When a reselect occurs, and we cannot find the command to | ||
2853 | * reconnect to in our queues, we do this. | ||
2854 | */ | ||
2855 | static int esp_bad_reconnect(struct esp *esp) | ||
2856 | { | ||
2857 | struct scsi_cmnd *sp; | ||
2858 | |||
2859 | ESPLOG(("esp%d: Eieeee, reconnecting unknown command!\n", | ||
2860 | esp->esp_id)); | ||
2861 | ESPLOG(("QUEUE DUMP\n")); | ||
2862 | sp = esp->issue_SC; | ||
2863 | ESPLOG(("esp%d: issue_SC[", esp->esp_id)); | ||
2864 | while (sp) { | ||
2865 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2866 | sp = (struct scsi_cmnd *) sp->host_scribble; | ||
2867 | } | ||
2868 | ESPLOG(("]\n")); | ||
2869 | sp = esp->current_SC; | ||
2870 | ESPLOG(("esp%d: current_SC[", esp->esp_id)); | ||
2871 | if (sp) | ||
2872 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2873 | else | ||
2874 | ESPLOG(("<NULL>")); | ||
2875 | ESPLOG(("]\n")); | ||
2876 | sp = esp->disconnected_SC; | ||
2877 | ESPLOG(("esp%d: disconnected_SC[", esp->esp_id)); | ||
2878 | while (sp) { | ||
2879 | ESPLOG(("<%02x,%02x>", sp->device->id, sp->device->lun)); | ||
2880 | sp = (struct scsi_cmnd *) sp->host_scribble; | ||
2881 | } | ||
2882 | ESPLOG(("]\n")); | ||
2883 | return do_reset_bus; | ||
2884 | } | ||
2885 | |||
2886 | /* Do what is needed when a target tries to reconnect to us. */ | ||
2887 | static int esp_do_reconnect(struct esp *esp) | ||
2888 | { | ||
2889 | int lun, target; | ||
2890 | struct scsi_cmnd *SCptr; | ||
2891 | |||
2892 | /* Check for all bogus conditions first. */ | ||
2893 | target = reconnect_target(esp); | ||
2894 | if (target < 0) { | ||
2895 | ESPDISC(("bad bus bits\n")); | ||
2896 | return do_reset_bus; | ||
2897 | } | ||
2898 | lun = reconnect_lun(esp); | ||
2899 | if (lun < 0) { | ||
2900 | ESPDISC(("target=%2x, bad identify msg\n", target)); | ||
2901 | return do_reset_bus; | ||
2902 | } | ||
2903 | |||
2904 | /* Things look ok... */ | ||
2905 | ESPDISC(("R<%02x,%02x>", target, lun)); | ||
2906 | |||
2907 | /* Must not flush FIFO or DVMA on HME. */ | ||
2908 | if (esp->erev != fashme) { | ||
2909 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
2910 | if (esp100_reconnect_hwbug(esp)) | ||
2911 | return do_reset_bus; | ||
2912 | esp_cmd(esp, ESP_CMD_NULL); | ||
2913 | } | ||
2914 | |||
2915 | SCptr = remove_SC(&esp->disconnected_SC, (u8) target, (u8) lun); | ||
2916 | if (!SCptr) | ||
2917 | return esp_bad_reconnect(esp); | ||
2918 | |||
2919 | esp_connect(esp, SCptr); | ||
2920 | esp_cmd(esp, ESP_CMD_MOK); | ||
2921 | |||
2922 | if (esp->erev == fashme) | ||
2923 | sbus_writeb(((SCptr->device->id & 0xf) | | ||
2924 | (ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT)), | ||
2925 | esp->eregs + ESP_BUSID); | ||
2926 | |||
2927 | /* Reconnect implies a restore pointers operation. */ | ||
2928 | esp_restore_pointers(esp, SCptr); | ||
2929 | |||
2930 | esp->snip = 0; | ||
2931 | esp_advance_phase(SCptr, in_the_dark); | ||
2932 | return do_intr_end; | ||
2933 | } | ||
2934 | |||
2935 | /* End of NEXUS (hopefully): pick up the status + message bytes, then leave if | ||
2936 | * all goes well. | ||
2937 | */ | ||
2938 | static int esp_do_status(struct esp *esp) | ||
2939 | { | ||
2940 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
2941 | int intr, rval; | ||
2942 | |||
2943 | rval = skipahead1(esp, SCptr, in_the_dark, in_status); | ||
2944 | if (rval) | ||
2945 | return rval; | ||
2946 | intr = esp->ireg; | ||
2947 | ESPSTAT(("esp_do_status: ")); | ||
2948 | if (intr != ESP_INTR_DC) { | ||
2949 | int message_out = 0; /* for parity problems */ | ||
2950 | |||
2951 | /* Ack the message. */ | ||
2952 | ESPSTAT(("ack msg, ")); | ||
2953 | esp_cmd(esp, ESP_CMD_MOK); | ||
2954 | |||
2955 | if (esp->erev != fashme) { | ||
2956 | dma_flashclear(esp); | ||
2957 | |||
2958 | /* Wait till the first bits settle. */ | ||
2959 | while (esp->esp_command[0] == 0xff) | ||
2960 | udelay(1); | ||
2961 | } else { | ||
2962 | esp->esp_command[0] = esp->hme_fifo_workaround_buffer[0]; | ||
2963 | esp->esp_command[1] = esp->hme_fifo_workaround_buffer[1]; | ||
2964 | } | ||
2965 | |||
2966 | ESPSTAT(("got something, ")); | ||
2967 | /* ESP chimes in with one of | ||
2968 | * | ||
2969 | * 1) function done interrupt: | ||
2970 | * both status and message in bytes | ||
2971 | * are available | ||
2972 | * | ||
2973 | * 2) bus service interrupt: | ||
2974 | * only status byte was acquired | ||
2975 | * | ||
2976 | * 3) Anything else: | ||
2977 | * can't happen, but we test for it | ||
2978 | * anyway | ||
2979 | * | ||
2980 | * ALSO: If bad parity was detected on either | ||
2981 | * the status _or_ the message byte then | ||
2982 | * the ESP has asserted ATN on the bus | ||
2983 | * and we must therefore wait for the | ||
2984 | * next phase change. | ||
2985 | */ | ||
2986 | if (intr & ESP_INTR_FDONE) { | ||
2987 | /* We got it all, hallelujah. */ | ||
2988 | ESPSTAT(("got both, ")); | ||
2989 | SCptr->SCp.Status = esp->esp_command[0]; | ||
2990 | SCptr->SCp.Message = esp->esp_command[1]; | ||
2991 | esp->prevmsgin = SCptr->SCp.Message; | ||
2992 | esp->cur_msgin[0] = SCptr->SCp.Message; | ||
2993 | if (esp->sreg & ESP_STAT_PERR) { | ||
2994 | /* There was bad parity for the | ||
2995 | * message byte, the status byte | ||
2996 | * was ok. | ||
2997 | */ | ||
2998 | message_out = MSG_PARITY_ERROR; | ||
2999 | } | ||
3000 | } else if (intr == ESP_INTR_BSERV) { | ||
3001 | /* Only got status byte. */ | ||
3002 | ESPLOG(("esp%d: got status only, ", esp->esp_id)); | ||
3003 | if (!(esp->sreg & ESP_STAT_PERR)) { | ||
3004 | SCptr->SCp.Status = esp->esp_command[0]; | ||
3005 | SCptr->SCp.Message = 0xff; | ||
3006 | } else { | ||
3007 | /* The status byte had bad parity. | ||
3008 | * We leave the scsi_pointer Status | ||
3009 | * field alone as we set it to a default | ||
3010 | * of CHECK_CONDITION in esp_queue. | ||
3011 | */ | ||
3012 | message_out = INITIATOR_ERROR; | ||
3013 | } | ||
3014 | } else { | ||
3015 | /* This shouldn't happen ever. */ | ||
3016 | ESPSTAT(("got bolixed\n")); | ||
3017 | esp_advance_phase(SCptr, in_the_dark); | ||
3018 | return esp_do_phase_determine(esp); | ||
3019 | } | ||
3020 | |||
3021 | if (!message_out) { | ||
3022 | ESPSTAT(("status=%2x msg=%2x, ", SCptr->SCp.Status, | ||
3023 | SCptr->SCp.Message)); | ||
3024 | if (SCptr->SCp.Message == COMMAND_COMPLETE) { | ||
3025 | ESPSTAT(("and was COMMAND_COMPLETE\n")); | ||
3026 | esp_advance_phase(SCptr, in_freeing); | ||
3027 | return esp_do_freebus(esp); | ||
3028 | } else { | ||
3029 | ESPLOG(("esp%d: and _not_ COMMAND_COMPLETE\n", | ||
3030 | esp->esp_id)); | ||
3031 | esp->msgin_len = esp->msgin_ctr = 1; | ||
3032 | esp_advance_phase(SCptr, in_msgindone); | ||
3033 | return esp_do_msgindone(esp); | ||
3034 | } | ||
3035 | } else { | ||
3036 | /* With luck we'll be able to let the target | ||
3037 | * know that bad parity happened; it will know | ||
3038 | * which byte caused the problem and send it | ||
3039 | * again. For the case where the status byte | ||
3040 | * receives bad parity, I do not believe most | ||
3041 | * targets recover very well. We'll see. | ||
3042 | */ | ||
3043 | ESPLOG(("esp%d: bad parity somewhere mout=%2x\n", | ||
3044 | esp->esp_id, message_out)); | ||
3045 | esp->cur_msgout[0] = message_out; | ||
3046 | esp->msgout_len = esp->msgout_ctr = 1; | ||
3047 | esp_advance_phase(SCptr, in_the_dark); | ||
3048 | return esp_do_phase_determine(esp); | ||
3049 | } | ||
3050 | } else { | ||
3051 | /* If we disconnect now, all hell breaks loose. */ | ||
3052 | ESPLOG(("esp%d: whoops, disconnect\n", esp->esp_id)); | ||
3053 | esp_advance_phase(SCptr, in_the_dark); | ||
3054 | return esp_do_phase_determine(esp); | ||
3055 | } | ||
3056 | } | ||
3057 | |||
3058 | static int esp_enter_status(struct esp *esp) | ||
3059 | { | ||
3060 | u8 thecmd = ESP_CMD_ICCSEQ; | ||
3061 | |||
3062 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3063 | if (esp->erev != fashme) { | ||
3064 | u32 tmp; | ||
3065 | |||
3066 | esp->esp_command[0] = esp->esp_command[1] = 0xff; | ||
3067 | sbus_writeb(2, esp->eregs + ESP_TCLOW); | ||
3068 | sbus_writeb(0, esp->eregs + ESP_TCMED); | ||
3069 | tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
3070 | tmp |= (DMA_ST_WRITE | DMA_ENABLE); | ||
3071 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3072 | if (esp->dma->revision == dvmaesc1) | ||
3073 | sbus_writel(0x100, esp->dregs + DMA_COUNT); | ||
3074 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
3075 | thecmd |= ESP_CMD_DMA; | ||
3076 | } | ||
3077 | esp_cmd(esp, thecmd); | ||
3078 | esp_advance_phase(esp->current_SC, in_status); | ||
3079 | |||
3080 | return esp_do_status(esp); | ||
3081 | } | ||
3082 | |||
3083 | static int esp_disconnect_amidst_phases(struct esp *esp) | ||
3084 | { | ||
3085 | struct scsi_cmnd *sp = esp->current_SC; | ||
3086 | struct esp_device *esp_dev = sp->device->hostdata; | ||
3087 | |||
3088 | /* This means real problems if we see this | ||
3089 | * here. Unless we were actually trying | ||
3090 | * to force the device to abort/reset. | ||
3091 | */ | ||
3092 | ESPLOG(("esp%d Disconnect amidst phases, ", esp->esp_id)); | ||
3093 | ESPLOG(("pphase<%s> cphase<%s>, ", | ||
3094 | phase_string(sp->SCp.phase), | ||
3095 | phase_string(sp->SCp.sent_command))); | ||
3096 | |||
3097 | if (esp->disconnected_SC != NULL || (esp->erev == fashme)) | ||
3098 | esp_cmd(esp, ESP_CMD_ESEL); | ||
3099 | |||
3100 | switch (esp->cur_msgout[0]) { | ||
3101 | default: | ||
3102 | /* We didn't expect this to happen at all. */ | ||
3103 | ESPLOG(("device is bolixed\n")); | ||
3104 | esp_advance_phase(sp, in_tgterror); | ||
3105 | esp_done(esp, (DID_ERROR << 16)); | ||
3106 | break; | ||
3107 | |||
3108 | case BUS_DEVICE_RESET: | ||
3109 | ESPLOG(("device reset successful\n")); | ||
3110 | esp_dev->sync_max_offset = 0; | ||
3111 | esp_dev->sync_min_period = 0; | ||
3112 | esp_dev->sync = 0; | ||
3113 | esp_advance_phase(sp, in_resetdev); | ||
3114 | esp_done(esp, (DID_RESET << 16)); | ||
3115 | break; | ||
3116 | |||
3117 | case ABORT: | ||
3118 | ESPLOG(("device abort successful\n")); | ||
3119 | esp_advance_phase(sp, in_abortone); | ||
3120 | esp_done(esp, (DID_ABORT << 16)); | ||
3121 | break; | ||
3122 | |||
3123 | }; | ||
3124 | return do_intr_end; | ||
3125 | } | ||
3126 | |||
3127 | static int esp_enter_msgout(struct esp *esp) | ||
3128 | { | ||
3129 | esp_advance_phase(esp->current_SC, in_msgout); | ||
3130 | return esp_do_msgout(esp); | ||
3131 | } | ||
3132 | |||
3133 | static int esp_enter_msgin(struct esp *esp) | ||
3134 | { | ||
3135 | esp_advance_phase(esp->current_SC, in_msgin); | ||
3136 | return esp_do_msgin(esp); | ||
3137 | } | ||
3138 | |||
3139 | static int esp_enter_cmd(struct esp *esp) | ||
3140 | { | ||
3141 | esp_advance_phase(esp->current_SC, in_cmdbegin); | ||
3142 | return esp_do_cmdbegin(esp); | ||
3143 | } | ||
3144 | |||
3145 | static int esp_enter_badphase(struct esp *esp) | ||
3146 | { | ||
3147 | ESPLOG(("esp%d: Bizarre bus phase %2x.\n", esp->esp_id, | ||
3148 | esp->sreg & ESP_STAT_PMASK)); | ||
3149 | return do_reset_bus; | ||
3150 | } | ||
3151 | |||
3152 | typedef int (*espfunc_t)(struct esp *); | ||
3153 | |||
3154 | static espfunc_t phase_vector[] = { | ||
3155 | esp_do_data, /* ESP_DOP */ | ||
3156 | esp_do_data, /* ESP_DIP */ | ||
3157 | esp_enter_cmd, /* ESP_CMDP */ | ||
3158 | esp_enter_status, /* ESP_STATP */ | ||
3159 | esp_enter_badphase, /* ESP_STAT_PMSG */ | ||
3160 | esp_enter_badphase, /* ESP_STAT_PMSG | ESP_STAT_PIO */ | ||
3161 | esp_enter_msgout, /* ESP_MOP */ | ||
3162 | esp_enter_msgin, /* ESP_MIP */ | ||
3163 | }; | ||
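For illustration, a standalone sketch (with a hypothetical status value and a plain string table standing in for the handler table) of how the low three status bits index a dispatch table like phase_vector above.

#include <stdio.h>

int main(void)
{
        static const char *phase_name[8] = {
                "data out", "data in", "command", "status",
                "bad phase", "bad phase", "message out", "message in"
        };
        unsigned char sreg = 0x87;      /* hypothetical status register */

        /* The phase mask covers the low three bits of the status. */
        printf("bus phase: %s\n", phase_name[sreg & 0x07]);
        return 0;
}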
3164 | |||
3165 | /* The target has control of the bus and we have to see where it has | ||
3166 | * taken us. | ||
3167 | */ | ||
3168 | static int esp_do_phase_determine(struct esp *esp) | ||
3169 | { | ||
3170 | if ((esp->ireg & ESP_INTR_DC) != 0) | ||
3171 | return esp_disconnect_amidst_phases(esp); | ||
3172 | return phase_vector[esp->sreg & ESP_STAT_PMASK](esp); | ||
3173 | } | ||
3174 | |||
3175 | /* First interrupt after exec'ing a cmd comes here. */ | ||
3176 | static int esp_select_complete(struct esp *esp) | ||
3177 | { | ||
3178 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3179 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
3180 | int cmd_bytes_sent, fcnt; | ||
3181 | |||
3182 | if (esp->erev != fashme) | ||
3183 | esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS); | ||
3184 | |||
3185 | if (esp->erev == fashme) | ||
3186 | fcnt = esp->hme_fifo_workaround_count; | ||
3187 | else | ||
3188 | fcnt = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES); | ||
3189 | |||
3190 | cmd_bytes_sent = esp_bytes_sent(esp, fcnt); | ||
3191 | dma_invalidate(esp); | ||
3192 | |||
3193 | /* Let's check to see if a reselect happened | ||
3194 | * while we were trying to select. This must | ||
3195 | * be checked first. | ||
3196 | */ | ||
3197 | if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { | ||
3198 | esp_reconnect(esp, SCptr); | ||
3199 | return esp_do_reconnect(esp); | ||
3200 | } | ||
3201 | |||
3202 | /* Looks like things worked; we should see a bus service & | ||
3203 | * a function complete interrupt at this point. Note we | ||
3204 | * are doing a direct comparison because we don't want to | ||
3205 | * be fooled into thinking selection was successful if | ||
3206 | * ESP_INTR_DC is set, see below. | ||
3207 | */ | ||
3208 | if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { | ||
3209 | /* target speaks... */ | ||
3210 | esp->targets_present |= (1<<SCptr->device->id); | ||
3211 | |||
3212 | /* What if the target ignores the sdtr? */ | ||
3213 | if (esp->snip) | ||
3214 | esp_dev->sync = 1; | ||
3215 | |||
3216 | /* See how far, if at all, we got in getting | ||
3217 | * the information out to the target. | ||
3218 | */ | ||
3219 | switch (esp->seqreg) { | ||
3220 | default: | ||
3221 | |||
3222 | case ESP_STEP_ASEL: | ||
3223 | /* Arbitration won, target selected, but | ||
3224 | * we are in some phase which is not command | ||
3225 | * phase nor is it message out phase. | ||
3226 | * | ||
3227 | * XXX We've confused the target, obviously. | ||
3228 | * XXX So clear its state, but we also end | ||
3229 | * XXX up clearing everyone else's. That isn't | ||
3230 | * XXX so nice. I'd like to just reset this | ||
3231 | * XXX target, but if I cannot even get its | ||
3232 | * XXX attention and finish selection to talk | ||
3233 | * XXX to it, there is not much more I can do. | ||
3234 | * XXX If we have a loaded bus we're going to | ||
3235 | * XXX spend the next second or so renegotiating | ||
3236 | * XXX for synchronous transfers. | ||
3237 | */ | ||
3238 | ESPLOG(("esp%d: STEP_ASEL for tgt %d\n", | ||
3239 | esp->esp_id, SCptr->device->id)); | ||
3240 | |||
3241 | case ESP_STEP_SID: | ||
3242 | /* Arbitration won, target selected, went | ||
3243 | * to message out phase, sent one message | ||
3244 | * byte, then we stopped. ATN is asserted | ||
3245 | * on the SCSI bus and the target is still | ||
3246 | * there hanging on. This is a legal | ||
3247 | * sequence step if we gave the ESP a select | ||
3248 | * and stop command. | ||
3249 | * | ||
3250 | * XXX See above, I could set the borken flag | ||
3251 | * XXX in the device struct and retry the | ||
3252 | * XXX command. But would that help for | ||
3253 | * XXX tagged capable targets? | ||
3254 | */ | ||
3255 | |||
3256 | case ESP_STEP_NCMD: | ||
3257 | /* Arbitration won, target selected, maybe | ||
3258 | * sent the one message byte in message out | ||
3259 | * phase, but we did not go to command phase | ||
3260 | * in the end. Actually, we could have sent | ||
3261 | * only some of the message bytes if we tried | ||
3262 | * to send out the entire identify and tag | ||
3263 | * message using ESP_CMD_SA3. | ||
3264 | */ | ||
3265 | cmd_bytes_sent = 0; | ||
3266 | break; | ||
3267 | |||
3268 | case ESP_STEP_PPC: | ||
3269 | /* No, not the powerPC pinhead. Arbitration | ||
3270 | * won, all message bytes sent if we went to | ||
3271 | * message out phase, went to command phase | ||
3272 | * but only part of the command was sent. | ||
3273 | * | ||
3274 | * XXX I've seen this, but usually in conjunction | ||
3275 | * XXX with a gross error which appears to have | ||
3276 | * XXX occurred between the time I told the | ||
3277 | * XXX ESP to arbitrate and when I got the | ||
3278 | * XXX interrupt. Could I have misloaded the | ||
3279 | * XXX command bytes into the fifo? Actually, | ||
3280 | * XXX I most likely missed a phase, and therefore | ||
3281 | * XXX went into never never land and didn't even | ||
3282 | * XXX know it. That was the old driver though. | ||
3283 | * XXX What is even more peculiar is that the ESP | ||
3284 | * XXX showed the proper function complete and | ||
3285 | * XXX bus service bits in the interrupt register. | ||
3286 | */ | ||
3287 | |||
3288 | case ESP_STEP_FINI4: | ||
3289 | case ESP_STEP_FINI5: | ||
3290 | case ESP_STEP_FINI6: | ||
3291 | case ESP_STEP_FINI7: | ||
3292 | /* Account for the identify message */ | ||
3293 | if (SCptr->SCp.phase == in_slct_norm) | ||
3294 | cmd_bytes_sent -= 1; | ||
3295 | }; | ||
3296 | |||
3297 | if (esp->erev != fashme) | ||
3298 | esp_cmd(esp, ESP_CMD_NULL); | ||
3299 | |||
3300 | /* Be careful, we could really get fucked during synchronous | ||
3301 | * data transfers if we try to flush the fifo now. | ||
3302 | */ | ||
3303 | if ((esp->erev != fashme) && /* not a Happy Meal and... */ | ||
3304 | !fcnt && /* Fifo is empty and... */ | ||
3305 | /* either we are not doing synchronous transfers or... */ | ||
3306 | (!esp_dev->sync_max_offset || | ||
3307 | /* We are not going into data in phase. */ | ||
3308 | ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) | ||
3309 | esp_cmd(esp, ESP_CMD_FLUSH); /* flush is safe */ | ||
3310 | |||
3311 | /* See how far we got if this is not a slow command. */ | ||
3312 | if (!esp->esp_slowcmd) { | ||
3313 | if (cmd_bytes_sent < 0) | ||
3314 | cmd_bytes_sent = 0; | ||
3315 | if (cmd_bytes_sent != SCptr->cmd_len) { | ||
3316 | /* Crapola, mark it as a slowcmd | ||
3317 | * so that we have some chance of | ||
3318 | * keeping the command alive with | ||
3319 | * good luck. | ||
3320 | * | ||
3321 | * XXX Actually, if we didn't send it all | ||
3322 | * XXX this means either we didn't set things | ||
3323 | * XXX up properly (driver bug) or the target | ||
3324 | * XXX or the ESP detected parity on one of | ||
3325 | * XXX the command bytes. This makes much | ||
3326 | * XXX more sense, and therefore this code | ||
3327 | * XXX should be changed to send out a | ||
3328 | * XXX parity error message or if the status | ||
3329 | * XXX register shows no parity error then | ||
3330 | * XXX just expect the target to bring the | ||
3331 | * XXX bus into message in phase so that it | ||
3332 | * XXX can send us the parity error message. | ||
3333 | * XXX SCSI sucks... | ||
3334 | */ | ||
3335 | esp->esp_slowcmd = 1; | ||
3336 | esp->esp_scmdp = &(SCptr->cmnd[cmd_bytes_sent]); | ||
3337 | esp->esp_scmdleft = (SCptr->cmd_len - cmd_bytes_sent); | ||
3338 | } | ||
3339 | } | ||
3340 | |||
3341 | /* Now figure out where we went. */ | ||
3342 | esp_advance_phase(SCptr, in_the_dark); | ||
3343 | return esp_do_phase_determine(esp); | ||
3344 | } | ||
3345 | |||
3346 | /* Did the target even make it? */ | ||
3347 | if (esp->ireg == ESP_INTR_DC) { | ||
3348 | /* wheee... nobody there or they didn't like | ||
3349 | * what we told it to do, clean up. | ||
3350 | */ | ||
3351 | |||
3352 | /* If anyone is off the bus, but working on | ||
3353 | * a command in the background for us, tell | ||
3354 | * the ESP to listen for them. | ||
3355 | */ | ||
3356 | if (esp->disconnected_SC) | ||
3357 | esp_cmd(esp, ESP_CMD_ESEL); | ||
3358 | |||
3359 | if (((1<<SCptr->device->id) & esp->targets_present) && | ||
3360 | esp->seqreg != 0 && | ||
3361 | (esp->cur_msgout[0] == EXTENDED_MESSAGE) && | ||
3362 | (SCptr->SCp.phase == in_slct_msg || | ||
3363 | SCptr->SCp.phase == in_slct_stop)) { | ||
3364 | /* shit */ | ||
3365 | esp->snip = 0; | ||
3366 | ESPLOG(("esp%d: Failed synchronous negotiation for target %d " | ||
3367 | "lun %d\n", esp->esp_id, SCptr->device->id, SCptr->device->lun)); | ||
3368 | esp_dev->sync_max_offset = 0; | ||
3369 | esp_dev->sync_min_period = 0; | ||
3370 | esp_dev->sync = 1; /* so we don't negotiate again */ | ||
3371 | |||
3372 | /* Run the command again, this time though we | ||
3373 | * won't try to negotiate for synchronous transfers. | ||
3374 | * | ||
3375 | * XXX I'd like to do something like send an | ||
3376 | * XXX INITIATOR_ERROR or ABORT message to the | ||
3377 | * XXX target to tell it, "Sorry I confused you, | ||
3378 | * XXX please come back and I will be nicer next | ||
3379 | * XXX time". But that requires having the target | ||
3380 | * XXX on the bus, and it has dropped BSY on us. | ||
3381 | */ | ||
3382 | esp->current_SC = NULL; | ||
3383 | esp_advance_phase(SCptr, not_issued); | ||
3384 | prepend_SC(&esp->issue_SC, SCptr); | ||
3385 | esp_exec_cmd(esp); | ||
3386 | return do_intr_end; | ||
3387 | } | ||
3388 | |||
3389 | /* Ok, this is normal; it is what we see during boot | ||
3390 | * or whenever we are scanning the bus for targets. | ||
3391 | * But first make sure that is really what is happening. | ||
3392 | */ | ||
3393 | if (((1<<SCptr->device->id) & esp->targets_present)) { | ||
3394 | ESPLOG(("esp%d: Warning, live target %d not responding to " | ||
3395 | "selection.\n", esp->esp_id, SCptr->device->id)); | ||
3396 | |||
3397 | /* This _CAN_ happen. The SCSI standard states that | ||
3398 | * the target is to _not_ respond to selection if | ||
3399 | * _it_ detects bad parity on the bus for any reason. | ||
3400 | * Therefore, we assume that if we've talked successfully | ||
3401 | * to this target before, bad parity is the problem. | ||
3402 | */ | ||
3403 | esp_done(esp, (DID_PARITY << 16)); | ||
3404 | } else { | ||
3405 | /* Else, there really isn't anyone there. */ | ||
3406 | ESPMISC(("esp: selection failure, maybe nobody there?\n")); | ||
3407 | ESPMISC(("esp: target %d lun %d\n", | ||
3408 | SCptr->device->id, SCptr->device->lun)); | ||
3409 | esp_done(esp, (DID_BAD_TARGET << 16)); | ||
3410 | } | ||
3411 | return do_intr_end; | ||
3412 | } | ||
3413 | |||
3414 | ESPLOG(("esp%d: Selection failure.\n", esp->esp_id)); | ||
3415 | printk("esp%d: Currently -- ", esp->esp_id); | ||
3416 | esp_print_ireg(esp->ireg); printk(" "); | ||
3417 | esp_print_statreg(esp->sreg); printk(" "); | ||
3418 | esp_print_seqreg(esp->seqreg); printk("\n"); | ||
3419 | printk("esp%d: New -- ", esp->esp_id); | ||
3420 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
3421 | esp->seqreg = sbus_readb(esp->eregs + ESP_SSTEP); | ||
3422 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); | ||
3423 | esp_print_ireg(esp->ireg); printk(" "); | ||
3424 | esp_print_statreg(esp->sreg); printk(" "); | ||
3425 | esp_print_seqreg(esp->seqreg); printk("\n"); | ||
3426 | ESPLOG(("esp%d: resetting bus\n", esp->esp_id)); | ||
3427 | return do_reset_bus; /* ugh... */ | ||
3428 | } | ||
3429 | |||
3430 | /* Continue reading bytes for msgin phase. */ | ||
3431 | static int esp_do_msgincont(struct esp *esp) | ||
3432 | { | ||
3433 | if (esp->ireg & ESP_INTR_BSERV) { | ||
3434 | /* in the right phase too? */ | ||
3435 | if ((esp->sreg & ESP_STAT_PMASK) == ESP_MIP) { | ||
3436 | /* phew... */ | ||
3437 | esp_cmd(esp, ESP_CMD_TI); | ||
3438 | esp_advance_phase(esp->current_SC, in_msgindone); | ||
3439 | return do_intr_end; | ||
3440 | } | ||
3441 | |||
3442 | /* We changed phase but ESP shows bus service, | ||
3443 | * in this case it is most likely that we, the | ||
3444 | * hacker who has been up for 20hrs straight | ||
3445 | * staring at the screen, drowned in coffee | ||
3446 | * smelling like wretched cigarette ashes | ||
3447 | * have miscoded something..... so, try to | ||
3448 | * recover as best we can. | ||
3449 | */ | ||
3450 | ESPLOG(("esp%d: message in mis-carriage.\n", esp->esp_id)); | ||
3451 | } | ||
3452 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3453 | return do_phase_determine; | ||
3454 | } | ||
3455 | |||
3456 | static int check_singlebyte_msg(struct esp *esp) | ||
3457 | { | ||
3458 | esp->prevmsgin = esp->cur_msgin[0]; | ||
3459 | if (esp->cur_msgin[0] & 0x80) { | ||
3460 | /* wheee... */ | ||
3461 | ESPLOG(("esp%d: target sends identify amidst phases\n", | ||
3462 | esp->esp_id)); | ||
3463 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3464 | return 0; | ||
3465 | } else if (((esp->cur_msgin[0] & 0xf0) == 0x20) || | ||
3466 | (esp->cur_msgin[0] == EXTENDED_MESSAGE)) { | ||
3467 | esp->msgin_len = 2; | ||
3468 | esp_advance_phase(esp->current_SC, in_msgincont); | ||
3469 | return 0; | ||
3470 | } | ||
3471 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3472 | switch (esp->cur_msgin[0]) { | ||
3473 | default: | ||
3474 | /* We don't want to hear about it. */ | ||
3475 | ESPLOG(("esp%d: msg %02x which we don't know about\n", esp->esp_id, | ||
3476 | esp->cur_msgin[0])); | ||
3477 | return MESSAGE_REJECT; | ||
3478 | |||
3479 | case NOP: | ||
3480 | ESPLOG(("esp%d: target %d sends a nop\n", esp->esp_id, | ||
3481 | esp->current_SC->device->id)); | ||
3482 | return 0; | ||
3483 | |||
3484 | case RESTORE_POINTERS: | ||
3485 | /* In this case we might also have to backup the | ||
3486 | * "slow command" pointer. It is rare to get such | ||
3487 | * a save/restore pointer sequence so early in the | ||
3488 | * bus transition sequences, but cover it. | ||
3489 | */ | ||
3490 | if (esp->esp_slowcmd) { | ||
3491 | esp->esp_scmdleft = esp->current_SC->cmd_len; | ||
3492 | esp->esp_scmdp = &esp->current_SC->cmnd[0]; | ||
3493 | } | ||
3494 | esp_restore_pointers(esp, esp->current_SC); | ||
3495 | return 0; | ||
3496 | |||
3497 | case SAVE_POINTERS: | ||
3498 | esp_save_pointers(esp, esp->current_SC); | ||
3499 | return 0; | ||
3500 | |||
3501 | case COMMAND_COMPLETE: | ||
3502 | case DISCONNECT: | ||
3503 | /* Freeing the bus, let it go. */ | ||
3504 | esp->current_SC->SCp.phase = in_freeing; | ||
3505 | return 0; | ||
3506 | |||
3507 | case MESSAGE_REJECT: | ||
3508 | ESPMISC(("msg reject, ")); | ||
3509 | if (esp->prevmsgout == EXTENDED_MESSAGE) { | ||
3510 | struct esp_device *esp_dev = esp->current_SC->device->hostdata; | ||
3511 | |||
3512 | /* Doesn't look like this target can | ||
3513 | * do synchronous or WIDE transfers. | ||
3514 | */ | ||
3515 | ESPSDTR(("got reject, was trying nego, clearing sync/WIDE\n")); | ||
3516 | esp_dev->sync = 1; | ||
3517 | esp_dev->wide = 1; | ||
3518 | esp_dev->sync_min_period = 0; | ||
3519 | esp_dev->sync_max_offset = 0; | ||
3520 | return 0; | ||
3521 | } else { | ||
3522 | ESPMISC(("not sync nego, sending ABORT\n")); | ||
3523 | return ABORT; | ||
3524 | } | ||
3525 | } | ||
3526 | } | ||
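The length classification above leans on the SCSI-2 message framing rules. A minimal standalone sketch of those rules, using the same EXTENDED_MESSAGE value the driver gets from the SCSI headers (the helper name here is ours, not part of the driver):

#include <stdio.h>

#define EXTENDED_MESSAGE	0x01	/* same value the kernel SCSI headers define */

/* How many bytes to expect once the first message byte has arrived:
 * 0x01 starts an extended message (a length byte follows), 0x20-0x2f are
 * the defined two-byte messages, everything else is a single byte.  This
 * mirrors the tests in check_singlebyte_msg() and esp_do_msgindone().
 */
static int scsi_msg_initial_len(unsigned char first_byte)
{
	if (first_byte == EXTENDED_MESSAGE)
		return 2;		/* read the length byte before deciding */
	if ((first_byte & 0xf0) == 0x20)
		return 2;		/* two-byte message group */
	return 1;
}

int main(void)
{
	printf("%d %d %d\n",
	       scsi_msg_initial_len(0x01),	/* extended          -> 2 (for now) */
	       scsi_msg_initial_len(0x23),	/* two-byte group    -> 2 */
	       scsi_msg_initial_len(0x00));	/* COMMAND COMPLETE  -> 1 */
	return 0;
}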
3527 | |||
3528 | /* Target negotiates for synchronous transfers before we do; this | ||
3529 | * is legal although very strange. What is even funnier is that | ||
3530 | * the SCSI2 standard specifically recommends against targets doing | ||
3531 | * this because so many initiators cannot cope with this occurring. | ||
3532 | */ | ||
3533 | static int target_with_ants_in_pants(struct esp *esp, | ||
3534 | struct scsi_cmnd *SCptr, | ||
3535 | struct esp_device *esp_dev) | ||
3536 | { | ||
3537 | if (esp_dev->sync || SCptr->device->borken) { | ||
3538 | /* sorry, no can do */ | ||
3539 | ESPSDTR(("forcing to async, ")); | ||
3540 | build_sync_nego_msg(esp, 0, 0); | ||
3541 | esp_dev->sync = 1; | ||
3542 | esp->snip = 1; | ||
3543 | ESPLOG(("esp%d: hoping for msgout\n", esp->esp_id)); | ||
3544 | esp_advance_phase(SCptr, in_the_dark); | ||
3545 | return EXTENDED_MESSAGE; | ||
3546 | } | ||
3547 | |||
3548 | /* Ok, we'll check them out... */ | ||
3549 | return 0; | ||
3550 | } | ||
3551 | |||
3552 | static void sync_report(struct esp *esp) | ||
3553 | { | ||
3554 | int msg3, msg4; | ||
3555 | char *type; | ||
3556 | |||
3557 | msg3 = esp->cur_msgin[3]; | ||
3558 | msg4 = esp->cur_msgin[4]; | ||
3559 | if (msg4) { | ||
3560 | int hz = 1000000000 / (msg3 * 4); | ||
3561 | int integer = hz / 1000000; | ||
3562 | int fraction = (hz - (integer * 1000000)) / 10000; | ||
3563 | if ((esp->erev == fashme) && | ||
3564 | (esp->config3[esp->current_SC->device->id] & ESP_CONFIG3_EWIDE)) { | ||
3565 | type = "FAST-WIDE"; | ||
3566 | integer <<= 1; | ||
3567 | fraction <<= 1; | ||
3568 | } else if ((msg3 * 4) < 200) { | ||
3569 | type = "FAST"; | ||
3570 | } else { | ||
3571 | type = "synchronous"; | ||
3572 | } | ||
3573 | |||
3574 | /* Do not transform this back into one big printk | ||
3575 | * again, it triggers a bug in our sparc64-gcc272 | ||
3576 | * sibling call optimization. -DaveM | ||
3577 | */ | ||
3578 | ESPLOG((KERN_INFO "esp%d: target %d ", | ||
3579 | esp->esp_id, esp->current_SC->device->id)); | ||
3580 | ESPLOG(("[period %dns offset %d %d.%02dMHz ", | ||
3581 | (int) msg3 * 4, (int) msg4, | ||
3582 | integer, fraction)); | ||
3583 | ESPLOG(("%s SCSI%s]\n", type, | ||
3584 | (((msg3 * 4) < 200) ? "-II" : ""))); | ||
3585 | } else { | ||
3586 | ESPLOG((KERN_INFO "esp%d: target %d asynchronous\n", | ||
3587 | esp->esp_id, esp->current_SC->device->id)); | ||
3588 | } | ||
3589 | } | ||
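For reference, the arithmetic in sync_report() works out as follows; a standalone sketch with two representative period factors (the values are chosen for illustration):

#include <stdio.h>

/* The SDTR period byte is in 4ns units.  sync_report() derives the byte
 * rate the same way: hz = 10^9 / (factor * 4), then splits it into whole
 * and fractional MHz for printing; anything under 200ns is labeled FAST.
 */
int main(void)
{
	int factors[] = { 25, 50 };	/* 100ns and 200ns periods */
	int i;

	for (i = 0; i < 2; i++) {
		int hz = 1000000000 / (factors[i] * 4);
		int integer = hz / 1000000;
		int fraction = (hz - integer * 1000000) / 10000;

		printf("factor %d: period %dns, %d.%02dMHz%s\n",
		       factors[i], factors[i] * 4, integer, fraction,
		       (factors[i] * 4) < 200 ? " (FAST)" : "");
	}
	return 0;
}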
3590 | |||
3591 | static int check_multibyte_msg(struct esp *esp) | ||
3592 | { | ||
3593 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3594 | struct esp_device *esp_dev = SCptr->device->hostdata; | ||
3595 | u8 regval = 0; | ||
3596 | int message_out = 0; | ||
3597 | |||
3598 | ESPSDTR(("chk multibyte msg: ")); | ||
3599 | if (esp->cur_msgin[2] == EXTENDED_SDTR) { | ||
3600 | int period = esp->cur_msgin[3]; | ||
3601 | int offset = esp->cur_msgin[4]; | ||
3602 | |||
3603 | ESPSDTR(("is sync nego response, ")); | ||
3604 | if (!esp->snip) { | ||
3605 | int rval; | ||
3606 | |||
3607 | /* Target negotiates first! */ | ||
3608 | ESPSDTR(("target jumps the gun, ")); | ||
3609 | message_out = EXTENDED_MESSAGE; /* we must respond */ | ||
3610 | rval = target_with_ants_in_pants(esp, SCptr, esp_dev); | ||
3611 | if (rval) | ||
3612 | return rval; | ||
3613 | } | ||
3614 | |||
3615 | ESPSDTR(("examining sdtr, ")); | ||
3616 | |||
3617 | /* Offset cannot be larger than ESP fifo size. */ | ||
3618 | if (offset > 15) { | ||
3619 | ESPSDTR(("offset too big %2x, ", offset)); | ||
3620 | offset = 15; | ||
3621 | ESPSDTR(("sending back new offset\n")); | ||
3622 | build_sync_nego_msg(esp, period, offset); | ||
3623 | return EXTENDED_MESSAGE; | ||
3624 | } | ||
3625 | |||
3626 | if (offset && period > esp->max_period) { | ||
3627 | /* Yeee, async for this slow device. */ | ||
3628 | ESPSDTR(("period too long %2x, ", period)); | ||
3629 | build_sync_nego_msg(esp, 0, 0); | ||
3630 | ESPSDTR(("hoping for msgout\n")); | ||
3631 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3632 | return EXTENDED_MESSAGE; | ||
3633 | } else if (offset && period < esp->min_period) { | ||
3634 | ESPSDTR(("period too short %2x, ", period)); | ||
3635 | period = esp->min_period; | ||
3636 | if (esp->erev > esp236) | ||
3637 | regval = 4; | ||
3638 | else | ||
3639 | regval = 5; | ||
3640 | } else if (offset) { | ||
3641 | int tmp; | ||
3642 | |||
3643 | ESPSDTR(("period is ok, ")); | ||
3644 | tmp = esp->ccycle / 1000; | ||
3645 | regval = (((period << 2) + tmp - 1) / tmp); | ||
3646 | if (regval && ((esp->erev == fas100a || | ||
3647 | esp->erev == fas236 || | ||
3648 | esp->erev == fashme))) { | ||
3649 | if (period >= 50) | ||
3650 | regval--; | ||
3651 | } | ||
3652 | } | ||
3653 | |||
3654 | if (offset) { | ||
3655 | u8 bit; | ||
3656 | |||
3657 | esp_dev->sync_min_period = (regval & 0x1f); | ||
3658 | esp_dev->sync_max_offset = (offset | esp->radelay); | ||
3659 | if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) { | ||
3660 | if ((esp->erev == fas100a) || (esp->erev == fashme)) | ||
3661 | bit = ESP_CONFIG3_FAST; | ||
3662 | else | ||
3663 | bit = ESP_CONFIG3_FSCSI; | ||
3664 | if (period < 50) { | ||
3665 | /* On FAS366, if using fast-20 synchronous transfers | ||
3666 | * we need to make sure the REQ/ACK assert/deassert | ||
3667 | * control bits are clear. | ||
3668 | */ | ||
3669 | if (esp->erev == fashme) | ||
3670 | esp_dev->sync_max_offset &= ~esp->radelay; | ||
3671 | esp->config3[SCptr->device->id] |= bit; | ||
3672 | } else { | ||
3673 | esp->config3[SCptr->device->id] &= ~bit; | ||
3674 | } | ||
3675 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3676 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3677 | } | ||
3678 | esp->prev_soff = esp_dev->sync_max_offset; | ||
3679 | esp->prev_stp = esp_dev->sync_min_period; | ||
3680 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
3681 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
3682 | ESPSDTR(("soff=%2x stp=%2x cfg3=%2x\n", | ||
3683 | esp_dev->sync_max_offset, | ||
3684 | esp_dev->sync_min_period, | ||
3685 | esp->config3[SCptr->device->id])); | ||
3686 | |||
3687 | esp->snip = 0; | ||
3688 | } else if (esp_dev->sync_max_offset) { | ||
3689 | u8 bit; | ||
3690 | |||
3691 | /* back to async mode */ | ||
3692 | ESPSDTR(("unacceptable sync nego, forcing async\n")); | ||
3693 | esp_dev->sync_max_offset = 0; | ||
3694 | esp_dev->sync_min_period = 0; | ||
3695 | esp->prev_soff = 0; | ||
3696 | esp->prev_stp = 0; | ||
3697 | sbus_writeb(esp->prev_soff, esp->eregs + ESP_SOFF); | ||
3698 | sbus_writeb(esp->prev_stp, esp->eregs + ESP_STP); | ||
3699 | if (esp->erev == fas100a || esp->erev == fas236 || esp->erev == fashme) { | ||
3700 | if ((esp->erev == fas100a) || (esp->erev == fashme)) | ||
3701 | bit = ESP_CONFIG3_FAST; | ||
3702 | else | ||
3703 | bit = ESP_CONFIG3_FSCSI; | ||
3704 | esp->config3[SCptr->device->id] &= ~bit; | ||
3705 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3706 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3707 | } | ||
3708 | } | ||
3709 | |||
3710 | sync_report(esp); | ||
3711 | |||
3712 | ESPSDTR(("chk multibyte msg: sync is known, ")); | ||
3713 | esp_dev->sync = 1; | ||
3714 | |||
3715 | if (message_out) { | ||
3716 | ESPLOG(("esp%d: sending sdtr back, hoping for msgout\n", | ||
3717 | esp->esp_id)); | ||
3718 | build_sync_nego_msg(esp, period, offset); | ||
3719 | esp_advance_phase(SCptr, in_the_dark); | ||
3720 | return EXTENDED_MESSAGE; | ||
3721 | } | ||
3722 | |||
3723 | ESPSDTR(("returning zero\n")); | ||
3724 | esp_advance_phase(SCptr, in_the_dark); /* ...or else! */ | ||
3725 | return 0; | ||
3726 | } else if (esp->cur_msgin[2] == EXTENDED_WDTR) { | ||
3727 | int size = 8 << esp->cur_msgin[3]; | ||
3728 | |||
3729 | esp->wnip = 0; | ||
3730 | if (esp->erev != fashme) { | ||
3731 | ESPLOG(("esp%d: AIEEE wide msg received and not HME.\n", | ||
3732 | esp->esp_id)); | ||
3733 | message_out = MESSAGE_REJECT; | ||
3734 | } else if (size > 16) { | ||
3735 | ESPLOG(("esp%d: AIEEE wide transfer for %d size " | ||
3736 | "not supported.\n", esp->esp_id, size)); | ||
3737 | message_out = MESSAGE_REJECT; | ||
3738 | } else { | ||
3739 | /* Things look good; let's see what we got. */ | ||
3740 | if (size == 16) { | ||
3741 | /* Set config 3 register for this target. */ | ||
3742 | esp->config3[SCptr->device->id] |= ESP_CONFIG3_EWIDE; | ||
3743 | } else { | ||
3744 | /* Just make sure it was one byte sized. */ | ||
3745 | if (size != 8) { | ||
3746 | ESPLOG(("esp%d: Aieee, wide nego of %d size.\n", | ||
3747 | esp->esp_id, size)); | ||
3748 | message_out = MESSAGE_REJECT; | ||
3749 | goto finish; | ||
3750 | } | ||
3751 | /* Pure paranoia. */ | ||
3752 | esp->config3[SCptr->device->id] &= ~(ESP_CONFIG3_EWIDE); | ||
3753 | } | ||
3754 | esp->prev_cfg3 = esp->config3[SCptr->device->id]; | ||
3755 | sbus_writeb(esp->prev_cfg3, esp->eregs + ESP_CFG3); | ||
3756 | |||
3757 | /* Regardless, next try for sync transfers. */ | ||
3758 | build_sync_nego_msg(esp, esp->sync_defp, 15); | ||
3759 | esp_dev->sync = 1; | ||
3760 | esp->snip = 1; | ||
3761 | message_out = EXTENDED_MESSAGE; | ||
3762 | } | ||
3763 | } else if (esp->cur_msgin[2] == EXTENDED_MODIFY_DATA_POINTER) { | ||
3764 | ESPLOG(("esp%d: rejecting modify data ptr msg\n", esp->esp_id)); | ||
3765 | message_out = MESSAGE_REJECT; | ||
3766 | } | ||
3767 | finish: | ||
3768 | esp_advance_phase(SCptr, in_the_dark); | ||
3769 | return message_out; | ||
3770 | } | ||
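The period-to-register conversion in check_multibyte_msg() is the only non-obvious arithmetic above. A hedged standalone sketch of it, assuming esp->ccycle holds one chip clock period in thousandths of a nanosecond (which is what the division by 1000 in the driver suggests); the driver additionally decrements the result on fas100a/fas236/fashme for periods of 200ns and up:

#include <stdio.h>

/* Round the requested synchronous period (SDTR units of 4ns) up to a whole
 * number of chip clock cycles; that count is what ends up in ESP_STP.
 */
static int stp_regval(int period_factor, int clock_mhz)
{
	int ccycle_ns = 1000 / clock_mhz;	/* e.g. a 40MHz ESP -> 25ns */
	int period_ns = period_factor * 4;

	return (period_ns + ccycle_ns - 1) / ccycle_ns;
}

int main(void)
{
	/* A 100ns (FAST) period on a 40MHz chip needs 4 clocks per byte. */
	printf("%d\n", stp_regval(25, 40));
	return 0;
}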
3771 | |||
3772 | static int esp_do_msgindone(struct esp *esp) | ||
3773 | { | ||
3774 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3775 | int message_out = 0, it = 0, rval; | ||
3776 | |||
3777 | rval = skipahead1(esp, SCptr, in_msgin, in_msgindone); | ||
3778 | if (rval) | ||
3779 | return rval; | ||
3780 | if (SCptr->SCp.sent_command != in_status) { | ||
3781 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
3782 | if (esp->msgin_len && (esp->sreg & ESP_STAT_PERR)) { | ||
3783 | message_out = MSG_PARITY_ERROR; | ||
3784 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3785 | } else if (esp->erev != fashme && | ||
3786 | (it = (sbus_readb(esp->eregs + ESP_FFLAGS) & ESP_FF_FBYTES)) != 1) { | ||
3787 | /* We certainly dropped the ball somewhere. */ | ||
3788 | message_out = INITIATOR_ERROR; | ||
3789 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3790 | } else if (!esp->msgin_len) { | ||
3791 | if (esp->erev == fashme) | ||
3792 | it = esp->hme_fifo_workaround_buffer[0]; | ||
3793 | else | ||
3794 | it = sbus_readb(esp->eregs + ESP_FDATA); | ||
3795 | esp_advance_phase(SCptr, in_msgincont); | ||
3796 | } else { | ||
3797 | /* it is ok and we want it */ | ||
3798 | if (esp->erev == fashme) | ||
3799 | it = esp->cur_msgin[esp->msgin_ctr] = | ||
3800 | esp->hme_fifo_workaround_buffer[0]; | ||
3801 | else | ||
3802 | it = esp->cur_msgin[esp->msgin_ctr] = | ||
3803 | sbus_readb(esp->eregs + ESP_FDATA); | ||
3804 | esp->msgin_ctr++; | ||
3805 | } | ||
3806 | } else { | ||
3807 | esp_advance_phase(SCptr, in_the_dark); | ||
3808 | return do_work_bus; | ||
3809 | } | ||
3810 | } else { | ||
3811 | it = esp->cur_msgin[0]; | ||
3812 | } | ||
3813 | if (!message_out && esp->msgin_len) { | ||
3814 | if (esp->msgin_ctr < esp->msgin_len) { | ||
3815 | esp_advance_phase(SCptr, in_msgincont); | ||
3816 | } else if (esp->msgin_len == 1) { | ||
3817 | message_out = check_singlebyte_msg(esp); | ||
3818 | } else if (esp->msgin_len == 2) { | ||
3819 | if (esp->cur_msgin[0] == EXTENDED_MESSAGE) { | ||
3820 | if ((it + 2) >= 15) { | ||
3821 | message_out = MESSAGE_REJECT; | ||
3822 | } else { | ||
3823 | esp->msgin_len = (it + 2); | ||
3824 | esp_advance_phase(SCptr, in_msgincont); | ||
3825 | } | ||
3826 | } else { | ||
3827 | message_out = MESSAGE_REJECT; /* foo on you */ | ||
3828 | } | ||
3829 | } else { | ||
3830 | message_out = check_multibyte_msg(esp); | ||
3831 | } | ||
3832 | } | ||
3833 | if (message_out < 0) { | ||
3834 | return -message_out; | ||
3835 | } else if (message_out) { | ||
3836 | if (((message_out != 1) && | ||
3837 | ((message_out < 0x20) || (message_out & 0x80)))) | ||
3838 | esp->msgout_len = 1; | ||
3839 | esp->cur_msgout[0] = message_out; | ||
3840 | esp_cmd(esp, ESP_CMD_SATN); | ||
3841 | esp_advance_phase(SCptr, in_the_dark); | ||
3842 | esp->msgin_len = 0; | ||
3843 | } | ||
3844 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
3845 | esp->sreg &= ~(ESP_STAT_INTR); | ||
3846 | if ((esp->sreg & (ESP_STAT_PMSG|ESP_STAT_PCD)) == (ESP_STAT_PMSG|ESP_STAT_PCD)) | ||
3847 | esp_cmd(esp, ESP_CMD_MOK); | ||
3848 | if ((SCptr->SCp.sent_command == in_msgindone) && | ||
3849 | (SCptr->SCp.phase == in_freeing)) | ||
3850 | return esp_do_freebus(esp); | ||
3851 | return do_intr_end; | ||
3852 | } | ||
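The (it + 2) bookkeeping above follows the extended-message layout: opcode, length byte, then that many payload bytes, all of which must fit in the 16-byte cur_msgin[] buffer. A small standalone illustration using a standard SDTR (the byte values are the usual SCSI-2 ones, not taken from a bus trace):

#include <stdio.h>

int main(void)
{
	/* EXTENDED_MESSAGE, length 3, EXTENDED_SDTR, period 25 (100ns), offset 15 */
	unsigned char sdtr[] = { 0x01, 0x03, 0x01, 25, 15 };
	int total = sdtr[1] + 2;	/* what esp_do_msgindone() computes as it + 2 */

	printf("total SDTR message length = %d bytes\n", total);	/* prints 5 */
	return 0;
}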
3853 | |||
3854 | static int esp_do_cmdbegin(struct esp *esp) | ||
3855 | { | ||
3856 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
3857 | |||
3858 | esp_advance_phase(SCptr, in_cmdend); | ||
3859 | if (esp->erev == fashme) { | ||
3860 | u32 tmp = sbus_readl(esp->dregs + DMA_CSR); | ||
3861 | int i; | ||
3862 | |||
3863 | for (i = 0; i < esp->esp_scmdleft; i++) | ||
3864 | esp->esp_command[i] = *esp->esp_scmdp++; | ||
3865 | esp->esp_scmdleft = 0; | ||
3866 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3867 | esp_setcount(esp->eregs, i, 1); | ||
3868 | esp_cmd(esp, (ESP_CMD_DMA | ESP_CMD_TI)); | ||
3869 | tmp |= (DMA_SCSI_DISAB | DMA_ENABLE); | ||
3870 | tmp &= ~(DMA_ST_WRITE); | ||
3871 | sbus_writel(i, esp->dregs + DMA_COUNT); | ||
3872 | sbus_writel(esp->esp_command_dvma, esp->dregs + DMA_ADDR); | ||
3873 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3874 | } else { | ||
3875 | u8 tmp; | ||
3876 | |||
3877 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3878 | tmp = *esp->esp_scmdp++; | ||
3879 | esp->esp_scmdleft--; | ||
3880 | sbus_writeb(tmp, esp->eregs + ESP_FDATA); | ||
3881 | esp_cmd(esp, ESP_CMD_TI); | ||
3882 | } | ||
3883 | return do_intr_end; | ||
3884 | } | ||
3885 | |||
3886 | static int esp_do_cmddone(struct esp *esp) | ||
3887 | { | ||
3888 | if (esp->erev == fashme) | ||
3889 | dma_invalidate(esp); | ||
3890 | else | ||
3891 | esp_cmd(esp, ESP_CMD_NULL); | ||
3892 | |||
3893 | if (esp->ireg & ESP_INTR_BSERV) { | ||
3894 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
3895 | return esp_do_phase_determine(esp); | ||
3896 | } | ||
3897 | |||
3898 | ESPLOG(("esp%d: in do_cmddone() but didn't get BSERV interrupt.\n", | ||
3899 | esp->esp_id)); | ||
3900 | return do_reset_bus; | ||
3901 | } | ||
3902 | |||
3903 | static int esp_do_msgout(struct esp *esp) | ||
3904 | { | ||
3905 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
3906 | switch (esp->msgout_len) { | ||
3907 | case 1: | ||
3908 | if (esp->erev == fashme) | ||
3909 | hme_fifo_push(esp, &esp->cur_msgout[0], 1); | ||
3910 | else | ||
3911 | sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA); | ||
3912 | |||
3913 | esp_cmd(esp, ESP_CMD_TI); | ||
3914 | break; | ||
3915 | |||
3916 | case 2: | ||
3917 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3918 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3919 | |||
3920 | if (esp->erev == fashme) { | ||
3921 | hme_fifo_push(esp, &esp->cur_msgout[0], 2); | ||
3922 | esp_cmd(esp, ESP_CMD_TI); | ||
3923 | } else { | ||
3924 | dma_setup(esp, esp->esp_command_dvma, 2, 0); | ||
3925 | esp_setcount(esp->eregs, 2, 0); | ||
3926 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3927 | } | ||
3928 | break; | ||
3929 | |||
3930 | case 4: | ||
3931 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3932 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3933 | esp->esp_command[2] = esp->cur_msgout[2]; | ||
3934 | esp->esp_command[3] = esp->cur_msgout[3]; | ||
3935 | esp->snip = 1; | ||
3936 | |||
3937 | if (esp->erev == fashme) { | ||
3938 | hme_fifo_push(esp, &esp->cur_msgout[0], 4); | ||
3939 | esp_cmd(esp, ESP_CMD_TI); | ||
3940 | } else { | ||
3941 | dma_setup(esp, esp->esp_command_dvma, 4, 0); | ||
3942 | esp_setcount(esp->eregs, 4, 0); | ||
3943 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3944 | } | ||
3945 | break; | ||
3946 | |||
3947 | case 5: | ||
3948 | esp->esp_command[0] = esp->cur_msgout[0]; | ||
3949 | esp->esp_command[1] = esp->cur_msgout[1]; | ||
3950 | esp->esp_command[2] = esp->cur_msgout[2]; | ||
3951 | esp->esp_command[3] = esp->cur_msgout[3]; | ||
3952 | esp->esp_command[4] = esp->cur_msgout[4]; | ||
3953 | esp->snip = 1; | ||
3954 | |||
3955 | if (esp->erev == fashme) { | ||
3956 | hme_fifo_push(esp, &esp->cur_msgout[0], 5); | ||
3957 | esp_cmd(esp, ESP_CMD_TI); | ||
3958 | } else { | ||
3959 | dma_setup(esp, esp->esp_command_dvma, 5, 0); | ||
3960 | esp_setcount(esp->eregs, 5, 0); | ||
3961 | esp_cmd(esp, ESP_CMD_DMA | ESP_CMD_TI); | ||
3962 | } | ||
3963 | break; | ||
3964 | |||
3965 | default: | ||
3966 | /* whoops */ | ||
3967 | ESPMISC(("bogus msgout sending NOP\n")); | ||
3968 | esp->cur_msgout[0] = NOP; | ||
3969 | |||
3970 | if (esp->erev == fashme) { | ||
3971 | hme_fifo_push(esp, &esp->cur_msgout[0], 1); | ||
3972 | } else { | ||
3973 | sbus_writeb(esp->cur_msgout[0], esp->eregs + ESP_FDATA); | ||
3974 | } | ||
3975 | |||
3976 | esp->msgout_len = 1; | ||
3977 | esp_cmd(esp, ESP_CMD_TI); | ||
3978 | break; | ||
3979 | } | ||
3980 | |||
3981 | esp_advance_phase(esp->current_SC, in_msgoutdone); | ||
3982 | return do_intr_end; | ||
3983 | } | ||
3984 | |||
3985 | static int esp_do_msgoutdone(struct esp *esp) | ||
3986 | { | ||
3987 | if (esp->msgout_len > 1) { | ||
3988 | /* XXX HME/FAS ATN deassert workaround required, | ||
3989 | * XXX no DMA flushing, only possible ESP_CMD_FLUSH | ||
3990 | * XXX to kill the fifo. | ||
3991 | */ | ||
3992 | if (esp->erev != fashme) { | ||
3993 | u32 tmp; | ||
3994 | |||
3995 | while ((tmp = sbus_readl(esp->dregs + DMA_CSR)) & DMA_PEND_READ) | ||
3996 | udelay(1); | ||
3997 | tmp &= ~DMA_ENABLE; | ||
3998 | sbus_writel(tmp, esp->dregs + DMA_CSR); | ||
3999 | dma_invalidate(esp); | ||
4000 | } else { | ||
4001 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
4002 | } | ||
4003 | } | ||
4004 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
4005 | if (esp->erev != fashme) | ||
4006 | esp_cmd(esp, ESP_CMD_NULL); | ||
4007 | switch (esp->sreg & ESP_STAT_PMASK) { | ||
4008 | case ESP_MOP: | ||
4009 | /* whoops, parity error */ | ||
4010 | ESPLOG(("esp%d: still in msgout, parity error assumed\n", | ||
4011 | esp->esp_id)); | ||
4012 | if (esp->msgout_len > 1) | ||
4013 | esp_cmd(esp, ESP_CMD_SATN); | ||
4014 | esp_advance_phase(esp->current_SC, in_msgout); | ||
4015 | return do_work_bus; | ||
4016 | |||
4017 | case ESP_DIP: | ||
4018 | break; | ||
4019 | |||
4020 | default: | ||
4021 | /* Happy Meal fifo is touchy... */ | ||
4022 | if ((esp->erev != fashme) && | ||
4023 | !fcount(esp) && | ||
4024 | !(((struct esp_device *)esp->current_SC->device->hostdata)->sync_max_offset)) | ||
4025 | esp_cmd(esp, ESP_CMD_FLUSH); | ||
4026 | break; | ||
4027 | |||
4028 | } | ||
4029 | } else { | ||
4030 | ESPLOG(("esp%d: disconnect, resetting bus\n", esp->esp_id)); | ||
4031 | return do_reset_bus; | ||
4032 | } | ||
4033 | |||
4034 | /* If we sent out a synchronous negotiation message, update | ||
4035 | * our state. | ||
4036 | */ | ||
4037 | if (esp->cur_msgout[2] == EXTENDED_MESSAGE && | ||
4038 | esp->cur_msgout[4] == EXTENDED_SDTR) { | ||
4039 | esp->snip = 1; /* anal retentiveness... */ | ||
4040 | } | ||
4041 | |||
4042 | esp->prevmsgout = esp->cur_msgout[0]; | ||
4043 | esp->msgout_len = 0; | ||
4044 | esp_advance_phase(esp->current_SC, in_the_dark); | ||
4045 | return esp_do_phase_determine(esp); | ||
4046 | } | ||
4047 | |||
4048 | static int esp_bus_unexpected(struct esp *esp) | ||
4049 | { | ||
4050 | ESPLOG(("esp%d: command in weird state %2x\n", | ||
4051 | esp->esp_id, esp->current_SC->SCp.phase)); | ||
4052 | return do_reset_bus; | ||
4053 | } | ||
4054 | |||
4055 | static espfunc_t bus_vector[] = { | ||
4056 | esp_do_data_finale, | ||
4057 | esp_do_data_finale, | ||
4058 | esp_bus_unexpected, | ||
4059 | esp_do_msgin, | ||
4060 | esp_do_msgincont, | ||
4061 | esp_do_msgindone, | ||
4062 | esp_do_msgout, | ||
4063 | esp_do_msgoutdone, | ||
4064 | esp_do_cmdbegin, | ||
4065 | esp_do_cmddone, | ||
4066 | esp_do_status, | ||
4067 | esp_do_freebus, | ||
4068 | esp_do_phase_determine, | ||
4069 | esp_bus_unexpected, | ||
4070 | esp_bus_unexpected, | ||
4071 | esp_bus_unexpected, | ||
4072 | }; | ||
4073 | |||
4074 | /* This is the second tier in our dual-level SCSI state machine. */ | ||
4075 | static int esp_work_bus(struct esp *esp) | ||
4076 | { | ||
4077 | struct scsi_cmnd *SCptr = esp->current_SC; | ||
4078 | unsigned int phase; | ||
4079 | |||
4080 | ESPBUS(("esp_work_bus: ")); | ||
4081 | if (!SCptr) { | ||
4082 | ESPBUS(("reconnect\n")); | ||
4083 | return esp_do_reconnect(esp); | ||
4084 | } | ||
4085 | phase = SCptr->SCp.phase; | ||
4086 | if ((phase & 0xf0) == in_phases_mask) | ||
4087 | return bus_vector[(phase & 0x0f)](esp); | ||
4088 | else if ((phase & 0xf0) == in_slct_mask) | ||
4089 | return esp_select_complete(esp); | ||
4090 | else | ||
4091 | return esp_bus_unexpected(esp); | ||
4092 | } | ||
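esp_work_bus() relies on the phase encoding: the high nibble of SCp.phase says which family the state belongs to, and the low nibble indexes bus_vector[]. A stripped-down model of that table-driven dispatch (the constant and function names here are illustrative, not the driver's):

#include <stdio.h>

typedef int (*stepfn_t)(void);

static int step_a(void) { return puts("first handler"); }
static int step_b(void) { return puts("second handler"); }

static stepfn_t vec[] = { step_a, step_b };

#define CLASS_BUS	0x10	/* plays the role of in_phases_mask */

static int dispatch(unsigned int phase)
{
	if ((phase & 0xf0) == CLASS_BUS)
		return vec[phase & 0x0f]();
	return -1;			/* unexpected state */
}

int main(void)
{
	dispatch(CLASS_BUS | 0x01);	/* runs step_b */
	return 0;
}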
4093 | |||
4094 | static espfunc_t isvc_vector[] = { | ||
4095 | NULL, | ||
4096 | esp_do_phase_determine, | ||
4097 | esp_do_resetbus, | ||
4098 | esp_finish_reset, | ||
4099 | esp_work_bus | ||
4100 | }; | ||
4101 | |||
4102 | /* Main interrupt handler for an esp adapter. */ | ||
4103 | static void esp_handle(struct esp *esp) | ||
4104 | { | ||
4105 | struct scsi_cmnd *SCptr; | ||
4106 | int what_next = do_intr_end; | ||
4107 | |||
4108 | SCptr = esp->current_SC; | ||
4109 | |||
4110 | /* Check for errors. */ | ||
4111 | esp->sreg = sbus_readb(esp->eregs + ESP_STATUS); | ||
4112 | esp->sreg &= (~ESP_STAT_INTR); | ||
4113 | if (esp->erev == fashme) { | ||
4114 | esp->sreg2 = sbus_readb(esp->eregs + ESP_STATUS2); | ||
4115 | esp->seqreg = (sbus_readb(esp->eregs + ESP_SSTEP) & ESP_STEP_VBITS); | ||
4116 | } | ||
4117 | |||
4118 | if (esp->sreg & (ESP_STAT_SPAM)) { | ||
4119 | /* Gross error, could be due to one of: | ||
4120 | * | ||
4121 | * - top of fifo overwritten, could be because | ||
4122 | * we tried to do a synchronous transfer with | ||
4123 | * an offset greater than ESP fifo size | ||
4124 | * | ||
4125 | * - top of command register overwritten | ||
4126 | * | ||
4127 | * - DMA setup to go in one direction, SCSI | ||
4128 | * bus points in the other, whoops | ||
4129 | * | ||
4130 | * - weird phase change during asynchronous | ||
4131 | * data phase while we are initiator | ||
4132 | */ | ||
4133 | ESPLOG(("esp%d: Gross error sreg=%2x\n", esp->esp_id, esp->sreg)); | ||
4134 | |||
4135 | /* If a command is live on the bus we cannot safely | ||
4136 | * reset the bus, so we'll just let the pieces fall | ||
4137 | * where they may. Here we are hoping that the | ||
4138 | * target will be able to cleanly go away soon | ||
4139 | * so we can safely reset things. | ||
4140 | */ | ||
4141 | if (!SCptr) { | ||
4142 | ESPLOG(("esp%d: No current cmd during gross error, " | ||
4143 | "resetting bus\n", esp->esp_id)); | ||
4144 | what_next = do_reset_bus; | ||
4145 | goto state_machine; | ||
4146 | } | ||
4147 | } | ||
4148 | |||
4149 | if (sbus_readl(esp->dregs + DMA_CSR) & DMA_HNDL_ERROR) { | ||
4150 | /* A DMA gate array error. Here we must | ||
4151 | * be seeing one of two things. Either the | ||
4152 | * virtual to physical address translation | ||
4153 | * on the SBUS could not occur, or else the | ||
4154 | * translation it did get pointed to a bogus | ||
4155 | * page. Ho hum... | ||
4156 | */ | ||
4157 | ESPLOG(("esp%d: DMA error %08x\n", esp->esp_id, | ||
4158 | sbus_readl(esp->dregs + DMA_CSR))); | ||
4159 | |||
4160 | /* DMA gate array itself must be reset to clear the | ||
4161 | * error condition. | ||
4162 | */ | ||
4163 | esp_reset_dma(esp); | ||
4164 | |||
4165 | what_next = do_reset_bus; | ||
4166 | goto state_machine; | ||
4167 | } | ||
4168 | |||
4169 | esp->ireg = sbus_readb(esp->eregs + ESP_INTRPT); /* Unlatch intr reg */ | ||
4170 | |||
4171 | if (esp->erev == fashme) { | ||
4172 | /* This chip is really losing. */ | ||
4173 | ESPHME(("HME[")); | ||
4174 | |||
4175 | ESPHME(("sreg2=%02x,", esp->sreg2)); | ||
4176 | /* Must latch fifo before reading the interrupt | ||
4177 | * register else garbage ends up in the FIFO | ||
4178 | * which confuses the driver utterly. | ||
4179 | */ | ||
4180 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
4181 | (esp->sreg2 & ESP_STAT2_F1BYTE)) { | ||
4182 | ESPHME(("fifo_workaround]")); | ||
4183 | hme_fifo_read(esp); | ||
4184 | } else { | ||
4185 | ESPHME(("no_fifo_workaround]")); | ||
4186 | } | ||
4187 | } | ||
4188 | |||
4189 | /* No current cmd is only valid at this point when there are | ||
4190 | * commands off the bus or we are trying a reset. | ||
4191 | */ | ||
4192 | if (!SCptr && !esp->disconnected_SC && !(esp->ireg & ESP_INTR_SR)) { | ||
4193 | /* Panic is safe, since current_SC is null. */ | ||
4194 | ESPLOG(("esp%d: no command in esp_handle()\n", esp->esp_id)); | ||
4195 | panic("esp_handle: current_SC == penguin within interrupt!"); | ||
4196 | } | ||
4197 | |||
4198 | if (esp->ireg & (ESP_INTR_IC)) { | ||
4199 | /* Illegal command fed to ESP. Outside of obvious | ||
4200 | * software bugs that could cause this, there is | ||
4201 | * a condition with esp100 where we can confuse the | ||
4202 | * ESP into an erroneous illegal command interrupt | ||
4203 | * because it does not scrape the FIFO properly | ||
4204 | * for reselection. See esp100_reconnect_hwbug() | ||
4205 | * to see how we try very hard to avoid this. | ||
4206 | */ | ||
4207 | ESPLOG(("esp%d: invalid command\n", esp->esp_id)); | ||
4208 | |||
4209 | esp_dump_state(esp); | ||
4210 | |||
4211 | if (SCptr != NULL) { | ||
4212 | /* Devices with very buggy firmware can drop BSY | ||
4213 | * during a scatter list interrupt when using sync | ||
4214 | * mode transfers. We continue the transfer as | ||
4215 | * expected, the target drops the bus, the ESP | ||
4216 | * gets confused, and we get an illegal command | ||
4217 | * interrupt because the bus is in the disconnected | ||
4218 | * state now and ESP_CMD_TI is only allowed when | ||
4219 | * a nexus is alive on the bus. | ||
4220 | */ | ||
4221 | ESPLOG(("esp%d: Forcing async and disabling disconnect for " | ||
4222 | "target %d\n", esp->esp_id, SCptr->device->id)); | ||
4223 | SCptr->device->borken = 1; /* foo on you */ | ||
4224 | } | ||
4225 | |||
4226 | what_next = do_reset_bus; | ||
4227 | } else if (!(esp->ireg & ~(ESP_INTR_FDONE | ESP_INTR_BSERV | ESP_INTR_DC))) { | ||
4228 | if (SCptr) { | ||
4229 | unsigned int phase = SCptr->SCp.phase; | ||
4230 | |||
4231 | if (phase & in_phases_mask) { | ||
4232 | what_next = esp_work_bus(esp); | ||
4233 | } else if (phase & in_slct_mask) { | ||
4234 | what_next = esp_select_complete(esp); | ||
4235 | } else { | ||
4236 | ESPLOG(("esp%d: interrupt for no good reason...\n", | ||
4237 | esp->esp_id)); | ||
4238 | what_next = do_intr_end; | ||
4239 | } | ||
4240 | } else { | ||
4241 | ESPLOG(("esp%d: BSERV or FDONE or DC while SCptr==NULL\n", | ||
4242 | esp->esp_id)); | ||
4243 | what_next = do_reset_bus; | ||
4244 | } | ||
4245 | } else if (esp->ireg & ESP_INTR_SR) { | ||
4246 | ESPLOG(("esp%d: SCSI bus reset interrupt\n", esp->esp_id)); | ||
4247 | what_next = do_reset_complete; | ||
4248 | } else if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN)) { | ||
4249 | ESPLOG(("esp%d: AIEEE we have been selected by another initiator!\n", | ||
4250 | esp->esp_id)); | ||
4251 | what_next = do_reset_bus; | ||
4252 | } else if (esp->ireg & ESP_INTR_RSEL) { | ||
4253 | if (SCptr == NULL) { | ||
4254 | /* This is ok. */ | ||
4255 | what_next = esp_do_reconnect(esp); | ||
4256 | } else if (SCptr->SCp.phase & in_slct_mask) { | ||
4257 | /* Only selection code knows how to clean | ||
4258 | * up properly. | ||
4259 | */ | ||
4260 | ESPDISC(("Reselected during selection attempt\n")); | ||
4261 | what_next = esp_select_complete(esp); | ||
4262 | } else { | ||
4263 | ESPLOG(("esp%d: Reselected while bus is busy\n", | ||
4264 | esp->esp_id)); | ||
4265 | what_next = do_reset_bus; | ||
4266 | } | ||
4267 | } | ||
4268 | |||
4269 | /* This is tier-one in our dual level SCSI state machine. */ | ||
4270 | state_machine: | ||
4271 | while (what_next != do_intr_end) { | ||
4272 | if (what_next >= do_phase_determine && | ||
4273 | what_next < do_intr_end) { | ||
4274 | what_next = isvc_vector[what_next](esp); | ||
4275 | } else { | ||
4276 | /* state is completely lost ;-( */ | ||
4277 | ESPLOG(("esp%d: interrupt engine loses state, resetting bus\n", | ||
4278 | esp->esp_id)); | ||
4279 | what_next = do_reset_bus; | ||
4280 | } | ||
4281 | } | ||
4282 | } | ||
4283 | |||
4284 | /* Service only the ESP described by dev_id. */ | ||
4285 | static irqreturn_t esp_intr(int irq, void *dev_id) | ||
4286 | { | ||
4287 | struct esp *esp = dev_id; | ||
4288 | unsigned long flags; | ||
4289 | |||
4290 | spin_lock_irqsave(esp->ehost->host_lock, flags); | ||
4291 | if (ESP_IRQ_P(esp->dregs)) { | ||
4292 | ESP_INTSOFF(esp->dregs); | ||
4293 | |||
4294 | ESPIRQ(("I[%d:%d](", smp_processor_id(), esp->esp_id)); | ||
4295 | esp_handle(esp); | ||
4296 | ESPIRQ((")")); | ||
4297 | |||
4298 | ESP_INTSON(esp->dregs); | ||
4299 | } | ||
4300 | spin_unlock_irqrestore(esp->ehost->host_lock, flags); | ||
4301 | |||
4302 | return IRQ_HANDLED; | ||
4303 | } | ||
4304 | |||
4305 | static int esp_slave_alloc(struct scsi_device *SDptr) | ||
4306 | { | ||
4307 | struct esp_device *esp_dev = | ||
4308 | kmalloc(sizeof(struct esp_device), GFP_ATOMIC); | ||
4309 | |||
4310 | if (!esp_dev) | ||
4311 | return -ENOMEM; | ||
4312 | memset(esp_dev, 0, sizeof(struct esp_device)); | ||
4313 | SDptr->hostdata = esp_dev; | ||
4314 | return 0; | ||
4315 | } | ||
4316 | |||
4317 | static void esp_slave_destroy(struct scsi_device *SDptr) | ||
4318 | { | ||
4319 | struct esp *esp = (struct esp *) SDptr->host->hostdata; | ||
4320 | |||
4321 | esp->targets_present &= ~(1 << SDptr->id); | ||
4322 | kfree(SDptr->hostdata); | ||
4323 | SDptr->hostdata = NULL; | ||
4324 | } | ||
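Side note on esp_slave_alloc() above: the kmalloc()+memset() pair is exactly what kzalloc() does in one call. A drop-in sketch, assuming the same includes as the rest of this file (shown for illustration only; behaviour is unchanged):

static int esp_slave_alloc_zalloc(struct scsi_device *SDptr)
{
	struct esp_device *esp_dev = kzalloc(sizeof(*esp_dev), GFP_ATOMIC);

	if (!esp_dev)
		return -ENOMEM;
	SDptr->hostdata = esp_dev;
	return 0;
}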
4325 | |||
4326 | static struct scsi_host_template esp_template = { | ||
4327 | .module = THIS_MODULE, | ||
4328 | .name = "esp", | ||
4329 | .info = esp_info, | ||
4330 | .slave_alloc = esp_slave_alloc, | ||
4331 | .slave_destroy = esp_slave_destroy, | ||
4332 | .queuecommand = esp_queue, | ||
4333 | .eh_abort_handler = esp_abort, | ||
4334 | .eh_bus_reset_handler = esp_reset, | ||
4335 | .can_queue = 7, | ||
4336 | .this_id = 7, | ||
4337 | .sg_tablesize = SG_ALL, | ||
4338 | .cmd_per_lun = 1, | ||
4339 | .use_clustering = ENABLE_CLUSTERING, | ||
4340 | .proc_name = "esp", | ||
4341 | .proc_info = esp_proc_info, | ||
4342 | }; | ||
4343 | |||
4344 | #ifndef CONFIG_SUN4 | ||
4345 | static struct of_device_id esp_match[] = { | ||
4346 | { | ||
4347 | .name = "SUNW,esp", | ||
4348 | .data = &esp_template, | ||
4349 | }, | ||
4350 | { | ||
4351 | .name = "SUNW,fas", | ||
4352 | .data = &esp_template, | ||
4353 | }, | ||
4354 | { | ||
4355 | .name = "esp", | ||
4356 | .data = &esp_template, | ||
4357 | }, | ||
4358 | {}, | ||
4359 | }; | ||
4360 | MODULE_DEVICE_TABLE(of, esp_match); | ||
4361 | |||
4362 | static struct of_platform_driver esp_sbus_driver = { | ||
4363 | .name = "esp", | ||
4364 | .match_table = esp_match, | ||
4365 | .probe = esp_sbus_probe, | ||
4366 | .remove = __devexit_p(esp_sbus_remove), | ||
4367 | }; | ||
4368 | #endif | ||
4369 | |||
4370 | static int __init esp_init(void) | ||
4371 | { | ||
4372 | #ifdef CONFIG_SUN4 | ||
4373 | return esp_sun4_probe(&esp_template); | ||
4374 | #else | ||
4375 | return of_register_driver(&esp_sbus_driver, &sbus_bus_type); | ||
4376 | #endif | ||
4377 | } | ||
4378 | |||
4379 | static void __exit esp_exit(void) | ||
4380 | { | ||
4381 | #ifdef CONFIG_SUN4 | ||
4382 | esp_sun4_remove(); | ||
4383 | #else | ||
4384 | of_unregister_driver(&esp_sbus_driver); | ||
4385 | #endif | ||
4386 | } | ||
4387 | |||
4388 | MODULE_DESCRIPTION("ESP Sun SCSI driver"); | ||
4389 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
4390 | MODULE_LICENSE("GPL"); | ||
4391 | MODULE_VERSION(DRV_VERSION); | ||
4392 | |||
4393 | module_init(esp_init); | ||
4394 | module_exit(esp_exit); | ||
diff --git a/drivers/scsi/esp.h b/drivers/scsi/esp.h deleted file mode 100644 index a98cda9121fc..000000000000 --- a/drivers/scsi/esp.h +++ /dev/null | |||
@@ -1,406 +0,0 @@ | |||
1 | /* $Id: esp.h,v 1.29 2001/12/11 04:55:47 davem Exp $ | ||
2 | * esp.h: Defines and structures for the Sparc ESP (Enhanced SCSI | ||
3 | * Processor) driver under Linux. | ||
4 | * | ||
5 | * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) | ||
6 | */ | ||
7 | |||
8 | #ifndef _SPARC_ESP_H | ||
9 | #define _SPARC_ESP_H | ||
10 | |||
11 | /* For dvma controller register definitions. */ | ||
12 | #include <asm/dma.h> | ||
13 | |||
14 | /* The ESP SCSI controllers have their register sets in three | ||
15 | * "classes": | ||
16 | * | ||
17 | * 1) Registers which are both read and write. | ||
18 | * 2) Registers which are read only. | ||
19 | * 3) Registers which are write only. | ||
20 | * | ||
21 | * Yet, they all live within the same IO space. | ||
22 | */ | ||
23 | |||
24 | /* All the ESP registers are one byte each and are accessed longwords | ||
25 | * apart with a big-endian ordering to the bytes. | ||
26 | */ | ||
27 | /* Access Description Offset */ | ||
28 | #define ESP_TCLOW 0x00UL /* rw Low bits of the transfer count 0x00 */ | ||
29 | #define ESP_TCMED 0x04UL /* rw Mid bits of the transfer count 0x04 */ | ||
30 | #define ESP_FDATA 0x08UL /* rw FIFO data bits 0x08 */ | ||
31 | #define ESP_CMD 0x0cUL /* rw SCSI command bits 0x0c */ | ||
32 | #define ESP_STATUS 0x10UL /* ro ESP status register 0x10 */ | ||
33 | #define ESP_BUSID ESP_STATUS /* wo Bus ID for select/reselect 0x10 */ | ||
34 | #define ESP_INTRPT 0x14UL /* ro Kind of interrupt 0x14 */ | ||
35 | #define ESP_TIMEO ESP_INTRPT /* wo Timeout value for select/resel 0x14 */ | ||
36 | #define ESP_SSTEP 0x18UL /* ro Sequence step register 0x18 */ | ||
37 | #define ESP_STP ESP_SSTEP /* wo Transfer period per sync 0x18 */ | ||
38 | #define ESP_FFLAGS 0x1cUL /* ro Bits of current FIFO info 0x1c */ | ||
39 | #define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */ | ||
40 | #define ESP_CFG1 0x20UL /* rw First configuration register 0x20 */ | ||
41 | #define ESP_CFACT 0x24UL /* wo Clock conversion factor 0x24 */ | ||
42 | #define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */ | ||
43 | #define ESP_CTEST 0x28UL /* wo Chip test register 0x28 */ | ||
44 | #define ESP_CFG2 0x2cUL /* rw Second configuration register 0x2c */ | ||
45 | #define ESP_CFG3 0x30UL /* rw Third configuration register 0x30 */ | ||
46 | #define ESP_TCHI 0x38UL /* rw High bits of transfer count 0x38 */ | ||
47 | #define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ | ||
48 | #define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ | ||
49 | #define ESP_FGRND 0x3cUL /* rw Data base for fifo 0x3c */ | ||
50 | #define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */ | ||
51 | #define ESP_REG_SIZE 0x40UL | ||
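Because each register is a single byte sitting on a longword boundary, esp.c always hands the byte accessors eregs plus one of the offsets above. Two illustrative helpers in that style (the names are ours and struct esp itself is declared further down in this header):

static inline u8 esp_read_status(struct esp *esp)
{
	return sbus_readb(esp->eregs + ESP_STATUS);
}

static inline void esp_write_cmd(struct esp *esp, u8 cmd)
{
	sbus_writeb(cmd, esp->eregs + ESP_CMD);
}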
52 | |||
53 | /* Various revisions of the ESP board. */ | ||
54 | enum esp_rev { | ||
55 | esp100 = 0x00, /* NCR53C90 - very broken */ | ||
56 | esp100a = 0x01, /* NCR53C90A */ | ||
57 | esp236 = 0x02, | ||
58 | fas236 = 0x03, | ||
59 | fas100a = 0x04, | ||
60 | fast = 0x05, | ||
61 | fashme = 0x06, | ||
62 | espunknown = 0x07 | ||
63 | }; | ||
64 | |||
65 | /* We allocate one of these for each scsi device and attach it to | ||
66 | * SDptr->hostdata for use in the driver | ||
67 | */ | ||
68 | struct esp_device { | ||
69 | unsigned char sync_min_period; | ||
70 | unsigned char sync_max_offset; | ||
71 | unsigned sync:1; | ||
72 | unsigned wide:1; | ||
73 | unsigned disconnect:1; | ||
74 | }; | ||
75 | |||
76 | struct scsi_cmnd; | ||
77 | |||
78 | /* We get one of these for each ESP probed. */ | ||
79 | struct esp { | ||
80 | void __iomem *eregs; /* ESP controller registers */ | ||
81 | void __iomem *dregs; /* DMA controller registers */ | ||
82 | struct sbus_dma *dma; /* DMA controller sw state */ | ||
83 | struct Scsi_Host *ehost; /* Backpointer to SCSI Host */ | ||
84 | struct sbus_dev *sdev; /* Pointer to SBus entry */ | ||
85 | |||
86 | /* ESP Configuration Registers */ | ||
87 | u8 config1; /* Copy of the 1st config register */ | ||
88 | u8 config2; /* Copy of the 2nd config register */ | ||
89 | u8 config3[16]; /* Copy of the 3rd config register */ | ||
90 | |||
91 | /* The current command we are sending to the ESP chip. This esp_command | ||
92 | * ptr needs to be mapped in DVMA area so we can send commands and read | ||
93 | * from the ESP fifo without burning precious CPU cycles. Programmed I/O | ||
94 | * sucks when we have the DVMA to do it for us. The ESP is stupid and will | ||
95 | * only send out 6, 10, and 12 byte SCSI commands, others we need to send | ||
96 | * one byte at a time. esp_slowcmd being set says that we are doing one | ||
97 | * of the command types ESP doesn't understand, esp_scmdp keeps track of | ||
98 | * which byte we are sending, esp_scmdleft says how many bytes to go. | ||
99 | */ | ||
100 | volatile u8 *esp_command; /* Location of command (CPU view) */ | ||
101 | __u32 esp_command_dvma;/* Location of command (DVMA view) */ | ||
102 | unsigned char esp_clen; /* Length of this command */ | ||
103 | unsigned char esp_slowcmd; | ||
104 | unsigned char *esp_scmdp; | ||
105 | unsigned char esp_scmdleft; | ||
106 | |||
107 | /* The following are used to determine the cause of an IRQ. Upon every | ||
108 | * IRQ entry we synchronize these with the hardware registers. | ||
109 | */ | ||
110 | u8 ireg; /* Copy of ESP interrupt register */ | ||
111 | u8 sreg; /* Copy of ESP status register */ | ||
112 | u8 seqreg; /* Copy of ESP sequence step register */ | ||
113 | u8 sreg2; /* Copy of HME status2 register */ | ||
114 | |||
115 | /* To save register writes to the ESP, which can be expensive, we | ||
116 | * keep track of the previous value that various registers had for | ||
117 | * the last target we connected to. If they are the same for the | ||
118 | * current target, we skip the register writes as they are not needed. | ||
119 | */ | ||
120 | u8 prev_soff, prev_stp; | ||
121 | u8 prev_cfg3, __cache_pad; | ||
122 | |||
123 | /* We also keep a cache of the previous FAS/HME DMA CSR register value. */ | ||
124 | u32 prev_hme_dmacsr; | ||
125 | |||
126 | /* The HME is the biggest piece of shit I have ever seen. */ | ||
127 | u8 hme_fifo_workaround_buffer[16 * 2]; | ||
128 | u8 hme_fifo_workaround_count; | ||
129 | |||
130 | /* For each target we keep track of save/restore data | ||
131 | * pointer information. This needs to be updated majorly | ||
132 | * when we add support for tagged queueing. -DaveM | ||
133 | */ | ||
134 | struct esp_pointers { | ||
135 | char *saved_ptr; | ||
136 | struct scatterlist *saved_buffer; | ||
137 | int saved_this_residual; | ||
138 | int saved_buffers_residual; | ||
139 | } data_pointers[16] /*XXX [MAX_TAGS_PER_TARGET]*/; | ||
140 | |||
141 | /* Clock periods, frequencies, synchronization, etc. */ | ||
142 | unsigned int cfreq; /* Clock frequency in HZ */ | ||
143 | unsigned int cfact; /* Clock conversion factor */ | ||
144 | unsigned int raw_cfact; /* Raw copy from probing */ | ||
145 | unsigned int ccycle; /* One ESP clock cycle */ | ||
146 | unsigned int ctick; /* One ESP clock time */ | ||
147 | unsigned int radelay; /* FAST chip req/ack delay */ | ||
148 | unsigned int neg_defp; /* Default negotiation period */ | ||
149 | unsigned int sync_defp; /* Default sync transfer period */ | ||
150 | unsigned int max_period; /* longest our period can be */ | ||
151 | unsigned int min_period; /* shortest period we can withstand */ | ||
152 | |||
153 | struct esp *next; /* Next ESP we probed or NULL */ | ||
154 | char prom_name[64]; /* Name of ESP device from prom */ | ||
155 | int prom_node; /* Prom node where ESP found */ | ||
156 | int esp_id; /* Unique per-ESP ID number */ | ||
157 | |||
158 | /* For slow to medium speed input clock rates we shoot for 5mb/s, | ||
159 | * but for high input clock rates we try to do 10mb/s although I | ||
160 | * don't think a transfer can even run that fast with an ESP even | ||
161 | * with DMA2 scatter gather pipelining. | ||
162 | */ | ||
163 | #define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ | ||
164 | #define SYNC_DEFP_FAST 0x19 /* 10mb/s */ | ||
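Where those two values come from, as a quick standalone check: the number is an SDTR period factor in 4ns units, and at one byte per REQ/ACK cycle the narrow-bus rate is simply the inverse of the period:

#include <stdio.h>

int main(void)
{
	int factors[] = { 0x32, 0x19 };
	int i;

	for (i = 0; i < 2; i++) {
		int period_ns = factors[i] * 4;
		/* 10^9 ns per second / period_ns, expressed in MB/s */
		int mb_per_s = 1000 / period_ns;

		printf("0x%02x -> %dns -> %dMB/s\n",
		       factors[i], period_ns, mb_per_s);
	}
	return 0;
}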
165 | |||
166 | unsigned int snip; /* Sync. negotiation in progress */ | ||
167 | unsigned int wnip; /* WIDE negotiation in progress */ | ||
168 | unsigned int targets_present;/* targets spoken to before */ | ||
169 | |||
170 | int current_transfer_size; /* Set at beginning of data dma */ | ||
171 | |||
172 | u8 espcmdlog[32]; /* Log of current esp cmds sent. */ | ||
173 | u8 espcmdent; /* Current entry in esp cmd log. */ | ||
174 | |||
175 | /* Misc. info about this ESP */ | ||
176 | enum esp_rev erev; /* ESP revision */ | ||
177 | int irq; /* SBus IRQ for this ESP */ | ||
178 | int scsi_id; /* Who am I as initiator? */ | ||
179 | int scsi_id_mask; /* Bitmask of 'me'. */ | ||
180 | int diff; /* Differential SCSI bus? */ | ||
181 | int bursts; /* Burst sizes our DVMA supports */ | ||
182 | |||
183 | /* Our command queues, only one cmd lives in the current_SC queue. */ | ||
184 | struct scsi_cmnd *issue_SC; /* Commands to be issued */ | ||
185 | struct scsi_cmnd *current_SC; /* Who is currently working the bus */ | ||
186 | struct scsi_cmnd *disconnected_SC;/* Commands disconnected from the bus */ | ||
187 | |||
188 | /* Message goo */ | ||
189 | u8 cur_msgout[16]; | ||
190 | u8 cur_msgin[16]; | ||
191 | u8 prevmsgout, prevmsgin; | ||
192 | u8 msgout_len, msgin_len; | ||
193 | u8 msgout_ctr, msgin_ctr; | ||
194 | |||
195 | /* States that we cannot keep in the per cmd structure because they | ||
196 | * cannot be associated with any specific command. | ||
197 | */ | ||
198 | u8 resetting_bus; | ||
199 | wait_queue_head_t reset_queue; | ||
200 | }; | ||
201 | |||
202 | /* Bitfield meanings for the above registers. */ | ||
203 | |||
204 | /* ESP config reg 1, read-write, found on all ESP chips */ | ||
205 | #define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ | ||
206 | #define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ | ||
207 | #define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ | ||
208 | #define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ | ||
209 | #define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ | ||
210 | #define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ | ||
211 | |||
212 | /* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */ | ||
213 | #define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */ | ||
214 | #define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */ | ||
215 | #define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ | ||
216 | #define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tmode only) */ | ||
217 | #define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ | ||
218 | #define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ | ||
219 | #define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */ | ||
220 | #define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */ | ||
221 | #define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,esp216) */ | ||
222 | #define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (esp236) */ | ||
223 | #define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */ | ||
224 | #define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */ | ||
225 | #define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */ | ||
226 | |||
227 | /* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */ | ||
228 | #define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */ | ||
229 | #define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */ | ||
230 | #define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */ | ||
231 | #define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */ | ||
232 | #define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */ | ||
233 | #define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */ | ||
234 | #define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */ | ||
235 | #define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */ | ||
236 | #define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */ | ||
237 | #define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */ | ||
238 | #define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */ | ||
239 | #define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */ | ||
240 | #define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */ | ||
241 | #define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */ | ||
242 | #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ | ||
243 | #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ | ||
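The overloading above is why esp.c picks the fast-SCSI bit by chip revision in check_multibyte_msg(). A condensed sketch of that selection (the helper name is ours):

static inline u8 esp_fast_scsi_bit(enum esp_rev erev)
{
	if (erev == fas100a || erev == fashme)
		return ESP_CONFIG3_FAST;	/* esp100a/hme bit layout */
	return ESP_CONFIG3_FSCSI;		/* esp/fas236 bit layout */
}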
244 | |||
245 | /* ESP command register read-write */ | ||
246 | /* Group 1 commands: These may be sent at any point in time to the ESP | ||
247 | * chip. None of them can generate interrupts 'cept | ||
248 | * the "SCSI bus reset" command if you have not disabled | ||
249 | * SCSI reset interrupts in the config1 ESP register. | ||
250 | */ | ||
251 | #define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */ | ||
252 | #define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ | ||
253 | #define ESP_CMD_RC 0x02 /* Chip reset */ | ||
254 | #define ESP_CMD_RS 0x03 /* SCSI bus reset */ | ||
255 | |||
256 | /* Group 2 commands: ESP must be an initiator and connected to a target | ||
257 | * for these commands to work. | ||
258 | */ | ||
259 | #define ESP_CMD_TI 0x10 /* Transfer Information */ | ||
260 | #define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ | ||
261 | #define ESP_CMD_MOK 0x12 /* Message okie-dokie */ | ||
262 | #define ESP_CMD_TPAD 0x18 /* Transfer Pad */ | ||
263 | #define ESP_CMD_SATN 0x1a /* Set ATN */ | ||
264 | #define ESP_CMD_RATN 0x1b /* De-assert ATN */ | ||
265 | |||
266 | /* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected | ||
267 | * to a target as the initiator for these commands to work. | ||
268 | */ | ||
269 | #define ESP_CMD_SMSG 0x20 /* Send message */ | ||
270 | #define ESP_CMD_SSTAT 0x21 /* Send status */ | ||
271 | #define ESP_CMD_SDATA 0x22 /* Send data */ | ||
272 | #define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ | ||
273 | #define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ | ||
274 | #define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ | ||
275 | #define ESP_CMD_DCNCT 0x27 /* Disconnect */ | ||
276 | #define ESP_CMD_RMSG 0x28 /* Receive Message */ | ||
277 | #define ESP_CMD_RCMD 0x29 /* Receive Command */ | ||
278 | #define ESP_CMD_RDATA 0x2a /* Receive Data */ | ||
279 | #define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ | ||
280 | |||
281 | /* Group 4 commands: The ESP must be in the disconnected state and must | ||
282 | * not be connected to any targets as initiator for | ||
283 | * these commands to work. | ||
284 | */ | ||
285 | #define ESP_CMD_RSEL 0x40 /* Reselect */ | ||
286 | #define ESP_CMD_SEL 0x41 /* Select w/o ATN */ | ||
287 | #define ESP_CMD_SELA 0x42 /* Select w/ATN */ | ||
288 | #define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ | ||
289 | #define ESP_CMD_ESEL 0x44 /* Enable selection */ | ||
290 | #define ESP_CMD_DSEL 0x45 /* Disable selections */ | ||
291 | #define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ | ||
292 | #define ESP_CMD_RSEL3 0x47 /* Reselect3 */ | ||
293 | |||
294 | /* This bit enables the ESP's DMA on the SBus */ | ||
295 | #define ESP_CMD_DMA 0x80 /* Do DMA? */ | ||
296 | |||
297 | |||
298 | /* ESP status register read-only */ | ||
299 | #define ESP_STAT_PIO 0x01 /* IO phase bit */ | ||
300 | #define ESP_STAT_PCD 0x02 /* CD phase bit */ | ||
301 | #define ESP_STAT_PMSG 0x04 /* MSG phase bit */ | ||
302 | #define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ | ||
303 | #define ESP_STAT_TDONE 0x08 /* Transfer Completed */ | ||
304 | #define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ | ||
305 | #define ESP_STAT_PERR 0x20 /* Parity error */ | ||
306 | #define ESP_STAT_SPAM 0x40 /* Real bad error */ | ||
307 | /* This indicates the 'interrupt pending' condition on esp236; it is a reserved | ||
308 | * bit on other revs of the ESP. | ||
309 | */ | ||
310 | #define ESP_STAT_INTR 0x80 /* Interrupt */ | ||
311 | |||
312 | /* HME only: status 2 register */ | ||
313 | #define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */ | ||
314 | #define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */ | ||
315 | #define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */ | ||
316 | #define ESP_STAT2_CREGA 0x08 /* The command reg is active now */ | ||
317 | #define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */ | ||
318 | #define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */ | ||
319 | #define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */ | ||
320 | #define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */ | ||
321 | |||
322 | /* The status register can be masked with ESP_STAT_PMASK and compared | ||
323 | * with the following values to determine the current phase the ESP | ||
324 | * (at least thinks it) is in. For our purposes we also add our own | ||
325 | * software 'done' bit for our phase management engine. | ||
326 | */ | ||
327 | #define ESP_DOP (0) /* Data Out */ | ||
328 | #define ESP_DIP (ESP_STAT_PIO) /* Data In */ | ||
329 | #define ESP_CMDP (ESP_STAT_PCD) /* Command */ | ||
330 | #define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ | ||
331 | #define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ | ||
332 | #define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ | ||
333 | |||
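As a quick illustration of the decode described above, a helper along these lines (a sketch built only from the ESP_STAT_* and ESP_*P macros in this header, not something the driver itself carries) maps the masked status register to a phase name:

static const char *esp_phase_name(u8 sreg)
{
	/* Only the low three status bits encode the SCSI bus phase. */
	switch (sreg & ESP_STAT_PMASK) {
	case ESP_DOP:	return "DATA OUT";
	case ESP_DIP:	return "DATA IN";
	case ESP_CMDP:	return "COMMAND";
	case ESP_STATP:	return "STATUS";
	case ESP_MOP:	return "MESSAGE OUT";
	case ESP_MIP:	return "MESSAGE IN";
	default:	return "reserved";
	}
}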
334 | /* ESP interrupt register read-only */ | ||
335 | #define ESP_INTR_S 0x01 /* Select w/o ATN */ | ||
336 | #define ESP_INTR_SATN 0x02 /* Select w/ATN */ | ||
337 | #define ESP_INTR_RSEL 0x04 /* Reselected */ | ||
338 | #define ESP_INTR_FDONE 0x08 /* Function done */ | ||
339 | #define ESP_INTR_BSERV 0x10 /* Bus service */ | ||
340 | #define ESP_INTR_DC 0x20 /* Disconnect */ | ||
341 | #define ESP_INTR_IC 0x40 /* Illegal command given */ | ||
342 | #define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ | ||
343 | |||
344 | /* Interrupt status macros */ | ||
345 | #define ESP_SRESET_IRQ(esp) ((esp)->intreg & (ESP_INTR_SR)) | ||
346 | #define ESP_ILLCMD_IRQ(esp) ((esp)->intreg & (ESP_INTR_IC)) | ||
347 | #define ESP_SELECT_WITH_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_SATN)) | ||
348 | #define ESP_SELECT_WITHOUT_ATN_IRQ(esp) ((esp)->intreg & (ESP_INTR_S)) | ||
349 | #define ESP_SELECTION_IRQ(esp) ((ESP_SELECT_WITH_ATN_IRQ(esp)) || \ | ||
350 | (ESP_SELECT_WITHOUT_ATN_IRQ(esp))) | ||
351 | #define ESP_RESELECTION_IRQ(esp) ((esp)->intreg & (ESP_INTR_RSEL)) | ||
352 | |||
353 | /* ESP sequence step register read-only */ | ||
354 | #define ESP_STEP_VBITS 0x07 /* Valid bits */ | ||
355 | #define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ | ||
356 | #define ESP_STEP_SID 0x01 /* One msg byte sent */ | ||
357 | #define ESP_STEP_NCMD 0x02 /* Was not in command phase */ | ||
358 | #define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd | ||
359 | * bytes to be lost | ||
360 | */ | ||
361 | #define ESP_STEP_FINI4 0x04 /* Command was sent ok */ | ||
362 | |||
363 | /* Ho hum, some ESPs set the step register to this as well... */ | ||
364 | #define ESP_STEP_FINI5 0x05 | ||
365 | #define ESP_STEP_FINI6 0x06 | ||
366 | #define ESP_STEP_FINI7 0x07 | ||
367 | |||
368 | /* ESP chip-test register read-write */ | ||
369 | #define ESP_TEST_TARG 0x01 /* Target test mode */ | ||
370 | #define ESP_TEST_INI 0x02 /* Initiator test mode */ | ||
371 | #define ESP_TEST_TS 0x04 /* Tristate test mode */ | ||
372 | |||
373 | /* ESP unique ID register read-only, found on fas236+fas100a only */ | ||
374 | #define ESP_UID_F100A 0x00 /* ESP FAS100A */ | ||
375 | #define ESP_UID_F236 0x02 /* ESP FAS236 */ | ||
376 | #define ESP_UID_REV 0x07 /* ESP revision */ | ||
377 | #define ESP_UID_FAM 0xf8 /* ESP family */ | ||
378 | |||
379 | /* ESP fifo flags register read-only */ | ||
380 | /* Note that the following implies a 16 byte FIFO on the ESP. */ | ||
381 | #define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ | ||
382 | #define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */ | ||
383 | #define ESP_FF_SSTEP 0xe0 /* Sequence step */ | ||
384 | |||
385 | /* ESP clock conversion factor register write-only */ | ||
386 | #define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ | ||
387 | #define ESP_CCF_NEVER 0x01 /* Set it to this and die */ | ||
388 | #define ESP_CCF_F2 0x02 /* 10MHz */ | ||
389 | #define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ | ||
390 | #define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ | ||
391 | #define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ | ||
392 | #define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ | ||
393 | #define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ | ||
394 | |||
395 | /* HME only... */ | ||
396 | #define ESP_BUSID_RESELID 0x10 | ||
397 | #define ESP_BUSID_CTR32BIT 0x40 | ||
398 | |||
399 | #define ESP_BUS_TIMEOUT 275 /* In milli-seconds */ | ||
400 | #define ESP_TIMEO_CONST 8192 | ||
401 | #define ESP_NEG_DEFP(mhz, cfact) \ | ||
402 | ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (8192 * (cfact))) | ||
403 | #define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) | ||
404 | #define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) | ||
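A worked example of the two timing macros above, assuming (as the arithmetic suggests) that the clock rate is supplied in Hz and that a 40 MHz part uses a conversion factor of 8:

/*
 *	ESP_MHZ_TO_CYCLE(40000000) = 1000000000 / (40000000 / 1000)
 *	                           = 25000   (one 25 ns clock, in ps)
 *	ESP_NEG_DEFP(40000000, 8)  = (275 * 40000) / (8192 * 8)
 *	                           = 167     (select/reselect timeout
 *	                                      value written to ESP_TIMEO)
 */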
405 | |||
406 | #endif /* !(_SPARC_ESP_H) */ | ||
diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c new file mode 100644 index 000000000000..99ce03331b64 --- /dev/null +++ b/drivers/scsi/esp_scsi.c | |||
@@ -0,0 +1,2711 @@ | |||
1 | /* esp_scsi.c: ESP SCSI driver. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/slab.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/list.h> | ||
11 | #include <linux/completion.h> | ||
12 | #include <linux/kallsyms.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/moduleparam.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/irqreturn.h> | ||
17 | |||
18 | #include <asm/irq.h> | ||
19 | #include <asm/io.h> | ||
20 | #include <asm/dma.h> | ||
21 | |||
22 | #include <scsi/scsi.h> | ||
23 | #include <scsi/scsi_host.h> | ||
24 | #include <scsi/scsi_cmnd.h> | ||
25 | #include <scsi/scsi_device.h> | ||
26 | #include <scsi/scsi_tcq.h> | ||
27 | #include <scsi/scsi_dbg.h> | ||
28 | #include <scsi/scsi_transport_spi.h> | ||
29 | |||
30 | #include "esp_scsi.h" | ||
31 | |||
32 | #define DRV_MODULE_NAME "esp" | ||
33 | #define PFX DRV_MODULE_NAME ": " | ||
34 | #define DRV_VERSION "2.000" | ||
35 | #define DRV_MODULE_RELDATE "April 19, 2007" | ||
36 | |||
37 | /* SCSI bus reset settle time in seconds. */ | ||
38 | static int esp_bus_reset_settle = 3; | ||
39 | |||
40 | static u32 esp_debug; | ||
41 | #define ESP_DEBUG_INTR 0x00000001 | ||
42 | #define ESP_DEBUG_SCSICMD 0x00000002 | ||
43 | #define ESP_DEBUG_RESET 0x00000004 | ||
44 | #define ESP_DEBUG_MSGIN 0x00000008 | ||
45 | #define ESP_DEBUG_MSGOUT 0x00000010 | ||
46 | #define ESP_DEBUG_CMDDONE 0x00000020 | ||
47 | #define ESP_DEBUG_DISCONNECT 0x00000040 | ||
48 | #define ESP_DEBUG_DATASTART 0x00000080 | ||
49 | #define ESP_DEBUG_DATADONE 0x00000100 | ||
50 | #define ESP_DEBUG_RECONNECT 0x00000200 | ||
51 | #define ESP_DEBUG_AUTOSENSE 0x00000400 | ||
52 | |||
53 | #define esp_log_intr(f, a...) \ | ||
54 | do { if (esp_debug & ESP_DEBUG_INTR) \ | ||
55 | printk(f, ## a); \ | ||
56 | } while (0) | ||
57 | |||
58 | #define esp_log_reset(f, a...) \ | ||
59 | do { if (esp_debug & ESP_DEBUG_RESET) \ | ||
60 | printk(f, ## a); \ | ||
61 | } while (0) | ||
62 | |||
63 | #define esp_log_msgin(f, a...) \ | ||
64 | do { if (esp_debug & ESP_DEBUG_MSGIN) \ | ||
65 | printk(f, ## a); \ | ||
66 | } while (0) | ||
67 | |||
68 | #define esp_log_msgout(f, a...) \ | ||
69 | do { if (esp_debug & ESP_DEBUG_MSGOUT) \ | ||
70 | printk(f, ## a); \ | ||
71 | } while (0) | ||
72 | |||
73 | #define esp_log_cmddone(f, a...) \ | ||
74 | do { if (esp_debug & ESP_DEBUG_CMDDONE) \ | ||
75 | printk(f, ## a); \ | ||
76 | } while (0) | ||
77 | |||
78 | #define esp_log_disconnect(f, a...) \ | ||
79 | do { if (esp_debug & ESP_DEBUG_DISCONNECT) \ | ||
80 | printk(f, ## a); \ | ||
81 | } while (0) | ||
82 | |||
83 | #define esp_log_datastart(f, a...) \ | ||
84 | do { if (esp_debug & ESP_DEBUG_DATASTART) \ | ||
85 | printk(f, ## a); \ | ||
86 | } while (0) | ||
87 | |||
88 | #define esp_log_datadone(f, a...) \ | ||
89 | do { if (esp_debug & ESP_DEBUG_DATADONE) \ | ||
90 | printk(f, ## a); \ | ||
91 | } while (0) | ||
92 | |||
93 | #define esp_log_reconnect(f, a...) \ | ||
94 | do { if (esp_debug & ESP_DEBUG_RECONNECT) \ | ||
95 | printk(f, ## a); \ | ||
96 | } while (0) | ||
97 | |||
98 | #define esp_log_autosense(f, a...) \ | ||
99 | do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \ | ||
100 | printk(f, ## a); \ | ||
101 | } while (0) | ||
102 | |||
103 | #define esp_read8(REG) esp->ops->esp_read8(esp, REG) | ||
104 | #define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG) | ||
105 | |||
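These wrappers push every register access through the bus-specific ops vector. A rough sketch of what a bus front-end might supply, assuming the hook signatures declared in esp_scsi.h and, purely for illustration, a memory-mapped chip whose registers sit four bytes apart:

static u8 example_esp_read8(struct esp *esp, unsigned long reg)
{
	/* esp->regs is the ioremap'd register base. */
	return readb(esp->regs + (reg * 4UL));
}

static void example_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	writeb(val, esp->regs + (reg * 4UL));
}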
106 | static void esp_log_fill_regs(struct esp *esp, | ||
107 | struct esp_event_ent *p) | ||
108 | { | ||
109 | p->sreg = esp->sreg; | ||
110 | p->seqreg = esp->seqreg; | ||
111 | p->sreg2 = esp->sreg2; | ||
112 | p->ireg = esp->ireg; | ||
113 | p->select_state = esp->select_state; | ||
114 | p->event = esp->event; | ||
115 | } | ||
116 | |||
117 | void scsi_esp_cmd(struct esp *esp, u8 val) | ||
118 | { | ||
119 | struct esp_event_ent *p; | ||
120 | int idx = esp->esp_event_cur; | ||
121 | |||
122 | p = &esp->esp_event_log[idx]; | ||
123 | p->type = ESP_EVENT_TYPE_CMD; | ||
124 | p->val = val; | ||
125 | esp_log_fill_regs(esp, p); | ||
126 | |||
127 | esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
128 | |||
129 | esp_write8(val, ESP_CMD); | ||
130 | } | ||
131 | EXPORT_SYMBOL(scsi_esp_cmd); | ||
132 | |||
133 | static void esp_event(struct esp *esp, u8 val) | ||
134 | { | ||
135 | struct esp_event_ent *p; | ||
136 | int idx = esp->esp_event_cur; | ||
137 | |||
138 | p = &esp->esp_event_log[idx]; | ||
139 | p->type = ESP_EVENT_TYPE_EVENT; | ||
140 | p->val = val; | ||
141 | esp_log_fill_regs(esp, p); | ||
142 | |||
143 | esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
144 | |||
145 | esp->event = val; | ||
146 | } | ||
147 | |||
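Both scsi_esp_cmd() and esp_event() above advance the trace index with a mask rather than a modulo, which only wraps correctly when ESP_EVENT_LOG_SZ (from esp_scsi.h) is a power of two. For example, with a log size of 32:

/*
 *	(12 + 1) & (32 - 1) = 13	ordinary increment
 *	(31 + 1) & (32 - 1) = 0		index wraps back to the start
 */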
148 | static void esp_dump_cmd_log(struct esp *esp) | ||
149 | { | ||
150 | int idx = esp->esp_event_cur; | ||
151 | int stop = idx; | ||
152 | |||
153 | printk(KERN_INFO PFX "esp%d: Dumping command log\n", | ||
154 | esp->host->unique_id); | ||
155 | do { | ||
156 | struct esp_event_ent *p = &esp->esp_event_log[idx]; | ||
157 | |||
158 | printk(KERN_INFO PFX "esp%d: ent[%d] %s ", | ||
159 | esp->host->unique_id, idx, | ||
160 | p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT"); | ||
161 | |||
162 | printk("val[%02x] sreg[%02x] seqreg[%02x] " | ||
163 | "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n", | ||
164 | p->val, p->sreg, p->seqreg, | ||
165 | p->sreg2, p->ireg, p->select_state, p->event); | ||
166 | |||
167 | idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1); | ||
168 | } while (idx != stop); | ||
169 | } | ||
170 | |||
171 | static void esp_flush_fifo(struct esp *esp) | ||
172 | { | ||
173 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
174 | if (esp->rev == ESP236) { | ||
175 | int lim = 1000; | ||
176 | |||
177 | while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) { | ||
178 | if (--lim == 0) { | ||
179 | printk(KERN_ALERT PFX "esp%d: ESP_FF_FBYTES " | ||
180 | "will not clear!\n", | ||
181 | esp->host->unique_id); | ||
182 | break; | ||
183 | } | ||
184 | udelay(1); | ||
185 | } | ||
186 | } | ||
187 | } | ||
188 | |||
189 | static void hme_read_fifo(struct esp *esp) | ||
190 | { | ||
191 | int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
192 | int idx = 0; | ||
193 | |||
194 | while (fcnt--) { | ||
195 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
196 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
197 | } | ||
198 | if (esp->sreg2 & ESP_STAT2_F1BYTE) { | ||
199 | esp_write8(0, ESP_FDATA); | ||
200 | esp->fifo[idx++] = esp_read8(ESP_FDATA); | ||
201 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
202 | } | ||
203 | esp->fifo_cnt = idx; | ||
204 | } | ||
205 | |||
206 | static void esp_set_all_config3(struct esp *esp, u8 val) | ||
207 | { | ||
208 | int i; | ||
209 | |||
210 | for (i = 0; i < ESP_MAX_TARGET; i++) | ||
211 | esp->target[i].esp_config3 = val; | ||
212 | } | ||
213 | |||
214 | /* Reset the ESP chip, _not_ the SCSI bus. */ | ||
215 | static void esp_reset_esp(struct esp *esp) | ||
216 | { | ||
217 | u8 family_code, version; | ||
218 | |||
219 | /* Now reset the ESP chip */ | ||
220 | scsi_esp_cmd(esp, ESP_CMD_RC); | ||
221 | scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
222 | scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA); | ||
223 | |||
224 | /* Reload the configuration registers */ | ||
225 | esp_write8(esp->cfact, ESP_CFACT); | ||
226 | |||
227 | esp->prev_stp = 0; | ||
228 | esp_write8(esp->prev_stp, ESP_STP); | ||
229 | |||
230 | esp->prev_soff = 0; | ||
231 | esp_write8(esp->prev_soff, ESP_SOFF); | ||
232 | |||
233 | esp_write8(esp->neg_defp, ESP_TIMEO); | ||
234 | |||
235 | /* This is the only point at which it is reliable to read | ||
236 | * the ID code for the fast ESP chip variants. | ||
237 | */ | ||
238 | esp->max_period = ((35 * esp->ccycle) / 1000); | ||
239 | if (esp->rev == FAST) { | ||
240 | version = esp_read8(ESP_UID); | ||
241 | family_code = (version & 0xf8) >> 3; | ||
242 | if (family_code == 0x02) | ||
243 | esp->rev = FAS236; | ||
244 | else if (family_code == 0x0a) | ||
245 | esp->rev = FASHME; /* Version is usually '5'. */ | ||
246 | else | ||
247 | esp->rev = FAS100A; | ||
248 | esp->min_period = ((4 * esp->ccycle) / 1000); | ||
249 | } else { | ||
250 | esp->min_period = ((5 * esp->ccycle) / 1000); | ||
251 | } | ||
252 | esp->max_period = (esp->max_period + 3)>>2; | ||
253 | esp->min_period = (esp->min_period + 3)>>2; | ||
254 | |||
255 | esp_write8(esp->config1, ESP_CFG1); | ||
256 | switch (esp->rev) { | ||
257 | case ESP100: | ||
258 | /* nothing to do */ | ||
259 | break; | ||
260 | |||
261 | case ESP100A: | ||
262 | esp_write8(esp->config2, ESP_CFG2); | ||
263 | break; | ||
264 | |||
265 | case ESP236: | ||
266 | /* Slow 236 */ | ||
267 | esp_write8(esp->config2, ESP_CFG2); | ||
268 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
269 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
270 | break; | ||
271 | |||
272 | case FASHME: | ||
273 | esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB); | ||
274 | /* fallthrough... */ | ||
275 | |||
276 | case FAS236: | ||
277 | /* Fast 236 or HME */ | ||
278 | esp_write8(esp->config2, ESP_CFG2); | ||
279 | if (esp->rev == FASHME) { | ||
280 | u8 cfg3 = esp->target[0].esp_config3; | ||
281 | |||
282 | cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH; | ||
283 | if (esp->scsi_id >= 8) | ||
284 | cfg3 |= ESP_CONFIG3_IDBIT3; | ||
285 | esp_set_all_config3(esp, cfg3); | ||
286 | } else { | ||
287 | u32 cfg3 = esp->target[0].esp_config3; | ||
288 | |||
289 | cfg3 |= ESP_CONFIG3_FCLK; | ||
290 | esp_set_all_config3(esp, cfg3); | ||
291 | } | ||
292 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
293 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
294 | if (esp->rev == FASHME) { | ||
295 | esp->radelay = 80; | ||
296 | } else { | ||
297 | if (esp->flags & ESP_FLAG_DIFFERENTIAL) | ||
298 | esp->radelay = 0; | ||
299 | else | ||
300 | esp->radelay = 96; | ||
301 | } | ||
302 | break; | ||
303 | |||
304 | case FAS100A: | ||
305 | /* Fast 100a */ | ||
306 | esp_write8(esp->config2, ESP_CFG2); | ||
307 | esp_set_all_config3(esp, | ||
308 | (esp->target[0].esp_config3 | | ||
309 | ESP_CONFIG3_FCLOCK)); | ||
310 | esp->prev_cfg3 = esp->target[0].esp_config3; | ||
311 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
312 | esp->radelay = 32; | ||
313 | break; | ||
314 | |||
315 | default: | ||
316 | break; | ||
317 | } | ||
318 | |||
319 | /* Eat any bitrot in the chip */ | ||
320 | esp_read8(ESP_INTRPT); | ||
321 | udelay(100); | ||
322 | } | ||
323 | |||
324 | static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd) | ||
325 | { | ||
326 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
327 | struct scatterlist *sg = cmd->request_buffer; | ||
328 | int dir = cmd->sc_data_direction; | ||
329 | int total, i; | ||
330 | |||
331 | if (dir == DMA_NONE) | ||
332 | return; | ||
333 | |||
334 | BUG_ON(cmd->use_sg == 0); | ||
335 | |||
336 | spriv->u.num_sg = esp->ops->map_sg(esp, sg, | ||
337 | cmd->use_sg, dir); | ||
338 | spriv->cur_residue = sg_dma_len(sg); | ||
339 | spriv->cur_sg = sg; | ||
340 | |||
341 | total = 0; | ||
342 | for (i = 0; i < spriv->u.num_sg; i++) | ||
343 | total += sg_dma_len(&sg[i]); | ||
344 | spriv->tot_residue = total; | ||
345 | } | ||
346 | |||
347 | static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent, | ||
348 | struct scsi_cmnd *cmd) | ||
349 | { | ||
350 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
351 | |||
352 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
353 | return ent->sense_dma + | ||
354 | (ent->sense_ptr - cmd->sense_buffer); | ||
355 | } | ||
356 | |||
357 | return sg_dma_address(p->cur_sg) + | ||
358 | (sg_dma_len(p->cur_sg) - | ||
359 | p->cur_residue); | ||
360 | } | ||
361 | |||
362 | static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent, | ||
363 | struct scsi_cmnd *cmd) | ||
364 | { | ||
365 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
366 | |||
367 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
368 | return SCSI_SENSE_BUFFERSIZE - | ||
369 | (ent->sense_ptr - cmd->sense_buffer); | ||
370 | } | ||
371 | return p->cur_residue; | ||
372 | } | ||
373 | |||
374 | static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent, | ||
375 | struct scsi_cmnd *cmd, unsigned int len) | ||
376 | { | ||
377 | struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd); | ||
378 | |||
379 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
380 | ent->sense_ptr += len; | ||
381 | return; | ||
382 | } | ||
383 | |||
384 | p->cur_residue -= len; | ||
385 | p->tot_residue -= len; | ||
386 | if (p->cur_residue < 0 || p->tot_residue < 0) { | ||
387 | printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n", | ||
388 | esp->host->unique_id); | ||
389 | printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] " | ||
390 | "len[%u]\n", | ||
391 | esp->host->unique_id, | ||
392 | p->cur_residue, p->tot_residue, len); | ||
393 | p->cur_residue = 0; | ||
394 | p->tot_residue = 0; | ||
395 | } | ||
396 | if (!p->cur_residue && p->tot_residue) { | ||
397 | p->cur_sg++; | ||
398 | p->cur_residue = sg_dma_len(p->cur_sg); | ||
399 | } | ||
400 | } | ||
401 | |||
402 | static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd) | ||
403 | { | ||
404 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
405 | int dir = cmd->sc_data_direction; | ||
406 | |||
407 | if (dir == DMA_NONE) | ||
408 | return; | ||
409 | |||
410 | esp->ops->unmap_sg(esp, cmd->request_buffer, | ||
411 | spriv->u.num_sg, dir); | ||
412 | } | ||
413 | |||
414 | static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent) | ||
415 | { | ||
416 | struct scsi_cmnd *cmd = ent->cmd; | ||
417 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
418 | |||
419 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
420 | ent->saved_sense_ptr = ent->sense_ptr; | ||
421 | return; | ||
422 | } | ||
423 | ent->saved_cur_residue = spriv->cur_residue; | ||
424 | ent->saved_cur_sg = spriv->cur_sg; | ||
425 | ent->saved_tot_residue = spriv->tot_residue; | ||
426 | } | ||
427 | |||
428 | static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent) | ||
429 | { | ||
430 | struct scsi_cmnd *cmd = ent->cmd; | ||
431 | struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd); | ||
432 | |||
433 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
434 | ent->sense_ptr = ent->saved_sense_ptr; | ||
435 | return; | ||
436 | } | ||
437 | spriv->cur_residue = ent->saved_cur_residue; | ||
438 | spriv->cur_sg = ent->saved_cur_sg; | ||
439 | spriv->tot_residue = ent->saved_tot_residue; | ||
440 | } | ||
441 | |||
442 | static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd) | ||
443 | { | ||
444 | if (cmd->cmd_len == 6 || | ||
445 | cmd->cmd_len == 10 || | ||
446 | cmd->cmd_len == 12) { | ||
447 | esp->flags &= ~ESP_FLAG_DOING_SLOWCMD; | ||
448 | } else { | ||
449 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
450 | } | ||
451 | } | ||
452 | |||
453 | static void esp_write_tgt_config3(struct esp *esp, int tgt) | ||
454 | { | ||
455 | if (esp->rev > ESP100A) { | ||
456 | u8 val = esp->target[tgt].esp_config3; | ||
457 | |||
458 | if (val != esp->prev_cfg3) { | ||
459 | esp->prev_cfg3 = val; | ||
460 | esp_write8(val, ESP_CFG3); | ||
461 | } | ||
462 | } | ||
463 | } | ||
464 | |||
465 | static void esp_write_tgt_sync(struct esp *esp, int tgt) | ||
466 | { | ||
467 | u8 off = esp->target[tgt].esp_offset; | ||
468 | u8 per = esp->target[tgt].esp_period; | ||
469 | |||
470 | if (off != esp->prev_soff) { | ||
471 | esp->prev_soff = off; | ||
472 | esp_write8(off, ESP_SOFF); | ||
473 | } | ||
474 | if (per != esp->prev_stp) { | ||
475 | esp->prev_stp = per; | ||
476 | esp_write8(per, ESP_STP); | ||
477 | } | ||
478 | } | ||
479 | |||
480 | static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len) | ||
481 | { | ||
482 | if (esp->rev == FASHME) { | ||
483 | /* Arbitrary segment boundaries, 24-bit counts. */ | ||
484 | if (dma_len > (1U << 24)) | ||
485 | dma_len = (1U << 24); | ||
486 | } else { | ||
487 | u32 base, end; | ||
488 | |||
489 | /* The other ESP variants are limited to 16 bits of transfer | ||
490 | * count. Actually on FAS100A and FAS236 we could get | ||
491 | * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB | ||
492 | * in the ESP_CFG2 register but that causes other unwanted | ||
493 | * changes so we don't use it currently. | ||
494 | */ | ||
495 | if (dma_len > (1U << 16)) | ||
496 | dma_len = (1U << 16); | ||
497 | |||
498 | /* All of the DMA variants hooked up to these chips | ||
499 | * cannot handle crossing a 24-bit address boundary. | ||
500 | */ | ||
501 | base = dma_addr & ((1U << 24) - 1U); | ||
502 | end = base + dma_len; | ||
503 | if (end > (1U << 24)) | ||
504 | end = (1U << 24); | ||
505 | dma_len = end - base; | ||
506 | } | ||
507 | return dma_len; | ||
508 | } | ||
509 | |||
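A worked example of the clamping above, using illustrative numbers: a 0x30000-byte segment starting at DMA address 0x00fff000 on a non-FASHME chip.

/*
 *	dma_len = 0x30000, larger than 1 << 16     -> 0x10000
 *	base    = 0x00fff000 & 0x00ffffff          =  0x00fff000
 *	end     = base + 0x10000 = 0x0100f000,
 *	          past 1 << 24                     -> 0x01000000
 *	dma_len = end - base                       =  0x00001000
 *
 * Only 4096 bytes get programmed this pass; the remainder of the
 * segment is covered by the residue tracking on a later data phase.
 */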
510 | static int esp_need_to_nego_wide(struct esp_target_data *tp) | ||
511 | { | ||
512 | struct scsi_target *target = tp->starget; | ||
513 | |||
514 | return spi_width(target) != tp->nego_goal_width; | ||
515 | } | ||
516 | |||
517 | static int esp_need_to_nego_sync(struct esp_target_data *tp) | ||
518 | { | ||
519 | struct scsi_target *target = tp->starget; | ||
520 | |||
521 | /* When offset is zero, period is "don't care". */ | ||
522 | if (!spi_offset(target) && !tp->nego_goal_offset) | ||
523 | return 0; | ||
524 | |||
525 | if (spi_offset(target) == tp->nego_goal_offset && | ||
526 | spi_period(target) == tp->nego_goal_period) | ||
527 | return 0; | ||
528 | |||
529 | return 1; | ||
530 | } | ||
531 | |||
532 | static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, | ||
533 | struct esp_lun_data *lp) | ||
534 | { | ||
535 | if (!ent->tag[0]) { | ||
536 | /* Non-tagged, slot already taken? */ | ||
537 | if (lp->non_tagged_cmd) | ||
538 | return -EBUSY; | ||
539 | |||
540 | if (lp->hold) { | ||
541 | /* We are being held by active tagged | ||
542 | * commands. | ||
543 | */ | ||
544 | if (lp->num_tagged) | ||
545 | return -EBUSY; | ||
546 | |||
547 | /* Tagged commands completed, we can unplug | ||
548 | * the queue and run this untagged command. | ||
549 | */ | ||
550 | lp->hold = 0; | ||
551 | } else if (lp->num_tagged) { | ||
552 | /* Plug the queue until num_tagged decreases | ||
553 | * to zero in esp_free_lun_tag. | ||
554 | */ | ||
555 | lp->hold = 1; | ||
556 | return -EBUSY; | ||
557 | } | ||
558 | |||
559 | lp->non_tagged_cmd = ent; | ||
560 | return 0; | ||
561 | } else { | ||
562 | /* Tagged command, see if blocked by a | ||
563 | * non-tagged one. | ||
564 | */ | ||
565 | if (lp->non_tagged_cmd || lp->hold) | ||
566 | return -EBUSY; | ||
567 | } | ||
568 | |||
569 | BUG_ON(lp->tagged_cmds[ent->tag[1]]); | ||
570 | |||
571 | lp->tagged_cmds[ent->tag[1]] = ent; | ||
572 | lp->num_tagged++; | ||
573 | |||
574 | return 0; | ||
575 | } | ||
576 | |||
577 | static void esp_free_lun_tag(struct esp_cmd_entry *ent, | ||
578 | struct esp_lun_data *lp) | ||
579 | { | ||
580 | if (ent->tag[0]) { | ||
581 | BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent); | ||
582 | lp->tagged_cmds[ent->tag[1]] = NULL; | ||
583 | lp->num_tagged--; | ||
584 | } else { | ||
585 | BUG_ON(lp->non_tagged_cmd != ent); | ||
586 | lp->non_tagged_cmd = NULL; | ||
587 | } | ||
588 | } | ||
589 | |||
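The hold flag above acts as a simple plug for the per-LUN queue so an untagged command never runs alongside tagged ones. An illustrative sequence:

/*
 *	tagged A and tagged B active              num_tagged == 2
 *	untagged C arrives    -> lp->hold = 1,    C gets -EBUSY
 *	A completes, B completes                  num_tagged == 0
 *	C retried             -> hold cleared,    C becomes the lone
 *	                                          non_tagged_cmd
 *	tagged D arrives while C is active     -> -EBUSY until C finishes
 */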
590 | /* When a contingent allegiance condition is created, we force-feed a | ||
591 | * REQUEST_SENSE command to the device to fetch the sense data. I | ||
592 | * tried many other schemes, relying on the scsi error handling layer | ||
593 | * to send out the REQUEST_SENSE automatically, but this was difficult | ||
594 | * to get right, especially in the presence of applications like smartd | ||
595 | * which use SG_IO to send out their own REQUEST_SENSE commands. | ||
596 | */ | ||
597 | static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent) | ||
598 | { | ||
599 | struct scsi_cmnd *cmd = ent->cmd; | ||
600 | struct scsi_device *dev = cmd->device; | ||
601 | int tgt, lun; | ||
602 | u8 *p, val; | ||
603 | |||
604 | tgt = dev->id; | ||
605 | lun = dev->lun; | ||
606 | |||
607 | |||
608 | if (!ent->sense_ptr) { | ||
609 | esp_log_autosense("esp%d: Doing auto-sense for " | ||
610 | "tgt[%d] lun[%d]\n", | ||
611 | esp->host->unique_id, tgt, lun); | ||
612 | |||
613 | ent->sense_ptr = cmd->sense_buffer; | ||
614 | ent->sense_dma = esp->ops->map_single(esp, | ||
615 | ent->sense_ptr, | ||
616 | SCSI_SENSE_BUFFERSIZE, | ||
617 | DMA_FROM_DEVICE); | ||
618 | } | ||
619 | ent->saved_sense_ptr = ent->sense_ptr; | ||
620 | |||
621 | esp->active_cmd = ent; | ||
622 | |||
623 | p = esp->command_block; | ||
624 | esp->msg_out_len = 0; | ||
625 | |||
626 | *p++ = IDENTIFY(0, lun); | ||
627 | *p++ = REQUEST_SENSE; | ||
628 | *p++ = ((dev->scsi_level <= SCSI_2) ? | ||
629 | (lun << 5) : 0); | ||
630 | *p++ = 0; | ||
631 | *p++ = 0; | ||
632 | *p++ = SCSI_SENSE_BUFFERSIZE; | ||
633 | *p++ = 0; | ||
634 | |||
635 | esp->select_state = ESP_SELECT_BASIC; | ||
636 | |||
637 | val = tgt; | ||
638 | if (esp->rev == FASHME) | ||
639 | val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; | ||
640 | esp_write8(val, ESP_BUSID); | ||
641 | |||
642 | esp_write_tgt_sync(esp, tgt); | ||
643 | esp_write_tgt_config3(esp, tgt); | ||
644 | |||
645 | val = (p - esp->command_block); | ||
646 | |||
647 | if (esp->rev == FASHME) | ||
648 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
649 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
650 | val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA); | ||
651 | } | ||
652 | |||
653 | static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) | ||
654 | { | ||
655 | struct esp_cmd_entry *ent; | ||
656 | |||
657 | list_for_each_entry(ent, &esp->queued_cmds, list) { | ||
658 | struct scsi_cmnd *cmd = ent->cmd; | ||
659 | struct scsi_device *dev = cmd->device; | ||
660 | struct esp_lun_data *lp = dev->hostdata; | ||
661 | |||
662 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
663 | ent->tag[0] = 0; | ||
664 | ent->tag[1] = 0; | ||
665 | return ent; | ||
666 | } | ||
667 | |||
668 | if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) { | ||
669 | ent->tag[0] = 0; | ||
670 | ent->tag[1] = 0; | ||
671 | } | ||
672 | |||
673 | if (esp_alloc_lun_tag(ent, lp) < 0) | ||
674 | continue; | ||
675 | |||
676 | return ent; | ||
677 | } | ||
678 | |||
679 | return NULL; | ||
680 | } | ||
681 | |||
682 | static void esp_maybe_execute_command(struct esp *esp) | ||
683 | { | ||
684 | struct esp_target_data *tp; | ||
685 | struct esp_lun_data *lp; | ||
686 | struct scsi_device *dev; | ||
687 | struct scsi_cmnd *cmd; | ||
688 | struct esp_cmd_entry *ent; | ||
689 | int tgt, lun, i; | ||
690 | u32 val, start_cmd; | ||
691 | u8 *p; | ||
692 | |||
693 | if (esp->active_cmd || | ||
694 | (esp->flags & ESP_FLAG_RESETTING)) | ||
695 | return; | ||
696 | |||
697 | ent = find_and_prep_issuable_command(esp); | ||
698 | if (!ent) | ||
699 | return; | ||
700 | |||
701 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
702 | esp_autosense(esp, ent); | ||
703 | return; | ||
704 | } | ||
705 | |||
706 | cmd = ent->cmd; | ||
707 | dev = cmd->device; | ||
708 | tgt = dev->id; | ||
709 | lun = dev->lun; | ||
710 | tp = &esp->target[tgt]; | ||
711 | lp = dev->hostdata; | ||
712 | |||
713 | list_del(&ent->list); | ||
714 | list_add(&ent->list, &esp->active_cmds); | ||
715 | |||
716 | esp->active_cmd = ent; | ||
717 | |||
718 | esp_map_dma(esp, cmd); | ||
719 | esp_save_pointers(esp, ent); | ||
720 | |||
721 | esp_check_command_len(esp, cmd); | ||
722 | |||
723 | p = esp->command_block; | ||
724 | |||
725 | esp->msg_out_len = 0; | ||
726 | if (tp->flags & ESP_TGT_CHECK_NEGO) { | ||
727 | /* Need to negotiate. If the target is broken | ||
728 | * go for asynchronous transfers and non-wide. | ||
729 | */ | ||
730 | if (tp->flags & ESP_TGT_BROKEN) { | ||
731 | tp->flags &= ~ESP_TGT_DISCONNECT; | ||
732 | tp->nego_goal_period = 0; | ||
733 | tp->nego_goal_offset = 0; | ||
734 | tp->nego_goal_width = 0; | ||
735 | tp->nego_goal_tags = 0; | ||
736 | } | ||
737 | |||
738 | /* If the settings are not changing, skip this. */ | ||
739 | if (spi_width(tp->starget) == tp->nego_goal_width && | ||
740 | spi_period(tp->starget) == tp->nego_goal_period && | ||
741 | spi_offset(tp->starget) == tp->nego_goal_offset) { | ||
742 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
743 | goto build_identify; | ||
744 | } | ||
745 | |||
746 | if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) { | ||
747 | esp->msg_out_len = | ||
748 | spi_populate_width_msg(&esp->msg_out[0], | ||
749 | (tp->nego_goal_width ? | ||
750 | 1 : 0)); | ||
751 | tp->flags |= ESP_TGT_NEGO_WIDE; | ||
752 | } else if (esp_need_to_nego_sync(tp)) { | ||
753 | esp->msg_out_len = | ||
754 | spi_populate_sync_msg(&esp->msg_out[0], | ||
755 | tp->nego_goal_period, | ||
756 | tp->nego_goal_offset); | ||
757 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
758 | } else { | ||
759 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
760 | } | ||
761 | |||
762 | /* Process it like a slow command. */ | ||
763 | if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC)) | ||
764 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
765 | } | ||
766 | |||
767 | build_identify: | ||
768 | /* If we don't have a lun-data struct yet, we're probing | ||
769 | * so do not disconnect. Also, do not disconnect unless | ||
770 | * we have a tag on this command. | ||
771 | */ | ||
772 | if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0]) | ||
773 | *p++ = IDENTIFY(1, lun); | ||
774 | else | ||
775 | *p++ = IDENTIFY(0, lun); | ||
776 | |||
777 | if (ent->tag[0] && esp->rev == ESP100) { | ||
778 | /* ESP100 lacks select w/atn3 command, use select | ||
779 | * and stop instead. | ||
780 | */ | ||
781 | esp->flags |= ESP_FLAG_DOING_SLOWCMD; | ||
782 | } | ||
783 | |||
784 | if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) { | ||
785 | start_cmd = ESP_CMD_DMA | ESP_CMD_SELA; | ||
786 | if (ent->tag[0]) { | ||
787 | *p++ = ent->tag[0]; | ||
788 | *p++ = ent->tag[1]; | ||
789 | |||
790 | start_cmd = ESP_CMD_DMA | ESP_CMD_SA3; | ||
791 | } | ||
792 | |||
793 | for (i = 0; i < cmd->cmd_len; i++) | ||
794 | *p++ = cmd->cmnd[i]; | ||
795 | |||
796 | esp->select_state = ESP_SELECT_BASIC; | ||
797 | } else { | ||
798 | esp->cmd_bytes_left = cmd->cmd_len; | ||
799 | esp->cmd_bytes_ptr = &cmd->cmnd[0]; | ||
800 | |||
801 | if (ent->tag[0]) { | ||
802 | for (i = esp->msg_out_len - 1; | ||
803 | i >= 0; i--) | ||
804 | esp->msg_out[i + 2] = esp->msg_out[i]; | ||
805 | esp->msg_out[0] = ent->tag[0]; | ||
806 | esp->msg_out[1] = ent->tag[1]; | ||
807 | esp->msg_out_len += 2; | ||
808 | } | ||
809 | |||
810 | start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS; | ||
811 | esp->select_state = ESP_SELECT_MSGOUT; | ||
812 | } | ||
813 | val = tgt; | ||
814 | if (esp->rev == FASHME) | ||
815 | val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT; | ||
816 | esp_write8(val, ESP_BUSID); | ||
817 | |||
818 | esp_write_tgt_sync(esp, tgt); | ||
819 | esp_write_tgt_config3(esp, tgt); | ||
820 | |||
821 | val = (p - esp->command_block); | ||
822 | |||
823 | if (esp_debug & ESP_DEBUG_SCSICMD) { | ||
824 | printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun); | ||
825 | for (i = 0; i < cmd->cmd_len; i++) | ||
826 | printk("%02x ", cmd->cmnd[i]); | ||
827 | printk("]\n"); | ||
828 | } | ||
829 | |||
830 | if (esp->rev == FASHME) | ||
831 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
832 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
833 | val, 16, 0, start_cmd); | ||
834 | } | ||
835 | |||
836 | static struct esp_cmd_entry *esp_get_ent(struct esp *esp) | ||
837 | { | ||
838 | struct list_head *head = &esp->esp_cmd_pool; | ||
839 | struct esp_cmd_entry *ret; | ||
840 | |||
841 | if (list_empty(head)) { | ||
842 | ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC); | ||
843 | } else { | ||
844 | ret = list_entry(head->next, struct esp_cmd_entry, list); | ||
845 | list_del(&ret->list); | ||
846 | memset(ret, 0, sizeof(*ret)); | ||
847 | } | ||
848 | return ret; | ||
849 | } | ||
850 | |||
851 | static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent) | ||
852 | { | ||
853 | list_add(&ent->list, &esp->esp_cmd_pool); | ||
854 | } | ||
855 | |||
856 | static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent, | ||
857 | struct scsi_cmnd *cmd, unsigned int result) | ||
858 | { | ||
859 | struct scsi_device *dev = cmd->device; | ||
860 | int tgt = dev->id; | ||
861 | int lun = dev->lun; | ||
862 | |||
863 | esp->active_cmd = NULL; | ||
864 | esp_unmap_dma(esp, cmd); | ||
865 | esp_free_lun_tag(ent, dev->hostdata); | ||
866 | cmd->result = result; | ||
867 | |||
868 | if (ent->eh_done) { | ||
869 | complete(ent->eh_done); | ||
870 | ent->eh_done = NULL; | ||
871 | } | ||
872 | |||
873 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
874 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
875 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); | ||
876 | ent->sense_ptr = NULL; | ||
877 | |||
878 | /* Restore the message/status bytes to what we actually | ||
879 | * saw originally. Also, report that we are providing | ||
880 | * the sense data. | ||
881 | */ | ||
882 | cmd->result = ((DRIVER_SENSE << 24) | | ||
883 | (DID_OK << 16) | | ||
884 | (COMMAND_COMPLETE << 8) | | ||
885 | (SAM_STAT_CHECK_CONDITION << 0)); | ||
886 | |||
887 | ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE; | ||
888 | if (esp_debug & ESP_DEBUG_AUTOSENSE) { | ||
889 | int i; | ||
890 | |||
891 | printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ", | ||
892 | esp->host->unique_id, tgt, lun); | ||
893 | for (i = 0; i < 18; i++) | ||
894 | printk("%02x ", cmd->sense_buffer[i]); | ||
895 | printk("]\n"); | ||
896 | } | ||
897 | } | ||
898 | |||
899 | cmd->scsi_done(cmd); | ||
900 | |||
901 | list_del(&ent->list); | ||
902 | esp_put_ent(esp, ent); | ||
903 | |||
904 | esp_maybe_execute_command(esp); | ||
905 | } | ||
906 | |||
907 | static unsigned int compose_result(unsigned int status, unsigned int message, | ||
908 | unsigned int driver_code) | ||
909 | { | ||
910 | return (status | (message << 8) | (driver_code << 16)); | ||
911 | } | ||
912 | |||
913 | static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent) | ||
914 | { | ||
915 | struct scsi_device *dev = ent->cmd->device; | ||
916 | struct esp_lun_data *lp = dev->hostdata; | ||
917 | |||
918 | scsi_track_queue_full(dev, lp->num_tagged - 1); | ||
919 | } | ||
920 | |||
921 | static int esp_queuecommand(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)) | ||
922 | { | ||
923 | struct scsi_device *dev = cmd->device; | ||
924 | struct esp *esp = host_to_esp(dev->host); | ||
925 | struct esp_cmd_priv *spriv; | ||
926 | struct esp_cmd_entry *ent; | ||
927 | |||
928 | ent = esp_get_ent(esp); | ||
929 | if (!ent) | ||
930 | return SCSI_MLQUEUE_HOST_BUSY; | ||
931 | |||
932 | ent->cmd = cmd; | ||
933 | |||
934 | cmd->scsi_done = done; | ||
935 | |||
936 | spriv = ESP_CMD_PRIV(cmd); | ||
937 | spriv->u.dma_addr = ~(dma_addr_t)0x0; | ||
938 | |||
939 | list_add_tail(&ent->list, &esp->queued_cmds); | ||
940 | |||
941 | esp_maybe_execute_command(esp); | ||
942 | |||
943 | return 0; | ||
944 | } | ||
945 | |||
946 | static int esp_check_gross_error(struct esp *esp) | ||
947 | { | ||
948 | if (esp->sreg & ESP_STAT_SPAM) { | ||
949 | /* Gross Error, could be one of: | ||
950 | * - top of fifo overwritten | ||
951 | * - top of command register overwritten | ||
952 | * - DMA programmed with wrong direction | ||
953 | * - improper phase change | ||
954 | */ | ||
955 | printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n", | ||
956 | esp->host->unique_id, esp->sreg); | ||
957 | /* XXX Reset the chip. XXX */ | ||
958 | return 1; | ||
959 | } | ||
960 | return 0; | ||
961 | } | ||
962 | |||
963 | static int esp_check_spur_intr(struct esp *esp) | ||
964 | { | ||
965 | switch (esp->rev) { | ||
966 | case ESP100: | ||
967 | case ESP100A: | ||
968 | /* The interrupt pending bit of the status register cannot | ||
969 | * be trusted on these revisions. | ||
970 | */ | ||
971 | esp->sreg &= ~ESP_STAT_INTR; | ||
972 | break; | ||
973 | |||
974 | default: | ||
975 | if (!(esp->sreg & ESP_STAT_INTR)) { | ||
976 | esp->ireg = esp_read8(ESP_INTRPT); | ||
977 | if (esp->ireg & ESP_INTR_SR) | ||
978 | return 1; | ||
979 | |||
980 | /* If the DMA is indicating interrupt pending and the | ||
981 | * ESP is not, the only possibility is a DMA error. | ||
982 | */ | ||
983 | if (!esp->ops->dma_error(esp)) { | ||
984 | printk(KERN_ERR PFX "esp%d: Spurious irq, " | ||
985 | "sreg=%x.\n", | ||
986 | esp->host->unique_id, esp->sreg); | ||
987 | return -1; | ||
988 | } | ||
989 | |||
990 | printk(KERN_ERR PFX "esp%d: DMA error\n", | ||
991 | esp->host->unique_id); | ||
992 | |||
993 | /* XXX Reset the chip. XXX */ | ||
994 | return -1; | ||
995 | } | ||
996 | break; | ||
997 | } | ||
998 | |||
999 | return 0; | ||
1000 | } | ||
1001 | |||
1002 | static void esp_schedule_reset(struct esp *esp) | ||
1003 | { | ||
1004 | esp_log_reset("ESP: esp_schedule_reset() from %p\n", | ||
1005 | __builtin_return_address(0)); | ||
1006 | esp->flags |= ESP_FLAG_RESETTING; | ||
1007 | esp_event(esp, ESP_EVENT_RESET); | ||
1008 | } | ||
1009 | |||
1010 | /* In order to avoid having to add a special half-reconnected state | ||
1011 | * into the driver we just sit here and poll through the rest of | ||
1012 | * the reselection process to get the tag message bytes. | ||
1013 | */ | ||
1014 | static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp, | ||
1015 | struct esp_lun_data *lp) | ||
1016 | { | ||
1017 | struct esp_cmd_entry *ent; | ||
1018 | int i; | ||
1019 | |||
1020 | if (!lp->num_tagged) { | ||
1021 | printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n", | ||
1022 | esp->host->unique_id); | ||
1023 | return NULL; | ||
1024 | } | ||
1025 | |||
1026 | esp_log_reconnect("ESP: reconnect tag, "); | ||
1027 | |||
1028 | for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { | ||
1029 | if (esp->ops->irq_pending(esp)) | ||
1030 | break; | ||
1031 | } | ||
1032 | if (i == ESP_QUICKIRQ_LIMIT) { | ||
1033 | printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n", | ||
1034 | esp->host->unique_id); | ||
1035 | return NULL; | ||
1036 | } | ||
1037 | |||
1038 | esp->sreg = esp_read8(ESP_STATUS); | ||
1039 | esp->ireg = esp_read8(ESP_INTRPT); | ||
1040 | |||
1041 | esp_log_reconnect("IRQ(%d:%x:%x), ", | ||
1042 | i, esp->ireg, esp->sreg); | ||
1043 | |||
1044 | if (esp->ireg & ESP_INTR_DC) { | ||
1045 | printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n", | ||
1046 | esp->host->unique_id); | ||
1047 | return NULL; | ||
1048 | } | ||
1049 | |||
1050 | if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) { | ||
1051 | printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n", | ||
1052 | esp->host->unique_id, esp->sreg); | ||
1053 | return NULL; | ||
1054 | } | ||
1055 | |||
1056 | /* DMA in the tag bytes... */ | ||
1057 | esp->command_block[0] = 0xff; | ||
1058 | esp->command_block[1] = 0xff; | ||
1059 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
1060 | 2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI); | ||
1061 | |||
1062 | /* ACK the message. */ | ||
1063 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1064 | |||
1065 | for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) { | ||
1066 | if (esp->ops->irq_pending(esp)) { | ||
1067 | esp->sreg = esp_read8(ESP_STATUS); | ||
1068 | esp->ireg = esp_read8(ESP_INTRPT); | ||
1069 | if (esp->ireg & ESP_INTR_FDONE) | ||
1070 | break; | ||
1071 | } | ||
1072 | udelay(1); | ||
1073 | } | ||
1074 | if (i == ESP_RESELECT_TAG_LIMIT) { | ||
1075 | printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n", | ||
1076 | esp->host->unique_id); | ||
1077 | return NULL; | ||
1078 | } | ||
1079 | esp->ops->dma_drain(esp); | ||
1080 | esp->ops->dma_invalidate(esp); | ||
1081 | |||
1082 | esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n", | ||
1083 | i, esp->ireg, esp->sreg, | ||
1084 | esp->command_block[0], | ||
1085 | esp->command_block[1]); | ||
1086 | |||
1087 | if (esp->command_block[0] < SIMPLE_QUEUE_TAG || | ||
1088 | esp->command_block[0] > ORDERED_QUEUE_TAG) { | ||
1089 | printk(KERN_ERR PFX "esp%d: Reconnect, bad tag " | ||
1090 | "type %02x.\n", | ||
1091 | esp->host->unique_id, esp->command_block[0]); | ||
1092 | return NULL; | ||
1093 | } | ||
1094 | |||
1095 | ent = lp->tagged_cmds[esp->command_block[1]]; | ||
1096 | if (!ent) { | ||
1097 | printk(KERN_ERR PFX "esp%d: Reconnect, no entry for " | ||
1098 | "tag %02x.\n", | ||
1099 | esp->host->unique_id, esp->command_block[1]); | ||
1100 | return NULL; | ||
1101 | } | ||
1102 | |||
1103 | return ent; | ||
1104 | } | ||
1105 | |||
1106 | static int esp_reconnect(struct esp *esp) | ||
1107 | { | ||
1108 | struct esp_cmd_entry *ent; | ||
1109 | struct esp_target_data *tp; | ||
1110 | struct esp_lun_data *lp; | ||
1111 | struct scsi_device *dev; | ||
1112 | int target, lun; | ||
1113 | |||
1114 | BUG_ON(esp->active_cmd); | ||
1115 | if (esp->rev == FASHME) { | ||
1116 | /* FASHME puts the target and lun numbers directly | ||
1117 | * into the fifo. | ||
1118 | */ | ||
1119 | target = esp->fifo[0]; | ||
1120 | lun = esp->fifo[1] & 0x7; | ||
1121 | } else { | ||
1122 | u8 bits = esp_read8(ESP_FDATA); | ||
1123 | |||
1124 | /* Older chips put the lun directly into the fifo, but | ||
1125 | * the target is given as a sample of the arbitration | ||
1126 | * lines on the bus at reselection time. So we should | ||
1127 | * see the ID of the ESP and the one reconnecting target | ||
1128 | * set in the bitmap. | ||
1129 | */ | ||
1130 | if (!(bits & esp->scsi_id_mask)) | ||
1131 | goto do_reset; | ||
1132 | bits &= ~esp->scsi_id_mask; | ||
1133 | if (!bits || (bits & (bits - 1))) | ||
1134 | goto do_reset; | ||
1135 | |||
1136 | target = ffs(bits) - 1; | ||
1137 | lun = (esp_read8(ESP_FDATA) & 0x7); | ||
1138 | |||
1139 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1140 | if (esp->rev == ESP100) { | ||
1141 | u8 ireg = esp_read8(ESP_INTRPT); | ||
1142 | /* This chip has a bug during reselection that can | ||
1143 | * cause a spurious illegal-command interrupt, which | ||
1144 | * we simply ACK here. Another possibility is a bus | ||
1145 | * reset so we must check for that. | ||
1146 | */ | ||
1147 | if (ireg & ESP_INTR_SR) | ||
1148 | goto do_reset; | ||
1149 | } | ||
1150 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1151 | } | ||
1152 | |||
1153 | esp_write_tgt_sync(esp, target); | ||
1154 | esp_write_tgt_config3(esp, target); | ||
1155 | |||
1156 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1157 | |||
1158 | if (esp->rev == FASHME) | ||
1159 | esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT, | ||
1160 | ESP_BUSID); | ||
1161 | |||
1162 | tp = &esp->target[target]; | ||
1163 | dev = __scsi_device_lookup_by_target(tp->starget, lun); | ||
1164 | if (!dev) { | ||
1165 | printk(KERN_ERR PFX "esp%d: Reconnect, no lp " | ||
1166 | "tgt[%u] lun[%u]\n", | ||
1167 | esp->host->unique_id, target, lun); | ||
1168 | goto do_reset; | ||
1169 | } | ||
1170 | lp = dev->hostdata; | ||
1171 | |||
1172 | ent = lp->non_tagged_cmd; | ||
1173 | if (!ent) { | ||
1174 | ent = esp_reconnect_with_tag(esp, lp); | ||
1175 | if (!ent) | ||
1176 | goto do_reset; | ||
1177 | } | ||
1178 | |||
1179 | esp->active_cmd = ent; | ||
1180 | |||
1181 | if (ent->flags & ESP_CMD_FLAG_ABORT) { | ||
1182 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1183 | esp->msg_out_len = 1; | ||
1184 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1185 | } | ||
1186 | |||
1187 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1188 | esp_restore_pointers(esp, ent); | ||
1189 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1190 | return 1; | ||
1191 | |||
1192 | do_reset: | ||
1193 | esp_schedule_reset(esp); | ||
1194 | return 0; | ||
1195 | } | ||
1196 | |||
1197 | static int esp_finish_select(struct esp *esp) | ||
1198 | { | ||
1199 | struct esp_cmd_entry *ent; | ||
1200 | struct scsi_cmnd *cmd; | ||
1201 | u8 orig_select_state; | ||
1202 | |||
1203 | orig_select_state = esp->select_state; | ||
1204 | |||
1205 | /* No longer selecting. */ | ||
1206 | esp->select_state = ESP_SELECT_NONE; | ||
1207 | |||
1208 | esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS; | ||
1209 | ent = esp->active_cmd; | ||
1210 | cmd = ent->cmd; | ||
1211 | |||
1212 | if (esp->ops->dma_error(esp)) { | ||
1213 | /* If we see a DMA error during or as a result of selection, | ||
1214 | * all bets are off. | ||
1215 | */ | ||
1216 | esp_schedule_reset(esp); | ||
1217 | esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16)); | ||
1218 | return 0; | ||
1219 | } | ||
1220 | |||
1221 | esp->ops->dma_invalidate(esp); | ||
1222 | |||
1223 | if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) { | ||
1224 | struct esp_target_data *tp = &esp->target[cmd->device->id]; | ||
1225 | |||
1226 | /* Carefully back out of the selection attempt. Release | ||
1227 | * resources (such as DMA mapping & TAG) and reset state (such | ||
1228 | * as message out and command delivery variables). | ||
1229 | */ | ||
1230 | if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { | ||
1231 | esp_unmap_dma(esp, cmd); | ||
1232 | esp_free_lun_tag(ent, cmd->device->hostdata); | ||
1233 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE); | ||
1234 | esp->flags &= ~ESP_FLAG_DOING_SLOWCMD; | ||
1235 | esp->cmd_bytes_ptr = NULL; | ||
1236 | esp->cmd_bytes_left = 0; | ||
1237 | } else { | ||
1238 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
1239 | SCSI_SENSE_BUFFERSIZE, | ||
1240 | DMA_FROM_DEVICE); | ||
1241 | ent->sense_ptr = NULL; | ||
1242 | } | ||
1243 | |||
1244 | /* Now that the state is unwound properly, put back onto | ||
1245 | * the issue queue. This command is no longer active. | ||
1246 | */ | ||
1247 | list_del(&ent->list); | ||
1248 | list_add(&ent->list, &esp->queued_cmds); | ||
1249 | esp->active_cmd = NULL; | ||
1250 | |||
1251 | /* Return value ignored by the caller, which directly invokes | ||
1252 | * esp_reconnect(). | ||
1253 | */ | ||
1254 | return 0; | ||
1255 | } | ||
1256 | |||
1257 | if (esp->ireg == ESP_INTR_DC) { | ||
1258 | struct scsi_device *dev = cmd->device; | ||
1259 | |||
1260 | /* Disconnect. Make sure we re-negotiate sync and | ||
1261 | * wide parameters if this target starts responding | ||
1262 | * again in the future. | ||
1263 | */ | ||
1264 | esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO; | ||
1265 | |||
1266 | scsi_esp_cmd(esp, ESP_CMD_ESEL); | ||
1267 | esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16)); | ||
1268 | return 1; | ||
1269 | } | ||
1270 | |||
1271 | if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) { | ||
1272 | /* Selection successful. On pre-FAST chips we have | ||
1273 | * to do a NOP and possibly clean out the FIFO. | ||
1274 | */ | ||
1275 | if (esp->rev <= ESP236) { | ||
1276 | int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
1277 | |||
1278 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1279 | |||
1280 | if (!fcnt && | ||
1281 | (!esp->prev_soff || | ||
1282 | ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP))) | ||
1283 | esp_flush_fifo(esp); | ||
1284 | } | ||
1285 | |||
1286 | /* If we are doing a slow command, negotiation, etc. | ||
1287 | * we'll do the right thing as we transition to the | ||
1288 | * next phase. | ||
1289 | */ | ||
1290 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1291 | return 0; | ||
1292 | } | ||
1293 | |||
1294 | printk("ESP: Unexpected selection completion ireg[%x].\n", | ||
1295 | esp->ireg); | ||
1296 | esp_schedule_reset(esp); | ||
1297 | return 0; | ||
1298 | } | ||
1299 | |||
1300 | static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent, | ||
1301 | struct scsi_cmnd *cmd) | ||
1302 | { | ||
1303 | int fifo_cnt, ecount, bytes_sent, flush_fifo; | ||
1304 | |||
1305 | fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES; | ||
1306 | if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE) | ||
1307 | fifo_cnt <<= 1; | ||
1308 | |||
1309 | ecount = 0; | ||
1310 | if (!(esp->sreg & ESP_STAT_TCNT)) { | ||
1311 | ecount = ((unsigned int)esp_read8(ESP_TCLOW) | | ||
1312 | (((unsigned int)esp_read8(ESP_TCMED)) << 8)); | ||
1313 | if (esp->rev == FASHME) | ||
1314 | ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16; | ||
1315 | } | ||
1316 | |||
1317 | bytes_sent = esp->data_dma_len; | ||
1318 | bytes_sent -= ecount; | ||
1319 | |||
1320 | if (!(ent->flags & ESP_CMD_FLAG_WRITE)) | ||
1321 | bytes_sent -= fifo_cnt; | ||
1322 | |||
1323 | flush_fifo = 0; | ||
1324 | if (!esp->prev_soff) { | ||
1325 | /* Synchronous data transfer, always flush fifo. */ | ||
1326 | flush_fifo = 1; | ||
1327 | } else { | ||
1328 | if (esp->rev == ESP100) { | ||
1329 | u32 fflags, phase; | ||
1330 | |||
1331 | /* ESP100 has a chip bug where in the synchronous data | ||
1332 | * phase it can mistake a final long REQ pulse from the | ||
1333 | * target as an extra data byte. Fun. | ||
1334 | * | ||
1335 | * To detect this case we resample the status register | ||
1336 | * and fifo flags. If we're still in a data phase and | ||
1337 | * we see spurious chunks in the fifo, we return error | ||
1338 | * to the caller which should reset and set things up | ||
1339 | * such that we only try future transfers to this | ||
1340 | * target in synchronous mode. | ||
1341 | */ | ||
1342 | esp->sreg = esp_read8(ESP_STATUS); | ||
1343 | phase = esp->sreg & ESP_STAT_PMASK; | ||
1344 | fflags = esp_read8(ESP_FFLAGS); | ||
1345 | |||
1346 | if ((phase == ESP_DOP && | ||
1347 | (fflags & ESP_FF_ONOTZERO)) || | ||
1348 | (phase == ESP_DIP && | ||
1349 | (fflags & ESP_FF_FBYTES))) | ||
1350 | return -1; | ||
1351 | } | ||
1352 | if (!(ent->flags & ESP_CMD_FLAG_WRITE)) | ||
1353 | flush_fifo = 1; | ||
1354 | } | ||
1355 | |||
1356 | if (flush_fifo) | ||
1357 | esp_flush_fifo(esp); | ||
1358 | |||
1359 | return bytes_sent; | ||
1360 | } | ||
1361 | |||
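A worked example of the residue math above, with illustrative numbers and assuming ESP_CMD_FLAG_WRITE marks DMA-into-memory transfers as its use here suggests: a 512-byte outbound transfer that the target stops early.

/*
 *	data_dma_len = 512, ESP_TCLOW/ESP_TCMED read back 0x0020,
 *	and 4 bytes are still sitting in a narrow FIFO:
 *
 *	bytes_sent = 512 - 0x20             = 480
 *	bytes_sent -= 4   (outbound only)   = 476 bytes on the wire
 *
 * For inbound data the FIFO correction is skipped.
 */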
1362 | static void esp_setsync(struct esp *esp, struct esp_target_data *tp, | ||
1363 | u8 scsi_period, u8 scsi_offset, | ||
1364 | u8 esp_stp, u8 esp_soff) | ||
1365 | { | ||
1366 | spi_period(tp->starget) = scsi_period; | ||
1367 | spi_offset(tp->starget) = scsi_offset; | ||
1368 | spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0; | ||
1369 | |||
1370 | if (esp_soff) { | ||
1371 | esp_stp &= 0x1f; | ||
1372 | esp_soff |= esp->radelay; | ||
1373 | if (esp->rev >= FAS236) { | ||
1374 | u8 bit = ESP_CONFIG3_FSCSI; | ||
1375 | if (esp->rev >= FAS100A) | ||
1376 | bit = ESP_CONFIG3_FAST; | ||
1377 | |||
1378 | if (scsi_period < 50) { | ||
1379 | if (esp->rev == FASHME) | ||
1380 | esp_soff &= ~esp->radelay; | ||
1381 | tp->esp_config3 |= bit; | ||
1382 | } else { | ||
1383 | tp->esp_config3 &= ~bit; | ||
1384 | } | ||
1385 | esp->prev_cfg3 = tp->esp_config3; | ||
1386 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
1387 | } | ||
1388 | } | ||
1389 | |||
1390 | tp->esp_period = esp->prev_stp = esp_stp; | ||
1391 | tp->esp_offset = esp->prev_soff = esp_soff; | ||
1392 | |||
1393 | esp_write8(esp_soff, ESP_SOFF); | ||
1394 | esp_write8(esp_stp, ESP_STP); | ||
1395 | |||
1396 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); | ||
1397 | |||
1398 | spi_display_xfer_agreement(tp->starget); | ||
1399 | } | ||
1400 | |||
1401 | static void esp_msgin_reject(struct esp *esp) | ||
1402 | { | ||
1403 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1404 | struct scsi_cmnd *cmd = ent->cmd; | ||
1405 | struct esp_target_data *tp; | ||
1406 | int tgt; | ||
1407 | |||
1408 | tgt = cmd->device->id; | ||
1409 | tp = &esp->target[tgt]; | ||
1410 | |||
1411 | if (tp->flags & ESP_TGT_NEGO_WIDE) { | ||
1412 | tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE); | ||
1413 | |||
1414 | if (!esp_need_to_nego_sync(tp)) { | ||
1415 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
1416 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1417 | } else { | ||
1418 | esp->msg_out_len = | ||
1419 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1420 | tp->nego_goal_period, | ||
1421 | tp->nego_goal_offset); | ||
1422 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
1423 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1424 | } | ||
1425 | return; | ||
1426 | } | ||
1427 | |||
1428 | if (tp->flags & ESP_TGT_NEGO_SYNC) { | ||
1429 | tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO); | ||
1430 | tp->esp_period = 0; | ||
1431 | tp->esp_offset = 0; | ||
1432 | esp_setsync(esp, tp, 0, 0, 0, 0); | ||
1433 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1434 | return; | ||
1435 | } | ||
1436 | |||
1437 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1438 | esp->msg_out_len = 1; | ||
1439 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1440 | } | ||
1441 | |||
1442 | static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp) | ||
1443 | { | ||
1444 | u8 period = esp->msg_in[3]; | ||
1445 | u8 offset = esp->msg_in[4]; | ||
1446 | u8 stp; | ||
1447 | |||
1448 | if (!(tp->flags & ESP_TGT_NEGO_SYNC)) | ||
1449 | goto do_reject; | ||
1450 | |||
1451 | if (offset > 15) | ||
1452 | goto do_reject; | ||
1453 | |||
1454 | if (offset) { | ||
1455 | int rounded_up, one_clock; | ||
1456 | |||
1457 | if (period > esp->max_period) { | ||
1458 | period = offset = 0; | ||
1459 | goto do_sdtr; | ||
1460 | } | ||
1461 | if (period < esp->min_period) | ||
1462 | goto do_reject; | ||
1463 | |||
1464 | one_clock = esp->ccycle / 1000; | ||
1465 | rounded_up = (period << 2); | ||
1466 | rounded_up = (rounded_up + one_clock - 1) / one_clock; | ||
1467 | stp = rounded_up; | ||
1468 | if (stp && esp->rev >= FAS236) { | ||
1469 | if (stp >= 50) | ||
1470 | stp--; | ||
1471 | } | ||
1472 | } else { | ||
1473 | stp = 0; | ||
1474 | } | ||
1475 | |||
1476 | esp_setsync(esp, tp, period, offset, stp, offset); | ||
1477 | return; | ||
1478 | |||
1479 | do_reject: | ||
1480 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1481 | esp->msg_out_len = 1; | ||
1482 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1483 | return; | ||
1484 | |||
1485 | do_sdtr: | ||
1486 | tp->nego_goal_period = period; | ||
1487 | tp->nego_goal_offset = offset; | ||
1488 | esp->msg_out_len = | ||
1489 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1490 | tp->nego_goal_period, | ||
1491 | tp->nego_goal_offset); | ||
1492 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1493 | } | ||
1494 | |||
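A worked example of the period conversion above, assuming ccycle holds the clock period in picoseconds so one_clock comes out in nanoseconds: a 40 MHz clock (ccycle == 25000) and an SDTR period byte of 25, i.e. 100 ns fast-10.

/*
 *	one_clock  = 25000 / 1000           = 25 ns
 *	rounded_up = 25 << 2                = 100 ns
 *	stp        = (100 + 25 - 1) / 25    = 4 clocks per period
 *
 * 4 is below the >= 50 special case, so ESP_STP is programmed with 4:
 * four 25 ns clocks per synchronous period matches the agreed 100 ns.
 */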
1495 | static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp) | ||
1496 | { | ||
1497 | int size = 8 << esp->msg_in[3]; | ||
1498 | u8 cfg3; | ||
1499 | |||
1500 | if (esp->rev != FASHME) | ||
1501 | goto do_reject; | ||
1502 | |||
1503 | if (size != 8 && size != 16) | ||
1504 | goto do_reject; | ||
1505 | |||
1506 | if (!(tp->flags & ESP_TGT_NEGO_WIDE)) | ||
1507 | goto do_reject; | ||
1508 | |||
1509 | cfg3 = tp->esp_config3; | ||
1510 | if (size == 16) { | ||
1511 | tp->flags |= ESP_TGT_WIDE; | ||
1512 | cfg3 |= ESP_CONFIG3_EWIDE; | ||
1513 | } else { | ||
1514 | tp->flags &= ~ESP_TGT_WIDE; | ||
1515 | cfg3 &= ~ESP_CONFIG3_EWIDE; | ||
1516 | } | ||
1517 | tp->esp_config3 = cfg3; | ||
1518 | esp->prev_cfg3 = cfg3; | ||
1519 | esp_write8(cfg3, ESP_CFG3); | ||
1520 | |||
1521 | tp->flags &= ~ESP_TGT_NEGO_WIDE; | ||
1522 | |||
1523 | spi_period(tp->starget) = 0; | ||
1524 | spi_offset(tp->starget) = 0; | ||
1525 | if (!esp_need_to_nego_sync(tp)) { | ||
1526 | tp->flags &= ~ESP_TGT_CHECK_NEGO; | ||
1527 | scsi_esp_cmd(esp, ESP_CMD_RATN); | ||
1528 | } else { | ||
1529 | esp->msg_out_len = | ||
1530 | spi_populate_sync_msg(&esp->msg_out[0], | ||
1531 | tp->nego_goal_period, | ||
1532 | tp->nego_goal_offset); | ||
1533 | tp->flags |= ESP_TGT_NEGO_SYNC; | ||
1534 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1535 | } | ||
1536 | return; | ||
1537 | |||
1538 | do_reject: | ||
1539 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1540 | esp->msg_out_len = 1; | ||
1541 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1542 | } | ||
1543 | |||
1544 | static void esp_msgin_extended(struct esp *esp) | ||
1545 | { | ||
1546 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1547 | struct scsi_cmnd *cmd = ent->cmd; | ||
1548 | struct esp_target_data *tp; | ||
1549 | int tgt = cmd->device->id; | ||
1550 | |||
1551 | tp = &esp->target[tgt]; | ||
1552 | if (esp->msg_in[2] == EXTENDED_SDTR) { | ||
1553 | esp_msgin_sdtr(esp, tp); | ||
1554 | return; | ||
1555 | } | ||
1556 | if (esp->msg_in[2] == EXTENDED_WDTR) { | ||
1557 | esp_msgin_wdtr(esp, tp); | ||
1558 | return; | ||
1559 | } | ||
1560 | |||
1561 | printk("ESP: Unexpected extended msg type %x\n", | ||
1562 | esp->msg_in[2]); | ||
1563 | |||
1564 | esp->msg_out[0] = ABORT_TASK_SET; | ||
1565 | esp->msg_out_len = 1; | ||
1566 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1567 | } | ||
1568 | |||
1569 | /* Analyze msgin bytes received from target so far. Return non-zero | ||
1570 | * if there are more bytes needed to complete the message. | ||
1571 | */ | ||
1572 | static int esp_msgin_process(struct esp *esp) | ||
1573 | { | ||
1574 | u8 msg0 = esp->msg_in[0]; | ||
1575 | int len = esp->msg_in_len; | ||
1576 | |||
1577 | if (msg0 & 0x80) { | ||
1578 | /* Identify */ | ||
1579 | printk("ESP: Unexpected msgin identify\n"); | ||
1580 | return 0; | ||
1581 | } | ||
1582 | |||
1583 | switch (msg0) { | ||
1584 | case EXTENDED_MESSAGE: | ||
1585 | if (len == 1) | ||
1586 | return 1; | ||
1587 | if (len < esp->msg_in[1] + 2) | ||
1588 | return 1; | ||
1589 | esp_msgin_extended(esp); | ||
1590 | return 0; | ||
1591 | |||
1592 | case IGNORE_WIDE_RESIDUE: { | ||
1593 | struct esp_cmd_entry *ent; | ||
1594 | struct esp_cmd_priv *spriv; | ||
1595 | if (len == 1) | ||
1596 | return 1; | ||
1597 | |||
1598 | if (esp->msg_in[1] != 1) | ||
1599 | goto do_reject; | ||
1600 | |||
1601 | ent = esp->active_cmd; | ||
1602 | spriv = ESP_CMD_PRIV(ent->cmd); | ||
1603 | |||
1604 | if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) { | ||
1605 | spriv->cur_sg--; | ||
1606 | spriv->cur_residue = 1; | ||
1607 | } else | ||
1608 | spriv->cur_residue++; | ||
1609 | spriv->tot_residue++; | ||
1610 | return 0; | ||
1611 | } | ||
1612 | case NOP: | ||
1613 | return 0; | ||
1614 | case RESTORE_POINTERS: | ||
1615 | esp_restore_pointers(esp, esp->active_cmd); | ||
1616 | return 0; | ||
1617 | case SAVE_POINTERS: | ||
1618 | esp_save_pointers(esp, esp->active_cmd); | ||
1619 | return 0; | ||
1620 | |||
1621 | case COMMAND_COMPLETE: | ||
1622 | case DISCONNECT: { | ||
1623 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1624 | |||
1625 | ent->message = msg0; | ||
1626 | esp_event(esp, ESP_EVENT_FREE_BUS); | ||
1627 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1628 | return 0; | ||
1629 | } | ||
1630 | case MESSAGE_REJECT: | ||
1631 | esp_msgin_reject(esp); | ||
1632 | return 0; | ||
1633 | |||
1634 | default: | ||
1635 | do_reject: | ||
1636 | esp->msg_out[0] = MESSAGE_REJECT; | ||
1637 | esp->msg_out_len = 1; | ||
1638 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
1639 | return 0; | ||
1640 | } | ||
1641 | } | ||
1642 | |||
1643 | static int esp_process_event(struct esp *esp) | ||
1644 | { | ||
1645 | int write; | ||
1646 | |||
1647 | again: | ||
1648 | write = 0; | ||
1649 | switch (esp->event) { | ||
1650 | case ESP_EVENT_CHECK_PHASE: | ||
1651 | switch (esp->sreg & ESP_STAT_PMASK) { | ||
1652 | case ESP_DOP: | ||
1653 | esp_event(esp, ESP_EVENT_DATA_OUT); | ||
1654 | break; | ||
1655 | case ESP_DIP: | ||
1656 | esp_event(esp, ESP_EVENT_DATA_IN); | ||
1657 | break; | ||
1658 | case ESP_STATP: | ||
1659 | esp_flush_fifo(esp); | ||
1660 | scsi_esp_cmd(esp, ESP_CMD_ICCSEQ); | ||
1661 | esp_event(esp, ESP_EVENT_STATUS); | ||
1662 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1663 | return 1; | ||
1664 | |||
1665 | case ESP_MOP: | ||
1666 | esp_event(esp, ESP_EVENT_MSGOUT); | ||
1667 | break; | ||
1668 | |||
1669 | case ESP_MIP: | ||
1670 | esp_event(esp, ESP_EVENT_MSGIN); | ||
1671 | break; | ||
1672 | |||
1673 | case ESP_CMDP: | ||
1674 | esp_event(esp, ESP_EVENT_CMD_START); | ||
1675 | break; | ||
1676 | |||
1677 | default: | ||
1678 | printk("ESP: Unexpected phase, sreg=%02x\n", | ||
1679 | esp->sreg); | ||
1680 | esp_schedule_reset(esp); | ||
1681 | return 0; | ||
1682 | } | ||
1683 | goto again; | ||
1684 | break; | ||
1685 | |||
1686 | case ESP_EVENT_DATA_IN: | ||
1687 | write = 1; | ||
1688 | /* fallthru */ | ||
1689 | |||
1690 | case ESP_EVENT_DATA_OUT: { | ||
1691 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1692 | struct scsi_cmnd *cmd = ent->cmd; | ||
1693 | dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd); | ||
1694 | unsigned int dma_len = esp_cur_dma_len(ent, cmd); | ||
1695 | |||
1696 | if (esp->rev == ESP100) | ||
1697 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1698 | |||
1699 | if (write) | ||
1700 | ent->flags |= ESP_CMD_FLAG_WRITE; | ||
1701 | else | ||
1702 | ent->flags &= ~ESP_CMD_FLAG_WRITE; | ||
1703 | |||
1704 | dma_len = esp_dma_length_limit(esp, dma_addr, dma_len); | ||
1705 | esp->data_dma_len = dma_len; | ||
1706 | |||
1707 | if (!dma_len) { | ||
1708 | printk(KERN_ERR PFX "esp%d: DMA length is zero!\n", | ||
1709 | esp->host->unique_id); | ||
1710 | printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n", | ||
1711 | esp->host->unique_id, | ||
1712 | (unsigned long long)esp_cur_dma_addr(ent, cmd), | ||
1713 | esp_cur_dma_len(ent, cmd)); | ||
1714 | esp_schedule_reset(esp); | ||
1715 | return 0; | ||
1716 | } | ||
1717 | |||
1718 | esp_log_datastart("ESP: start data addr[%08llx] len[%u] " | ||
1719 | "write(%d)\n", | ||
1720 | (unsigned long long)dma_addr, dma_len, write); | ||
1721 | |||
1722 | esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len, | ||
1723 | write, ESP_CMD_DMA | ESP_CMD_TI); | ||
1724 | esp_event(esp, ESP_EVENT_DATA_DONE); | ||
1725 | break; | ||
1726 | } | ||
1727 | case ESP_EVENT_DATA_DONE: { | ||
1728 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1729 | struct scsi_cmnd *cmd = ent->cmd; | ||
1730 | int bytes_sent; | ||
1731 | |||
1732 | if (esp->ops->dma_error(esp)) { | ||
1733 | printk("ESP: data done, DMA error, resetting\n"); | ||
1734 | esp_schedule_reset(esp); | ||
1735 | return 0; | ||
1736 | } | ||
1737 | |||
1738 | if (ent->flags & ESP_CMD_FLAG_WRITE) { | ||
1739 | /* XXX parity errors, etc. XXX */ | ||
1740 | |||
1741 | esp->ops->dma_drain(esp); | ||
1742 | } | ||
1743 | esp->ops->dma_invalidate(esp); | ||
1744 | |||
1745 | if (esp->ireg != ESP_INTR_BSERV) { | ||
1746 | /* We should always see exactly a bus-service | ||
1747 | * interrupt at the end of a successful transfer. | ||
1748 | */ | ||
1749 | printk("ESP: data done, not BSERV, resetting\n"); | ||
1750 | esp_schedule_reset(esp); | ||
1751 | return 0; | ||
1752 | } | ||
1753 | |||
1754 | bytes_sent = esp_data_bytes_sent(esp, ent, cmd); | ||
1755 | |||
1756 | esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n", | ||
1757 | ent->flags, bytes_sent); | ||
1758 | |||
1759 | if (bytes_sent < 0) { | ||
1760 | /* XXX force sync mode for this target XXX */ | ||
1761 | esp_schedule_reset(esp); | ||
1762 | return 0; | ||
1763 | } | ||
1764 | |||
1765 | esp_advance_dma(esp, ent, cmd, bytes_sent); | ||
1766 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1767 | goto again; | ||
1768 | break; | ||
1769 | } | ||
1770 | |||
1771 | case ESP_EVENT_STATUS: { | ||
1772 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1773 | |||
1774 | if (esp->ireg & ESP_INTR_FDONE) { | ||
1775 | ent->status = esp_read8(ESP_FDATA); | ||
1776 | ent->message = esp_read8(ESP_FDATA); | ||
1777 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1778 | } else if (esp->ireg == ESP_INTR_BSERV) { | ||
1779 | ent->status = esp_read8(ESP_FDATA); | ||
1780 | ent->message = 0xff; | ||
1781 | esp_event(esp, ESP_EVENT_MSGIN); | ||
1782 | return 0; | ||
1783 | } | ||
1784 | |||
1785 | if (ent->message != COMMAND_COMPLETE) { | ||
1786 | printk("ESP: Unexpected message %x in status\n", | ||
1787 | ent->message); | ||
1788 | esp_schedule_reset(esp); | ||
1789 | return 0; | ||
1790 | } | ||
1791 | |||
1792 | esp_event(esp, ESP_EVENT_FREE_BUS); | ||
1793 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1794 | break; | ||
1795 | } | ||
1796 | case ESP_EVENT_FREE_BUS: { | ||
1797 | struct esp_cmd_entry *ent = esp->active_cmd; | ||
1798 | struct scsi_cmnd *cmd = ent->cmd; | ||
1799 | |||
1800 | if (ent->message == COMMAND_COMPLETE || | ||
1801 | ent->message == DISCONNECT) | ||
1802 | scsi_esp_cmd(esp, ESP_CMD_ESEL); | ||
1803 | |||
1804 | if (ent->message == COMMAND_COMPLETE) { | ||
1805 | esp_log_cmddone("ESP: Command done status[%x] " | ||
1806 | "message[%x]\n", | ||
1807 | ent->status, ent->message); | ||
1808 | if (ent->status == SAM_STAT_TASK_SET_FULL) | ||
1809 | esp_event_queue_full(esp, ent); | ||
1810 | |||
1811 | if (ent->status == SAM_STAT_CHECK_CONDITION && | ||
1812 | !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) { | ||
1813 | ent->flags |= ESP_CMD_FLAG_AUTOSENSE; | ||
1814 | esp_autosense(esp, ent); | ||
1815 | } else { | ||
1816 | esp_cmd_is_done(esp, ent, cmd, | ||
1817 | compose_result(ent->status, | ||
1818 | ent->message, | ||
1819 | DID_OK)); | ||
1820 | } | ||
1821 | } else if (ent->message == DISCONNECT) { | ||
1822 | esp_log_disconnect("ESP: Disconnecting tgt[%d] " | ||
1823 | "tag[%x:%x]\n", | ||
1824 | cmd->device->id, | ||
1825 | ent->tag[0], ent->tag[1]); | ||
1826 | |||
1827 | esp->active_cmd = NULL; | ||
1828 | esp_maybe_execute_command(esp); | ||
1829 | } else { | ||
1830 | printk("ESP: Unexpected message %x in freebus\n", | ||
1831 | ent->message); | ||
1832 | esp_schedule_reset(esp); | ||
1833 | return 0; | ||
1834 | } | ||
1835 | if (esp->active_cmd) | ||
1836 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1837 | break; | ||
1838 | } | ||
1839 | case ESP_EVENT_MSGOUT: { | ||
1840 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1841 | |||
1842 | if (esp_debug & ESP_DEBUG_MSGOUT) { | ||
1843 | int i; | ||
1844 | printk("ESP: Sending message [ "); | ||
1845 | for (i = 0; i < esp->msg_out_len; i++) | ||
1846 | printk("%02x ", esp->msg_out[i]); | ||
1847 | printk("]\n"); | ||
1848 | } | ||
1849 | |||
1850 | if (esp->rev == FASHME) { | ||
1851 | int i; | ||
1852 | |||
1853 | /* Always use the fifo. */ | ||
1854 | for (i = 0; i < esp->msg_out_len; i++) { | ||
1855 | esp_write8(esp->msg_out[i], ESP_FDATA); | ||
1856 | esp_write8(0, ESP_FDATA); | ||
1857 | } | ||
1858 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1859 | } else { | ||
1860 | if (esp->msg_out_len == 1) { | ||
1861 | esp_write8(esp->msg_out[0], ESP_FDATA); | ||
1862 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1863 | } else { | ||
1864 | /* Use DMA. */ | ||
1865 | memcpy(esp->command_block, | ||
1866 | esp->msg_out, | ||
1867 | esp->msg_out_len); | ||
1868 | |||
1869 | esp->ops->send_dma_cmd(esp, | ||
1870 | esp->command_block_dma, | ||
1871 | esp->msg_out_len, | ||
1872 | esp->msg_out_len, | ||
1873 | 0, | ||
1874 | ESP_CMD_DMA|ESP_CMD_TI); | ||
1875 | } | ||
1876 | } | ||
1877 | esp_event(esp, ESP_EVENT_MSGOUT_DONE); | ||
1878 | break; | ||
1879 | } | ||
1880 | case ESP_EVENT_MSGOUT_DONE: | ||
1881 | if (esp->rev == FASHME) { | ||
1882 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1883 | } else { | ||
1884 | if (esp->msg_out_len > 1) | ||
1885 | esp->ops->dma_invalidate(esp); | ||
1886 | } | ||
1887 | |||
1888 | if (!(esp->ireg & ESP_INTR_DC)) { | ||
1889 | if (esp->rev != FASHME) | ||
1890 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1891 | } | ||
1892 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1893 | goto again; | ||
1894 | case ESP_EVENT_MSGIN: | ||
1895 | if (esp->ireg & ESP_INTR_BSERV) { | ||
1896 | if (esp->rev == FASHME) { | ||
1897 | if (!(esp_read8(ESP_STATUS2) & | ||
1898 | ESP_STAT2_FEMPTY)) | ||
1899 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1900 | } else { | ||
1901 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1902 | if (esp->rev == ESP100) | ||
1903 | scsi_esp_cmd(esp, ESP_CMD_NULL); | ||
1904 | } | ||
1905 | scsi_esp_cmd(esp, ESP_CMD_TI); | ||
1906 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1907 | return 1; | ||
1908 | } | ||
1909 | if (esp->ireg & ESP_INTR_FDONE) { | ||
1910 | u8 val; | ||
1911 | |||
1912 | if (esp->rev == FASHME) | ||
1913 | val = esp->fifo[0]; | ||
1914 | else | ||
1915 | val = esp_read8(ESP_FDATA); | ||
1916 | esp->msg_in[esp->msg_in_len++] = val; | ||
1917 | |||
1918 | esp_log_msgin("ESP: Got msgin byte %x\n", val); | ||
1919 | |||
1920 | if (!esp_msgin_process(esp)) | ||
1921 | esp->msg_in_len = 0; | ||
1922 | |||
1923 | if (esp->rev == FASHME) | ||
1924 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1925 | |||
1926 | scsi_esp_cmd(esp, ESP_CMD_MOK); | ||
1927 | |||
1928 | if (esp->event != ESP_EVENT_FREE_BUS) | ||
1929 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1930 | } else { | ||
1931 | printk("ESP: MSGIN neither BSERV not FDON, resetting"); | ||
1932 | esp_schedule_reset(esp); | ||
1933 | return 0; | ||
1934 | } | ||
1935 | break; | ||
1936 | case ESP_EVENT_CMD_START: | ||
1937 | memcpy(esp->command_block, esp->cmd_bytes_ptr, | ||
1938 | esp->cmd_bytes_left); | ||
1939 | if (esp->rev == FASHME) | ||
1940 | scsi_esp_cmd(esp, ESP_CMD_FLUSH); | ||
1941 | esp->ops->send_dma_cmd(esp, esp->command_block_dma, | ||
1942 | esp->cmd_bytes_left, 16, 0, | ||
1943 | ESP_CMD_DMA | ESP_CMD_TI); | ||
1944 | esp_event(esp, ESP_EVENT_CMD_DONE); | ||
1945 | esp->flags |= ESP_FLAG_QUICKIRQ_CHECK; | ||
1946 | break; | ||
1947 | case ESP_EVENT_CMD_DONE: | ||
1948 | esp->ops->dma_invalidate(esp); | ||
1949 | if (esp->ireg & ESP_INTR_BSERV) { | ||
1950 | esp_event(esp, ESP_EVENT_CHECK_PHASE); | ||
1951 | goto again; | ||
1952 | } | ||
1953 | esp_schedule_reset(esp); | ||
1954 | return 0; | ||
1955 | break; | ||
1956 | |||
1957 | case ESP_EVENT_RESET: | ||
1958 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
1959 | break; | ||
1960 | |||
1961 | default: | ||
1962 | printk("ESP: Unexpected event %x, resetting\n", | ||
1963 | esp->event); | ||
1964 | esp_schedule_reset(esp); | ||
1965 | return 0; | ||
1966 | break; | ||
1967 | } | ||
1968 | return 1; | ||
1969 | } | ||
1970 | |||
1971 | static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent) | ||
1972 | { | ||
1973 | struct scsi_cmnd *cmd = ent->cmd; | ||
1974 | |||
1975 | esp_unmap_dma(esp, cmd); | ||
1976 | esp_free_lun_tag(ent, cmd->device->hostdata); | ||
1977 | cmd->result = DID_RESET << 16; | ||
1978 | |||
1979 | if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) { | ||
1980 | esp->ops->unmap_single(esp, ent->sense_dma, | ||
1981 | SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE); | ||
1982 | ent->sense_ptr = NULL; | ||
1983 | } | ||
1984 | |||
1985 | cmd->scsi_done(cmd); | ||
1986 | list_del(&ent->list); | ||
1987 | esp_put_ent(esp, ent); | ||
1988 | } | ||
1989 | |||
1990 | static void esp_clear_hold(struct scsi_device *dev, void *data) | ||
1991 | { | ||
1992 | struct esp_lun_data *lp = dev->hostdata; | ||
1993 | |||
1994 | BUG_ON(lp->num_tagged); | ||
1995 | lp->hold = 0; | ||
1996 | } | ||
1997 | |||
1998 | static void esp_reset_cleanup(struct esp *esp) | ||
1999 | { | ||
2000 | struct esp_cmd_entry *ent, *tmp; | ||
2001 | int i; | ||
2002 | |||
2003 | list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) { | ||
2004 | struct scsi_cmnd *cmd = ent->cmd; | ||
2005 | |||
2006 | list_del(&ent->list); | ||
2007 | cmd->result = DID_RESET << 16; | ||
2008 | cmd->scsi_done(cmd); | ||
2009 | esp_put_ent(esp, ent); | ||
2010 | } | ||
2011 | |||
2012 | list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) { | ||
2013 | if (ent == esp->active_cmd) | ||
2014 | esp->active_cmd = NULL; | ||
2015 | esp_reset_cleanup_one(esp, ent); | ||
2016 | } | ||
2017 | |||
2018 | BUG_ON(esp->active_cmd != NULL); | ||
2019 | |||
2020 | /* Force renegotiation of sync/wide transfers. */ | ||
2021 | for (i = 0; i < ESP_MAX_TARGET; i++) { | ||
2022 | struct esp_target_data *tp = &esp->target[i]; | ||
2023 | |||
2024 | tp->esp_period = 0; | ||
2025 | tp->esp_offset = 0; | ||
2026 | tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE | | ||
2027 | ESP_CONFIG3_FSCSI | | ||
2028 | ESP_CONFIG3_FAST); | ||
2029 | tp->flags &= ~ESP_TGT_WIDE; | ||
2030 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2031 | |||
2032 | if (tp->starget) | ||
2033 | starget_for_each_device(tp->starget, NULL, | ||
2034 | esp_clear_hold); | ||
2035 | } | ||
2036 | } | ||
2037 | |||
2038 | /* Runs under host->lock */ | ||
2039 | static void __esp_interrupt(struct esp *esp) | ||
2040 | { | ||
2041 | int finish_reset, intr_done; | ||
2042 | u8 phase; | ||
2043 | |||
2044 | esp->sreg = esp_read8(ESP_STATUS); | ||
2045 | |||
2046 | if (esp->flags & ESP_FLAG_RESETTING) { | ||
2047 | finish_reset = 1; | ||
2048 | } else { | ||
2049 | if (esp_check_gross_error(esp)) | ||
2050 | return; | ||
2051 | |||
2052 | finish_reset = esp_check_spur_intr(esp); | ||
2053 | if (finish_reset < 0) | ||
2054 | return; | ||
2055 | } | ||
2056 | |||
2057 | esp->ireg = esp_read8(ESP_INTRPT); | ||
2058 | |||
2059 | if (esp->ireg & ESP_INTR_SR) | ||
2060 | finish_reset = 1; | ||
2061 | |||
2062 | if (finish_reset) { | ||
2063 | esp_reset_cleanup(esp); | ||
2064 | if (esp->eh_reset) { | ||
2065 | complete(esp->eh_reset); | ||
2066 | esp->eh_reset = NULL; | ||
2067 | } | ||
2068 | return; | ||
2069 | } | ||
2070 | |||
2071 | phase = (esp->sreg & ESP_STAT_PMASK); | ||
2072 | if (esp->rev == FASHME) { | ||
2073 | if (((phase != ESP_DIP && phase != ESP_DOP) && | ||
2074 | esp->select_state == ESP_SELECT_NONE && | ||
2075 | esp->event != ESP_EVENT_STATUS && | ||
2076 | esp->event != ESP_EVENT_DATA_DONE) || | ||
2077 | (esp->ireg & ESP_INTR_RSEL)) { | ||
2078 | esp->sreg2 = esp_read8(ESP_STATUS2); | ||
2079 | if (!(esp->sreg2 & ESP_STAT2_FEMPTY) || | ||
2080 | (esp->sreg2 & ESP_STAT2_F1BYTE)) | ||
2081 | hme_read_fifo(esp); | ||
2082 | } | ||
2083 | } | ||
2084 | |||
2085 | esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] " | ||
2086 | "sreg2[%02x] ireg[%02x]\n", | ||
2087 | esp->sreg, esp->seqreg, esp->sreg2, esp->ireg); | ||
2088 | |||
2089 | intr_done = 0; | ||
2090 | |||
2091 | if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) { | ||
2092 | printk("ESP: unexpected IREG %02x\n", esp->ireg); | ||
2093 | if (esp->ireg & ESP_INTR_IC) | ||
2094 | esp_dump_cmd_log(esp); | ||
2095 | |||
2096 | esp_schedule_reset(esp); | ||
2097 | } else { | ||
2098 | if (!(esp->ireg & ESP_INTR_RSEL)) { | ||
2099 | /* Some combination of FDONE, BSERV, DC. */ | ||
2100 | if (esp->select_state != ESP_SELECT_NONE) | ||
2101 | intr_done = esp_finish_select(esp); | ||
2102 | } else if (esp->ireg & ESP_INTR_RSEL) { | ||
2103 | if (esp->active_cmd) | ||
2104 | (void) esp_finish_select(esp); | ||
2105 | intr_done = esp_reconnect(esp); | ||
2106 | } | ||
2107 | } | ||
2108 | while (!intr_done) | ||
2109 | intr_done = esp_process_event(esp); | ||
2110 | } | ||
2111 | |||
2112 | irqreturn_t scsi_esp_intr(int irq, void *dev_id) | ||
2113 | { | ||
2114 | struct esp *esp = dev_id; | ||
2115 | unsigned long flags; | ||
2116 | irqreturn_t ret; | ||
2117 | |||
2118 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2119 | ret = IRQ_NONE; | ||
2120 | if (esp->ops->irq_pending(esp)) { | ||
2121 | ret = IRQ_HANDLED; | ||
2122 | for (;;) { | ||
2123 | int i; | ||
2124 | |||
2125 | __esp_interrupt(esp); | ||
2126 | if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK)) | ||
2127 | break; | ||
2128 | esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK; | ||
2129 | |||
2130 | for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) { | ||
2131 | if (esp->ops->irq_pending(esp)) | ||
2132 | break; | ||
2133 | } | ||
2134 | if (i == ESP_QUICKIRQ_LIMIT) | ||
2135 | break; | ||
2136 | } | ||
2137 | } | ||
2138 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2139 | |||
2140 | return ret; | ||
2141 | } | ||
2142 | EXPORT_SYMBOL(scsi_esp_intr); | ||
2143 | |||
2144 | static void __devinit esp_get_revision(struct esp *esp) | ||
2145 | { | ||
2146 | u8 val; | ||
2147 | |||
2148 | esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7)); | ||
2149 | esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY); | ||
2150 | esp_write8(esp->config2, ESP_CFG2); | ||
2151 | |||
2152 | val = esp_read8(ESP_CFG2); | ||
2153 | val &= ~ESP_CONFIG2_MAGIC; | ||
2154 | if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) { | ||
2155 | /* If what we write to cfg2 does not come back, cfg2 is not | ||
2156 | * implemented, therefore this must be a plain esp100. | ||
2157 | */ | ||
2158 | esp->rev = ESP100; | ||
2159 | } else { | ||
2160 | esp->config2 = 0; | ||
2161 | esp_set_all_config3(esp, 5); | ||
2162 | esp->prev_cfg3 = 5; | ||
2163 | esp_write8(esp->config2, ESP_CFG2); | ||
2164 | esp_write8(0, ESP_CFG3); | ||
2165 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
2166 | |||
2167 | val = esp_read8(ESP_CFG3); | ||
2168 | if (val != 5) { | ||
2169 | /* The cfg2 register is implemented, however | ||
2170 | * cfg3 is not, must be esp100a. | ||
2171 | */ | ||
2172 | esp->rev = ESP100A; | ||
2173 | } else { | ||
2174 | esp_set_all_config3(esp, 0); | ||
2175 | esp->prev_cfg3 = 0; | ||
2176 | esp_write8(esp->prev_cfg3, ESP_CFG3); | ||
2177 | |||
2178 | /* All of cfg{1,2,3} implemented, must be one of | ||
2179 | * the fas variants, figure out which one. | ||
2180 | */ | ||
2181 | if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) { | ||
2182 | esp->rev = FAST; | ||
2183 | esp->sync_defp = SYNC_DEFP_FAST; | ||
2184 | } else { | ||
2185 | esp->rev = ESP236; | ||
2186 | } | ||
2187 | esp->config2 = 0; | ||
2188 | esp_write8(esp->config2, ESP_CFG2); | ||
2189 | } | ||
2190 | } | ||
2191 | } | ||
2192 | |||
2193 | static void __devinit esp_init_swstate(struct esp *esp) | ||
2194 | { | ||
2195 | int i; | ||
2196 | |||
2197 | INIT_LIST_HEAD(&esp->queued_cmds); | ||
2198 | INIT_LIST_HEAD(&esp->active_cmds); | ||
2199 | INIT_LIST_HEAD(&esp->esp_cmd_pool); | ||
2200 | |||
2201 | /* Start with a clear state, domain validation (via ->slave_configure, | ||
2202 | * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged | ||
2203 | * commands. | ||
2204 | */ | ||
2205 | for (i = 0 ; i < ESP_MAX_TARGET; i++) { | ||
2206 | esp->target[i].flags = 0; | ||
2207 | esp->target[i].nego_goal_period = 0; | ||
2208 | esp->target[i].nego_goal_offset = 0; | ||
2209 | esp->target[i].nego_goal_width = 0; | ||
2210 | esp->target[i].nego_goal_tags = 0; | ||
2211 | } | ||
2212 | } | ||
2213 | |||
2214 | /* This places the ESP into a known state at boot time. */ | ||
2215 | static void __devinit esp_bootup_reset(struct esp *esp) | ||
2216 | { | ||
2217 | u8 val; | ||
2218 | |||
2219 | /* Reset the DMA */ | ||
2220 | esp->ops->reset_dma(esp); | ||
2221 | |||
2222 | /* Reset the ESP */ | ||
2223 | esp_reset_esp(esp); | ||
2224 | |||
2225 | /* Reset the SCSI bus, but tell ESP not to generate an irq */ | ||
2226 | val = esp_read8(ESP_CFG1); | ||
2227 | val |= ESP_CONFIG1_SRRDISAB; | ||
2228 | esp_write8(val, ESP_CFG1); | ||
2229 | |||
2230 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
2231 | udelay(400); | ||
2232 | |||
2233 | esp_write8(esp->config1, ESP_CFG1); | ||
2234 | |||
2235 | /* Eat any bitrot in the chip and we are done... */ | ||
2236 | esp_read8(ESP_INTRPT); | ||
2237 | } | ||
2238 | |||
2239 | static void __devinit esp_set_clock_params(struct esp *esp) | ||
2240 | { | ||
2241 | int fmhz; | ||
2242 | u8 ccf; | ||
2243 | |||
2244 | /* This is getting messy but it has to be done correctly or else | ||
2245 | * you get weird behavior all over the place. We are trying to | ||
2246 | * basically figure out three pieces of information. | ||
2247 | * | ||
2248 | * a) Clock Conversion Factor | ||
2249 | * | ||
2250 | * This is a representation of the input crystal clock frequency | ||
2251 | * going into the ESP on this machine. Any operation whose timing | ||
2252 | * is longer than 400ns depends on this value being correct. For | ||
2253 | * example, you'll get blips for arbitration/selection during high | ||
2254 | * load or with multiple targets if this is not set correctly. | ||
2255 | * | ||
2256 | * b) Selection Time-Out | ||
2257 | * | ||
2258 | * The ESP isn't very bright and will arbitrate for the bus and try | ||
2259 | * to select a target forever if you let it. This value tells the | ||
2260 | * ESP when it has taken too long to negotiate and that it should | ||
2261 | * interrupt the CPU so we can see what happened. The value is | ||
2262 | * computed as follows (from NCR/Symbios chip docs). | ||
2263 | * | ||
2264 | * (Time Out Period) * (Input Clock) | ||
2265 | * STO = ---------------------------------- | ||
2266 | * (8192) * (Clock Conversion Factor) | ||
2267 | * | ||
2268 | * We use a time out period of 250ms (ESP_BUS_TIMEOUT). | ||
2269 | * | ||
2270 | * c) Empirical constants for synchronous offset and transfer period | ||
2271 | * register values | ||
2272 | * | ||
2273 | * This entails the smallest and largest sync period we could ever | ||
2274 | * handle on this ESP. | ||
2275 | */ | ||
2276 | fmhz = esp->cfreq; | ||
2277 | |||
2278 | ccf = ((fmhz / 1000000) + 4) / 5; | ||
2279 | if (ccf == 1) | ||
2280 | ccf = 2; | ||
2281 | |||
2282 | /* If we can't find anything reasonable, just assume 20MHZ. | ||
2283 | * This is the clock frequency of the older sun4c's where I've | ||
2284 | * been unable to find the clock-frequency PROM property. All | ||
2285 | * other machines provide useful values it seems. | ||
2286 | */ | ||
2287 | if (fmhz <= 5000000 || ccf < 1 || ccf > 8) { | ||
2288 | fmhz = 20000000; | ||
2289 | ccf = 4; | ||
2290 | } | ||
2291 | |||
2292 | esp->cfact = (ccf == 8 ? 0 : ccf); | ||
2293 | esp->cfreq = fmhz; | ||
2294 | esp->ccycle = ESP_MHZ_TO_CYCLE(fmhz); | ||
2295 | esp->ctick = ESP_TICK(ccf, esp->ccycle); | ||
2296 | esp->neg_defp = ESP_NEG_DEFP(fmhz, ccf); | ||
2297 | esp->sync_defp = SYNC_DEFP_SLOW; | ||
2298 | } | ||
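To make the three items above concrete, here is a worked example of the same formulas; the 25 MHz input crystal is purely illustrative and every value simply follows from the macros defined in esp_scsi.h:

	/*
	 * Illustrative only, assuming fmhz = 25000000 (25 MHz crystal):
	 *
	 *   ccf    = ((25000000 / 1000000) + 4) / 5   = 5
	 *   ccycle = ESP_MHZ_TO_CYCLE(25000000)
	 *          = 1000000000 / (25000000 / 1000)   = 40000  (i.e. 40 ns per clock)
	 *   STO    = ESP_NEG_DEFP(25000000, 5)
	 *          = (250 * 25000) / (8192 * 5)       = 152
	 *
	 * so for this clock rate esp->neg_defp, the selection time-out value
	 * from item (b), comes out to 152.
	 */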
2299 | |||
2300 | static const char *esp_chip_names[] = { | ||
2301 | "ESP100", | ||
2302 | "ESP100A", | ||
2303 | "ESP236", | ||
2304 | "FAS236", | ||
2305 | "FAS100A", | ||
2306 | "FAST", | ||
2307 | "FASHME", | ||
2308 | }; | ||
2309 | |||
2310 | static struct scsi_transport_template *esp_transport_template; | ||
2311 | |||
2312 | int __devinit scsi_esp_register(struct esp *esp, struct device *dev) | ||
2313 | { | ||
2314 | static int instance; | ||
2315 | int err; | ||
2316 | |||
2317 | esp->host->transportt = esp_transport_template; | ||
2318 | esp->host->max_lun = ESP_MAX_LUN; | ||
2319 | esp->host->cmd_per_lun = 2; | ||
2320 | |||
2321 | esp_set_clock_params(esp); | ||
2322 | |||
2323 | esp_get_revision(esp); | ||
2324 | |||
2325 | esp_init_swstate(esp); | ||
2326 | |||
2327 | esp_bootup_reset(esp); | ||
2328 | |||
2329 | printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n", | ||
2330 | esp->host->unique_id, esp->regs, esp->dma_regs, | ||
2331 | esp->host->irq); | ||
2332 | printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n", | ||
2333 | esp->host->unique_id, esp_chip_names[esp->rev], | ||
2334 | esp->cfreq / 1000000, esp->cfact, esp->scsi_id); | ||
2335 | |||
2336 | /* Let the SCSI bus reset settle. */ | ||
2337 | ssleep(esp_bus_reset_settle); | ||
2338 | |||
2339 | err = scsi_add_host(esp->host, dev); | ||
2340 | if (err) | ||
2341 | return err; | ||
2342 | |||
2343 | esp->host->unique_id = instance++; | ||
2344 | |||
2345 | scsi_scan_host(esp->host); | ||
2346 | |||
2347 | return 0; | ||
2348 | } | ||
2349 | EXPORT_SYMBOL(scsi_esp_register); | ||
2350 | |||
2351 | void __devexit scsi_esp_unregister(struct esp *esp) | ||
2352 | { | ||
2353 | scsi_remove_host(esp->host); | ||
2354 | } | ||
2355 | EXPORT_SYMBOL(scsi_esp_unregister); | ||
2356 | |||
2357 | static int esp_slave_alloc(struct scsi_device *dev) | ||
2358 | { | ||
2359 | struct esp *esp = host_to_esp(dev->host); | ||
2360 | struct esp_target_data *tp = &esp->target[dev->id]; | ||
2361 | struct esp_lun_data *lp; | ||
2362 | |||
2363 | lp = kzalloc(sizeof(*lp), GFP_KERNEL); | ||
2364 | if (!lp) | ||
2365 | return -ENOMEM; | ||
2366 | dev->hostdata = lp; | ||
2367 | |||
2368 | tp->starget = dev->sdev_target; | ||
2369 | |||
2370 | spi_min_period(tp->starget) = esp->min_period; | ||
2371 | spi_max_offset(tp->starget) = 15; | ||
2372 | |||
2373 | if (esp->flags & ESP_FLAG_WIDE_CAPABLE) | ||
2374 | spi_max_width(tp->starget) = 1; | ||
2375 | else | ||
2376 | spi_max_width(tp->starget) = 0; | ||
2377 | |||
2378 | return 0; | ||
2379 | } | ||
2380 | |||
2381 | static int esp_slave_configure(struct scsi_device *dev) | ||
2382 | { | ||
2383 | struct esp *esp = host_to_esp(dev->host); | ||
2384 | struct esp_target_data *tp = &esp->target[dev->id]; | ||
2385 | int goal_tags, queue_depth; | ||
2386 | |||
2387 | goal_tags = 0; | ||
2388 | |||
2389 | if (dev->tagged_supported) { | ||
2390 | /* XXX make this configurable somehow XXX */ | ||
2391 | goal_tags = ESP_DEFAULT_TAGS; | ||
2392 | |||
2393 | if (goal_tags > ESP_MAX_TAG) | ||
2394 | goal_tags = ESP_MAX_TAG; | ||
2395 | } | ||
2396 | |||
2397 | queue_depth = goal_tags; | ||
2398 | if (queue_depth < dev->host->cmd_per_lun) | ||
2399 | queue_depth = dev->host->cmd_per_lun; | ||
2400 | |||
2401 | if (goal_tags) { | ||
2402 | scsi_set_tag_type(dev, MSG_ORDERED_TAG); | ||
2403 | scsi_activate_tcq(dev, queue_depth); | ||
2404 | } else { | ||
2405 | scsi_deactivate_tcq(dev, queue_depth); | ||
2406 | } | ||
2407 | tp->flags |= ESP_TGT_DISCONNECT; | ||
2408 | |||
2409 | if (!spi_initial_dv(dev->sdev_target)) | ||
2410 | spi_dv_device(dev); | ||
2411 | |||
2412 | return 0; | ||
2413 | } | ||
2414 | |||
2415 | static void esp_slave_destroy(struct scsi_device *dev) | ||
2416 | { | ||
2417 | struct esp_lun_data *lp = dev->hostdata; | ||
2418 | |||
2419 | kfree(lp); | ||
2420 | dev->hostdata = NULL; | ||
2421 | } | ||
2422 | |||
2423 | static int esp_eh_abort_handler(struct scsi_cmnd *cmd) | ||
2424 | { | ||
2425 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2426 | struct esp_cmd_entry *ent, *tmp; | ||
2427 | struct completion eh_done; | ||
2428 | unsigned long flags; | ||
2429 | |||
2430 | /* XXX This helps a lot with debugging but might be a bit | ||
2431 | * XXX much for the final driver. | ||
2432 | */ | ||
2433 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2434 | printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n", | ||
2435 | esp->host->unique_id, cmd, cmd->cmnd[0]); | ||
2436 | ent = esp->active_cmd; | ||
2437 | if (ent) | ||
2438 | printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n", | ||
2439 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2440 | list_for_each_entry(ent, &esp->queued_cmds, list) { | ||
2441 | printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n", | ||
2442 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2443 | } | ||
2444 | list_for_each_entry(ent, &esp->active_cmds, list) { | ||
2445 | printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n", | ||
2446 | esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]); | ||
2447 | } | ||
2448 | esp_dump_cmd_log(esp); | ||
2449 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2450 | |||
2451 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2452 | |||
2453 | ent = NULL; | ||
2454 | list_for_each_entry(tmp, &esp->queued_cmds, list) { | ||
2455 | if (tmp->cmd == cmd) { | ||
2456 | ent = tmp; | ||
2457 | break; | ||
2458 | } | ||
2459 | } | ||
2460 | |||
2461 | if (ent) { | ||
2462 | /* Easiest case, we didn't even issue the command | ||
2463 | * yet so it is trivial to abort. | ||
2464 | */ | ||
2465 | list_del(&ent->list); | ||
2466 | |||
2467 | cmd->result = DID_ABORT << 16; | ||
2468 | cmd->scsi_done(cmd); | ||
2469 | |||
2470 | esp_put_ent(esp, ent); | ||
2471 | |||
2472 | goto out_success; | ||
2473 | } | ||
2474 | |||
2475 | init_completion(&eh_done); | ||
2476 | |||
2477 | ent = esp->active_cmd; | ||
2478 | if (ent && ent->cmd == cmd) { | ||
2479 | /* Command is the currently active command on | ||
2480 | * the bus. If we already have an output message | ||
2481 | * pending, no dice. | ||
2482 | */ | ||
2483 | if (esp->msg_out_len) | ||
2484 | goto out_failure; | ||
2485 | |||
2486 | /* Send out an abort, encouraging the target to | ||
2487 | * go to MSGOUT phase by asserting ATN. | ||
2488 | */ | ||
2489 | esp->msg_out[0] = ABORT_TASK_SET; | ||
2490 | esp->msg_out_len = 1; | ||
2491 | ent->eh_done = &eh_done; | ||
2492 | |||
2493 | scsi_esp_cmd(esp, ESP_CMD_SATN); | ||
2494 | } else { | ||
2495 | /* The command is disconnected. This is not easy to | ||
2496 | * abort. For now we fail and let the scsi error | ||
2497 | * handling layer go try a scsi bus reset or host | ||
2498 | * reset. | ||
2499 | * | ||
2500 | * What we could do is put together a scsi command | ||
2501 | * solely for the purpose of sending an abort message | ||
2502 | * to the target. Coming up with all the code to | ||
2503 | * cook up scsi commands, special case them everywhere, | ||
2504 | * etc. is for questionable gain and it would be better | ||
2505 | * if the generic scsi error handling layer could do at | ||
2506 | * least some of that for us. | ||
2507 | * | ||
2508 | * Anyways this is an area for potential future improvement | ||
2509 | * in this driver. | ||
2510 | */ | ||
2511 | goto out_failure; | ||
2512 | } | ||
2513 | |||
2514 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2515 | |||
2516 | if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) { | ||
2517 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2518 | ent->eh_done = NULL; | ||
2519 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2520 | |||
2521 | return FAILED; | ||
2522 | } | ||
2523 | |||
2524 | return SUCCESS; | ||
2525 | |||
2526 | out_success: | ||
2527 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2528 | return SUCCESS; | ||
2529 | |||
2530 | out_failure: | ||
2531 | /* XXX This might be a good location to set ESP_TGT_BROKEN | ||
2532 | * XXX since we know which target/lun in particular is | ||
2533 | * XXX causing trouble. | ||
2534 | */ | ||
2535 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2536 | return FAILED; | ||
2537 | } | ||
2538 | |||
2539 | static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd) | ||
2540 | { | ||
2541 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2542 | struct completion eh_reset; | ||
2543 | unsigned long flags; | ||
2544 | |||
2545 | init_completion(&eh_reset); | ||
2546 | |||
2547 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2548 | |||
2549 | esp->eh_reset = &eh_reset; | ||
2550 | |||
2551 | /* XXX This is too simple... We should add lots of | ||
2552 | * XXX checks here so that if we find that the chip is | ||
2553 | * XXX very wedged we return failure immediately so | ||
2554 | * XXX that we can perform a full chip reset. | ||
2555 | */ | ||
2556 | esp->flags |= ESP_FLAG_RESETTING; | ||
2557 | scsi_esp_cmd(esp, ESP_CMD_RS); | ||
2558 | |||
2559 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2560 | |||
2561 | ssleep(esp_bus_reset_settle); | ||
2562 | |||
2563 | if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) { | ||
2564 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2565 | esp->eh_reset = NULL; | ||
2566 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2567 | |||
2568 | return FAILED; | ||
2569 | } | ||
2570 | |||
2571 | return SUCCESS; | ||
2572 | } | ||
2573 | |||
2574 | /* All bets are off, reset the entire device. */ | ||
2575 | static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd) | ||
2576 | { | ||
2577 | struct esp *esp = host_to_esp(cmd->device->host); | ||
2578 | unsigned long flags; | ||
2579 | |||
2580 | spin_lock_irqsave(esp->host->host_lock, flags); | ||
2581 | esp_bootup_reset(esp); | ||
2582 | esp_reset_cleanup(esp); | ||
2583 | spin_unlock_irqrestore(esp->host->host_lock, flags); | ||
2584 | |||
2585 | ssleep(esp_bus_reset_settle); | ||
2586 | |||
2587 | return SUCCESS; | ||
2588 | } | ||
2589 | |||
2590 | static const char *esp_info(struct Scsi_Host *host) | ||
2591 | { | ||
2592 | return "esp"; | ||
2593 | } | ||
2594 | |||
2595 | struct scsi_host_template scsi_esp_template = { | ||
2596 | .module = THIS_MODULE, | ||
2597 | .name = "esp", | ||
2598 | .info = esp_info, | ||
2599 | .queuecommand = esp_queuecommand, | ||
2600 | .slave_alloc = esp_slave_alloc, | ||
2601 | .slave_configure = esp_slave_configure, | ||
2602 | .slave_destroy = esp_slave_destroy, | ||
2603 | .eh_abort_handler = esp_eh_abort_handler, | ||
2604 | .eh_bus_reset_handler = esp_eh_bus_reset_handler, | ||
2605 | .eh_host_reset_handler = esp_eh_host_reset_handler, | ||
2606 | .can_queue = 7, | ||
2607 | .this_id = 7, | ||
2608 | .sg_tablesize = SG_ALL, | ||
2609 | .use_clustering = ENABLE_CLUSTERING, | ||
2610 | .max_sectors = 0xffff, | ||
2611 | .skip_settle_delay = 1, | ||
2612 | }; | ||
2613 | EXPORT_SYMBOL(scsi_esp_template); | ||
2614 | |||
2615 | static void esp_get_signalling(struct Scsi_Host *host) | ||
2616 | { | ||
2617 | struct esp *esp = host_to_esp(host); | ||
2618 | enum spi_signal_type type; | ||
2619 | |||
2620 | if (esp->flags & ESP_FLAG_DIFFERENTIAL) | ||
2621 | type = SPI_SIGNAL_HVD; | ||
2622 | else | ||
2623 | type = SPI_SIGNAL_SE; | ||
2624 | |||
2625 | spi_signalling(host) = type; | ||
2626 | } | ||
2627 | |||
2628 | static void esp_set_offset(struct scsi_target *target, int offset) | ||
2629 | { | ||
2630 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2631 | struct esp *esp = host_to_esp(host); | ||
2632 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2633 | |||
2634 | tp->nego_goal_offset = offset; | ||
2635 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2636 | } | ||
2637 | |||
2638 | static void esp_set_period(struct scsi_target *target, int period) | ||
2639 | { | ||
2640 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2641 | struct esp *esp = host_to_esp(host); | ||
2642 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2643 | |||
2644 | tp->nego_goal_period = period; | ||
2645 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2646 | } | ||
2647 | |||
2648 | static void esp_set_width(struct scsi_target *target, int width) | ||
2649 | { | ||
2650 | struct Scsi_Host *host = dev_to_shost(target->dev.parent); | ||
2651 | struct esp *esp = host_to_esp(host); | ||
2652 | struct esp_target_data *tp = &esp->target[target->id]; | ||
2653 | |||
2654 | tp->nego_goal_width = (width ? 1 : 0); | ||
2655 | tp->flags |= ESP_TGT_CHECK_NEGO; | ||
2656 | } | ||
2657 | |||
2658 | static struct spi_function_template esp_transport_ops = { | ||
2659 | .set_offset = esp_set_offset, | ||
2660 | .show_offset = 1, | ||
2661 | .set_period = esp_set_period, | ||
2662 | .show_period = 1, | ||
2663 | .set_width = esp_set_width, | ||
2664 | .show_width = 1, | ||
2665 | .get_signalling = esp_get_signalling, | ||
2666 | }; | ||
2667 | |||
2668 | static int __init esp_init(void) | ||
2669 | { | ||
2670 | BUILD_BUG_ON(sizeof(struct scsi_pointer) < | ||
2671 | sizeof(struct esp_cmd_priv)); | ||
2672 | |||
2673 | esp_transport_template = spi_attach_transport(&esp_transport_ops); | ||
2674 | if (!esp_transport_template) | ||
2675 | return -ENODEV; | ||
2676 | |||
2677 | return 0; | ||
2678 | } | ||
2679 | |||
2680 | static void __exit esp_exit(void) | ||
2681 | { | ||
2682 | spi_release_transport(esp_transport_template); | ||
2683 | } | ||
2684 | |||
2685 | MODULE_DESCRIPTION("ESP SCSI driver core"); | ||
2686 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
2687 | MODULE_LICENSE("GPL"); | ||
2688 | MODULE_VERSION(DRV_VERSION); | ||
2689 | |||
2690 | module_param(esp_bus_reset_settle, int, 0); | ||
2691 | MODULE_PARM_DESC(esp_bus_reset_settle, | ||
2692 | "ESP scsi bus reset delay in seconds"); | ||
2693 | |||
2694 | module_param(esp_debug, int, 0); | ||
2695 | MODULE_PARM_DESC(esp_debug, | ||
2696 | "ESP bitmapped debugging message enable value:\n" | ||
2697 | " 0x00000001 Log interrupt events\n" | ||
2698 | " 0x00000002 Log scsi commands\n" | ||
2699 | " 0x00000004 Log resets\n" | ||
2700 | " 0x00000008 Log message in events\n" | ||
2701 | " 0x00000010 Log message out events\n" | ||
2702 | " 0x00000020 Log command completion\n" | ||
2703 | " 0x00000040 Log disconnects\n" | ||
2704 | " 0x00000080 Log data start\n" | ||
2705 | " 0x00000100 Log data done\n" | ||
2706 | " 0x00000200 Log reconnects\n" | ||
2707 | " 0x00000400 Log auto-sense data\n" | ||
2708 | ); | ||
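As a usage example of the bitmap above, the flags simply OR together: passing esp_debug=0x21 at module load time would enable both interrupt-event logging (0x00000001) and command-completion logging (0x00000020).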
2709 | |||
2710 | module_init(esp_init); | ||
2711 | module_exit(esp_exit); | ||
diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h new file mode 100644 index 000000000000..8d4a6690401f --- /dev/null +++ b/drivers/scsi/esp_scsi.h | |||
@@ -0,0 +1,560 @@ | |||
1 | /* esp_scsi.h: Defines and structures for the ESP driver. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #ifndef _ESP_SCSI_H | ||
7 | #define _ESP_SCSI_H | ||
8 | |||
9 | /* Access Description Offset */ | ||
10 | #define ESP_TCLOW 0x00UL /* rw Low bits transfer count 0x00 */ | ||
11 | #define ESP_TCMED 0x01UL /* rw Mid bits transfer count 0x04 */ | ||
12 | #define ESP_FDATA 0x02UL /* rw FIFO data bits 0x08 */ | ||
13 | #define ESP_CMD 0x03UL /* rw SCSI command bits 0x0c */ | ||
14 | #define ESP_STATUS 0x04UL /* ro ESP status register 0x10 */ | ||
15 | #define ESP_BUSID ESP_STATUS /* wo BusID for sel/resel 0x10 */ | ||
16 | #define ESP_INTRPT 0x05UL /* ro Kind of interrupt 0x14 */ | ||
17 | #define ESP_TIMEO ESP_INTRPT /* wo Timeout for sel/resel 0x14 */ | ||
18 | #define ESP_SSTEP 0x06UL /* ro Sequence step register 0x18 */ | ||
19 | #define ESP_STP ESP_SSTEP /* wo Transfer period/sync 0x18 */ | ||
20 | #define ESP_FFLAGS 0x07UL /* ro Bits current FIFO info 0x1c */ | ||
21 | #define ESP_SOFF ESP_FFLAGS /* wo Sync offset 0x1c */ | ||
22 | #define ESP_CFG1 0x08UL /* rw First cfg register 0x20 */ | ||
23 | #define ESP_CFACT 0x09UL /* wo Clock conv factor 0x24 */ | ||
24 | #define ESP_STATUS2 ESP_CFACT /* ro HME status2 register 0x24 */ | ||
25 | #define ESP_CTEST 0x0aUL /* wo Chip test register 0x28 */ | ||
26 | #define ESP_CFG2 0x0bUL /* rw Second cfg register 0x2c */ | ||
27 | #define ESP_CFG3 0x0cUL /* rw Third cfg register 0x30 */ | ||
28 | #define ESP_TCHI 0x0eUL /* rw High bits transf count 0x38 */ | ||
29 | #define ESP_UID ESP_TCHI /* ro Unique ID code 0x38 */ | ||
30 | #define FAS_RLO ESP_TCHI /* rw HME extended counter 0x38 */ | ||
31 | #define ESP_FGRND 0x0fUL /* rw Data base for fifo 0x3c */ | ||
32 | #define FAS_RHI ESP_FGRND /* rw HME extended counter 0x3c */ | ||
33 | |||
34 | #define SBUS_ESP_REG_SIZE 0x40UL | ||
35 | |||
36 | /* Bitfield meanings for the above registers. */ | ||
37 | |||
38 | /* ESP config reg 1, read-write, found on all ESP chips */ | ||
39 | #define ESP_CONFIG1_ID 0x07 /* My BUS ID bits */ | ||
40 | #define ESP_CONFIG1_CHTEST 0x08 /* Enable ESP chip tests */ | ||
41 | #define ESP_CONFIG1_PENABLE 0x10 /* Enable parity checks */ | ||
42 | #define ESP_CONFIG1_PARTEST 0x20 /* Parity test mode enabled? */ | ||
43 | #define ESP_CONFIG1_SRRDISAB 0x40 /* Disable SCSI reset reports */ | ||
44 | #define ESP_CONFIG1_SLCABLE 0x80 /* Enable slow cable mode */ | ||
45 | |||
46 | /* ESP config reg 2, read-write, found only on esp100a+esp200+esp236 chips */ | ||
47 | #define ESP_CONFIG2_DMAPARITY 0x01 /* enable DMA Parity (200,236) */ | ||
48 | #define ESP_CONFIG2_REGPARITY 0x02 /* enable reg Parity (200,236) */ | ||
49 | #define ESP_CONFIG2_BADPARITY 0x04 /* Bad parity target abort */ | ||
50 | #define ESP_CONFIG2_SCSI2ENAB 0x08 /* Enable SCSI-2 features (tgtmode) */ | ||
51 | #define ESP_CONFIG2_HI 0x10 /* High Impedance DREQ ??? */ | ||
52 | #define ESP_CONFIG2_HMEFENAB 0x10 /* HME features enable */ | ||
53 | #define ESP_CONFIG2_BCM 0x20 /* Enable byte-ctrl (236) */ | ||
54 | #define ESP_CONFIG2_DISPINT 0x20 /* Disable pause irq (hme) */ | ||
55 | #define ESP_CONFIG2_FENAB 0x40 /* Enable features (fas100,216) */ | ||
56 | #define ESP_CONFIG2_SPL 0x40 /* Enable status-phase latch (236) */ | ||
57 | #define ESP_CONFIG2_MKDONE 0x40 /* HME magic feature */ | ||
58 | #define ESP_CONFIG2_HME32 0x80 /* HME 32 extended */ | ||
59 | #define ESP_CONFIG2_MAGIC 0xe0 /* Invalid bits... */ | ||
60 | |||
61 | /* ESP config register 3 read-write, found only esp236+fas236+fas100a+hme chips */ | ||
62 | #define ESP_CONFIG3_FCLOCK 0x01 /* FAST SCSI clock rate (esp100a/hme) */ | ||
63 | #define ESP_CONFIG3_TEM 0x01 /* Enable thresh-8 mode (esp/fas236) */ | ||
64 | #define ESP_CONFIG3_FAST 0x02 /* Enable FAST SCSI (esp100a/hme) */ | ||
65 | #define ESP_CONFIG3_ADMA 0x02 /* Enable alternate-dma (esp/fas236) */ | ||
66 | #define ESP_CONFIG3_TENB 0x04 /* group2 SCSI2 support (esp100a/hme) */ | ||
67 | #define ESP_CONFIG3_SRB 0x04 /* Save residual byte (esp/fas236) */ | ||
68 | #define ESP_CONFIG3_TMS 0x08 /* Three-byte msg's ok (esp100a/hme) */ | ||
69 | #define ESP_CONFIG3_FCLK 0x08 /* Fast SCSI clock rate (esp/fas236) */ | ||
70 | #define ESP_CONFIG3_IDMSG 0x10 /* ID message checking (esp100a/hme) */ | ||
71 | #define ESP_CONFIG3_FSCSI 0x10 /* Enable FAST SCSI (esp/fas236) */ | ||
72 | #define ESP_CONFIG3_GTM 0x20 /* group2 SCSI2 support (esp/fas236) */ | ||
73 | #define ESP_CONFIG3_IDBIT3 0x20 /* Bit 3 of HME SCSI-ID (hme) */ | ||
74 | #define ESP_CONFIG3_TBMS 0x40 /* Three-byte msg's ok (esp/fas236) */ | ||
75 | #define ESP_CONFIG3_EWIDE 0x40 /* Enable Wide-SCSI (hme) */ | ||
76 | #define ESP_CONFIG3_IMS 0x80 /* ID msg chk'ng (esp/fas236) */ | ||
77 | #define ESP_CONFIG3_OBPUSH 0x80 /* Push odd-byte to dma (hme) */ | ||
78 | |||
79 | /* ESP command register read-write */ | ||
80 | /* Group 1 commands: These may be sent at any point in time to the ESP | ||
81 | * chip. None of them can generate interrupts 'cept | ||
82 | * the "SCSI bus reset" command if you have not disabled | ||
83 | * SCSI reset interrupts in the config1 ESP register. | ||
84 | */ | ||
85 | #define ESP_CMD_NULL 0x00 /* Null command, ie. a nop */ | ||
86 | #define ESP_CMD_FLUSH 0x01 /* FIFO Flush */ | ||
87 | #define ESP_CMD_RC 0x02 /* Chip reset */ | ||
88 | #define ESP_CMD_RS 0x03 /* SCSI bus reset */ | ||
89 | |||
90 | /* Group 2 commands: ESP must be an initiator and connected to a target | ||
91 | * for these commands to work. | ||
92 | */ | ||
93 | #define ESP_CMD_TI 0x10 /* Transfer Information */ | ||
94 | #define ESP_CMD_ICCSEQ 0x11 /* Initiator cmd complete sequence */ | ||
95 | #define ESP_CMD_MOK 0x12 /* Message okie-dokie */ | ||
96 | #define ESP_CMD_TPAD 0x18 /* Transfer Pad */ | ||
97 | #define ESP_CMD_SATN 0x1a /* Set ATN */ | ||
98 | #define ESP_CMD_RATN 0x1b /* De-assert ATN */ | ||
99 | |||
100 | /* Group 3 commands: ESP must be in the MSGOUT or MSGIN state and be connected | ||
101 | * to a target as the initiator for these commands to work. | ||
102 | */ | ||
103 | #define ESP_CMD_SMSG 0x20 /* Send message */ | ||
104 | #define ESP_CMD_SSTAT 0x21 /* Send status */ | ||
105 | #define ESP_CMD_SDATA 0x22 /* Send data */ | ||
106 | #define ESP_CMD_DSEQ 0x23 /* Discontinue Sequence */ | ||
107 | #define ESP_CMD_TSEQ 0x24 /* Terminate Sequence */ | ||
108 | #define ESP_CMD_TCCSEQ 0x25 /* Target cmd cmplt sequence */ | ||
109 | #define ESP_CMD_DCNCT 0x27 /* Disconnect */ | ||
110 | #define ESP_CMD_RMSG 0x28 /* Receive Message */ | ||
111 | #define ESP_CMD_RCMD 0x29 /* Receive Command */ | ||
112 | #define ESP_CMD_RDATA 0x2a /* Receive Data */ | ||
113 | #define ESP_CMD_RCSEQ 0x2b /* Receive cmd sequence */ | ||
114 | |||
115 | /* Group 4 commands: The ESP must be in the disconnected state and must | ||
116 | * not be connected to any targets as initiator for | ||
117 | * these commands to work. | ||
118 | */ | ||
119 | #define ESP_CMD_RSEL 0x40 /* Reselect */ | ||
120 | #define ESP_CMD_SEL 0x41 /* Select w/o ATN */ | ||
121 | #define ESP_CMD_SELA 0x42 /* Select w/ATN */ | ||
122 | #define ESP_CMD_SELAS 0x43 /* Select w/ATN & STOP */ | ||
123 | #define ESP_CMD_ESEL 0x44 /* Enable selection */ | ||
124 | #define ESP_CMD_DSEL 0x45 /* Disable selections */ | ||
125 | #define ESP_CMD_SA3 0x46 /* Select w/ATN3 */ | ||
126 | #define ESP_CMD_RSEL3 0x47 /* Reselect3 */ | ||
127 | |||
128 | /* This bit enables the ESP's DMA on the SBus */ | ||
129 | #define ESP_CMD_DMA 0x80 /* Do DMA? */ | ||
130 | |||
131 | /* ESP status register read-only */ | ||
132 | #define ESP_STAT_PIO 0x01 /* IO phase bit */ | ||
133 | #define ESP_STAT_PCD 0x02 /* CD phase bit */ | ||
134 | #define ESP_STAT_PMSG 0x04 /* MSG phase bit */ | ||
135 | #define ESP_STAT_PMASK 0x07 /* Mask of phase bits */ | ||
136 | #define ESP_STAT_TDONE 0x08 /* Transfer Completed */ | ||
137 | #define ESP_STAT_TCNT 0x10 /* Transfer Counter Is Zero */ | ||
138 | #define ESP_STAT_PERR 0x20 /* Parity error */ | ||
139 | #define ESP_STAT_SPAM 0x40 /* Real bad error */ | ||
140 | /* This indicates the 'interrupt pending' condition on esp236, it is a reserved | ||
141 | * bit on other revs of the ESP. | ||
142 | */ | ||
143 | #define ESP_STAT_INTR 0x80 /* Interrupt */ | ||
144 | |||
145 | /* The status register can be masked with ESP_STAT_PMASK and compared | ||
146 | * with the following values to determine the current phase the ESP | ||
147 | * (at least thinks it) is in. For our purposes we also add our own | ||
148 | * software 'done' bit for our phase management engine. | ||
149 | */ | ||
150 | #define ESP_DOP (0) /* Data Out */ | ||
151 | #define ESP_DIP (ESP_STAT_PIO) /* Data In */ | ||
152 | #define ESP_CMDP (ESP_STAT_PCD) /* Command */ | ||
153 | #define ESP_STATP (ESP_STAT_PCD|ESP_STAT_PIO) /* Status */ | ||
154 | #define ESP_MOP (ESP_STAT_PMSG|ESP_STAT_PCD) /* Message Out */ | ||
155 | #define ESP_MIP (ESP_STAT_PMSG|ESP_STAT_PCD|ESP_STAT_PIO) /* Message In */ | ||
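A minimal sketch of how these phase values are meant to be consumed; esp_phase_of() is a hypothetical helper invented for illustration, the driver itself performs the equivalent masking inline in esp_process_event():

	static inline u8 esp_phase_of(struct esp *esp)
	{
		/* The low three status bits encode the current bus phase. */
		return esp->sreg & ESP_STAT_PMASK;
	}

	/* e.g. esp_phase_of(esp) == ESP_MIP while the target drives Message In */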
156 | |||
157 | /* HME only: status 2 register */ | ||
158 | #define ESP_STAT2_SCHBIT 0x01 /* Upper bits 3-7 of sstep enabled */ | ||
159 | #define ESP_STAT2_FFLAGS 0x02 /* The fifo flags are now latched */ | ||
160 | #define ESP_STAT2_XCNT 0x04 /* The transfer counter is latched */ | ||
161 | #define ESP_STAT2_CREGA 0x08 /* The command reg is active now */ | ||
162 | #define ESP_STAT2_WIDE 0x10 /* Interface on this adapter is wide */ | ||
163 | #define ESP_STAT2_F1BYTE 0x20 /* There is one byte at top of fifo */ | ||
164 | #define ESP_STAT2_FMSB 0x40 /* Next byte in fifo is most significant */ | ||
165 | #define ESP_STAT2_FEMPTY 0x80 /* FIFO is empty */ | ||
166 | |||
167 | /* ESP interrupt register read-only */ | ||
168 | #define ESP_INTR_S 0x01 /* Select w/o ATN */ | ||
169 | #define ESP_INTR_SATN 0x02 /* Select w/ATN */ | ||
170 | #define ESP_INTR_RSEL 0x04 /* Reselected */ | ||
171 | #define ESP_INTR_FDONE 0x08 /* Function done */ | ||
172 | #define ESP_INTR_BSERV 0x10 /* Bus service */ | ||
173 | #define ESP_INTR_DC 0x20 /* Disconnect */ | ||
174 | #define ESP_INTR_IC 0x40 /* Illegal command given */ | ||
175 | #define ESP_INTR_SR 0x80 /* SCSI bus reset detected */ | ||
176 | |||
177 | /* ESP sequence step register read-only */ | ||
178 | #define ESP_STEP_VBITS 0x07 /* Valid bits */ | ||
179 | #define ESP_STEP_ASEL 0x00 /* Selection&Arbitrate cmplt */ | ||
180 | #define ESP_STEP_SID 0x01 /* One msg byte sent */ | ||
181 | #define ESP_STEP_NCMD 0x02 /* Was not in command phase */ | ||
182 | #define ESP_STEP_PPC 0x03 /* Early phase chg caused cmnd | ||
183 | * bytes to be lost | ||
184 | */ | ||
185 | #define ESP_STEP_FINI4 0x04 /* Command was sent ok */ | ||
186 | |||
187 | /* Ho hum, some ESP's set the step register to this as well... */ | ||
188 | #define ESP_STEP_FINI5 0x05 | ||
189 | #define ESP_STEP_FINI6 0x06 | ||
190 | #define ESP_STEP_FINI7 0x07 | ||
191 | |||
192 | /* ESP chip-test register read-write */ | ||
193 | #define ESP_TEST_TARG 0x01 /* Target test mode */ | ||
194 | #define ESP_TEST_INI 0x02 /* Initiator test mode */ | ||
195 | #define ESP_TEST_TS 0x04 /* Tristate test mode */ | ||
196 | |||
197 | /* ESP unique ID register read-only, found on fas236+fas100a only */ | ||
198 | #define ESP_UID_F100A 0x00 /* ESP FAS100A */ | ||
199 | #define ESP_UID_F236 0x02 /* ESP FAS236 */ | ||
200 | #define ESP_UID_REV 0x07 /* ESP revision */ | ||
201 | #define ESP_UID_FAM 0xf8 /* ESP family */ | ||
202 | |||
203 | /* ESP fifo flags register read-only */ | ||
204 | /* Note that the following implies a 16 byte FIFO on the ESP. */ | ||
205 | #define ESP_FF_FBYTES 0x1f /* Num bytes in FIFO */ | ||
206 | #define ESP_FF_ONOTZERO 0x20 /* offset ctr not zero (esp100) */ | ||
207 | #define ESP_FF_SSTEP 0xe0 /* Sequence step */ | ||
208 | |||
209 | /* ESP clock conversion factor register write-only */ | ||
210 | #define ESP_CCF_F0 0x00 /* 35.01MHz - 40MHz */ | ||
211 | #define ESP_CCF_NEVER 0x01 /* Set it to this and die */ | ||
212 | #define ESP_CCF_F2 0x02 /* 10MHz */ | ||
213 | #define ESP_CCF_F3 0x03 /* 10.01MHz - 15MHz */ | ||
214 | #define ESP_CCF_F4 0x04 /* 15.01MHz - 20MHz */ | ||
215 | #define ESP_CCF_F5 0x05 /* 20.01MHz - 25MHz */ | ||
216 | #define ESP_CCF_F6 0x06 /* 25.01MHz - 30MHz */ | ||
217 | #define ESP_CCF_F7 0x07 /* 30.01MHz - 35MHz */ | ||
218 | |||
219 | /* HME only... */ | ||
220 | #define ESP_BUSID_RESELID 0x10 | ||
221 | #define ESP_BUSID_CTR32BIT 0x40 | ||
222 | |||
223 | #define ESP_BUS_TIMEOUT 250 /* In milli-seconds */ | ||
224 | #define ESP_TIMEO_CONST 8192 | ||
225 | #define ESP_NEG_DEFP(mhz, cfact) \ | ||
226 | ((ESP_BUS_TIMEOUT * ((mhz) / 1000)) / (ESP_TIMEO_CONST * (cfact))) | ||
227 | #define ESP_MHZ_TO_CYCLE(mhertz) ((1000000000) / ((mhertz) / 1000)) | ||
228 | #define ESP_TICK(ccf, cycle) ((7682 * (ccf) * (cycle) / 1000)) | ||
229 | |||
230 | /* For slow to medium speed input clock rates we shoot for 5mb/s, but for high | ||
231 | * input clock rates we try to do 10mb/s, although I don't think a transfer can | ||
232 | * actually run that fast on an ESP even with DMA2 scatter-gather pipelining. | ||
233 | */ | ||
234 | #define SYNC_DEFP_SLOW 0x32 /* 5mb/s */ | ||
235 | #define SYNC_DEFP_FAST 0x19 /* 10mb/s */ | ||
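Reading these defaults as SCSI SDTR period bytes in 4 ns units (an assumption, but one that lines up with the 5mb/s and 10mb/s comments), the arithmetic works out as:

	/*
	 *   0x32 = 50  ->  50 * 4 ns = 200 ns period  ->  5 MHz  ->  ~5 MB/s narrow
	 *   0x19 = 25  ->  25 * 4 ns = 100 ns period  -> 10 MHz  -> ~10 MB/s narrow
	 */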
236 | |||
237 | struct esp_cmd_priv { | ||
238 | union { | ||
239 | dma_addr_t dma_addr; | ||
240 | int num_sg; | ||
241 | } u; | ||
242 | |||
243 | unsigned int cur_residue; | ||
244 | struct scatterlist *cur_sg; | ||
245 | unsigned int tot_residue; | ||
246 | }; | ||
247 | #define ESP_CMD_PRIV(CMD) ((struct esp_cmd_priv *)(&(CMD)->SCp)) | ||
248 | |||
249 | enum esp_rev { | ||
250 | ESP100 = 0x00, /* NCR53C90 - very broken */ | ||
251 | ESP100A = 0x01, /* NCR53C90A */ | ||
252 | ESP236 = 0x02, | ||
253 | FAS236 = 0x03, | ||
254 | FAS100A = 0x04, | ||
255 | FAST = 0x05, | ||
256 | FASHME = 0x06, | ||
257 | }; | ||
258 | |||
259 | struct esp_cmd_entry { | ||
260 | struct list_head list; | ||
261 | |||
262 | struct scsi_cmnd *cmd; | ||
263 | |||
264 | unsigned int saved_cur_residue; | ||
265 | struct scatterlist *saved_cur_sg; | ||
266 | unsigned int saved_tot_residue; | ||
267 | |||
268 | u8 flags; | ||
269 | #define ESP_CMD_FLAG_WRITE 0x01 /* DMA is a write */ | ||
270 | #define ESP_CMD_FLAG_ABORT 0x02 /* being aborted */ | ||
271 | #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ | ||
272 | |||
273 | u8 tag[2]; | ||
274 | |||
275 | u8 status; | ||
276 | u8 message; | ||
277 | |||
278 | unsigned char *sense_ptr; | ||
279 | unsigned char *saved_sense_ptr; | ||
280 | dma_addr_t sense_dma; | ||
281 | |||
282 | struct completion *eh_done; | ||
283 | }; | ||
284 | |||
285 | /* XXX make this configurable somehow XXX */ | ||
286 | #define ESP_DEFAULT_TAGS 16 | ||
287 | |||
288 | #define ESP_MAX_TARGET 16 | ||
289 | #define ESP_MAX_LUN 8 | ||
290 | #define ESP_MAX_TAG 256 | ||
291 | |||
292 | struct esp_lun_data { | ||
293 | struct esp_cmd_entry *non_tagged_cmd; | ||
294 | int num_tagged; | ||
295 | int hold; | ||
296 | struct esp_cmd_entry *tagged_cmds[ESP_MAX_TAG]; | ||
297 | }; | ||
298 | |||
299 | struct esp_target_data { | ||
300 | /* These are the ESP_STP, ESP_SOFF, and ESP_CFG3 register values which | ||
301 | * match the currently negotiated settings for this target. The SCSI | ||
302 | * protocol values are maintained in spi_{offset,period,wide}(starget). | ||
303 | */ | ||
304 | u8 esp_period; | ||
305 | u8 esp_offset; | ||
306 | u8 esp_config3; | ||
307 | |||
308 | u8 flags; | ||
309 | #define ESP_TGT_WIDE 0x01 | ||
310 | #define ESP_TGT_DISCONNECT 0x02 | ||
311 | #define ESP_TGT_NEGO_WIDE 0x04 | ||
312 | #define ESP_TGT_NEGO_SYNC 0x08 | ||
313 | #define ESP_TGT_CHECK_NEGO 0x40 | ||
314 | #define ESP_TGT_BROKEN 0x80 | ||
315 | |||
316 | /* When ESP_TGT_CHECK_NEGO is set, on the next scsi command to this | ||
317 | * device we will try to negotiate the following parameters. | ||
318 | */ | ||
319 | u8 nego_goal_period; | ||
320 | u8 nego_goal_offset; | ||
321 | u8 nego_goal_width; | ||
322 | u8 nego_goal_tags; | ||
323 | |||
324 | struct scsi_target *starget; | ||
325 | }; | ||
326 | |||
327 | struct esp_event_ent { | ||
328 | u8 type; | ||
329 | #define ESP_EVENT_TYPE_EVENT 0x01 | ||
330 | #define ESP_EVENT_TYPE_CMD 0x02 | ||
331 | u8 val; | ||
332 | |||
333 | u8 sreg; | ||
334 | u8 seqreg; | ||
335 | u8 sreg2; | ||
336 | u8 ireg; | ||
337 | u8 select_state; | ||
338 | u8 event; | ||
339 | u8 __pad; | ||
340 | }; | ||
341 | |||
342 | struct esp; | ||
343 | struct esp_driver_ops { | ||
344 | /* Read and write the ESP 8-bit registers. On some | ||
345 | * applications of the ESP chip the registers are at 4-byte | ||
346 | * instead of 1-byte intervals. | ||
347 | */ | ||
348 | void (*esp_write8)(struct esp *esp, u8 val, unsigned long reg); | ||
349 | u8 (*esp_read8)(struct esp *esp, unsigned long reg); | ||
350 | |||
351 | /* Map and unmap DMA memory. Once SBUS can cope with the generic | ||
352 | * DMA API this driver will be converted to it and these hooks can | ||
353 | * be removed. | ||
354 | */ | ||
355 | dma_addr_t (*map_single)(struct esp *esp, void *buf, | ||
356 | size_t sz, int dir); | ||
357 | int (*map_sg)(struct esp *esp, struct scatterlist *sg, | ||
358 | int num_sg, int dir); | ||
359 | void (*unmap_single)(struct esp *esp, dma_addr_t addr, | ||
360 | size_t sz, int dir); | ||
361 | void (*unmap_sg)(struct esp *esp, struct scatterlist *sg, | ||
362 | int num_sg, int dir); | ||
363 | |||
364 | /* Return non-zero if there is an IRQ pending. Usually this | ||
365 | * status bit lives in the DMA controller sitting in front of | ||
366 | * the ESP. This has to be accurate or else the ESP interrupt | ||
367 | * handler will not run. | ||
368 | */ | ||
369 | int (*irq_pending)(struct esp *esp); | ||
370 | |||
371 | /* Reset the DMA engine entirely. On return, ESP interrupts | ||
372 | * should be enabled. Often the interrupt enabling is | ||
373 | * controlled in the DMA engine. | ||
374 | */ | ||
375 | void (*reset_dma)(struct esp *esp); | ||
376 | |||
377 | /* Drain any pending DMA in the DMA engine after a transfer. | ||
378 | * This is for writes to memory. | ||
379 | */ | ||
380 | void (*dma_drain)(struct esp *esp); | ||
381 | |||
382 | /* Invalidate the DMA engine after a DMA transfer. */ | ||
383 | void (*dma_invalidate)(struct esp *esp); | ||
384 | |||
385 | /* Setup an ESP command that will use a DMA transfer. | ||
386 | * The 'esp_count' specifies what transfer length should be | ||
387 | * programmed into the ESP transfer counter registers, whereas | ||
388 | * the 'dma_count' is the length that should be programmed into | ||
389 | * the DMA controller. Usually they are the same. If 'write' | ||
390 | * is non-zero, this transfer is a write into memory. 'cmd' | ||
391 | * holds the ESP command that should be issued by calling | ||
392 | * scsi_esp_cmd() at the appropriate time while programming | ||
393 | * the DMA hardware. | ||
394 | */ | ||
395 | void (*send_dma_cmd)(struct esp *esp, u32 dma_addr, u32 esp_count, | ||
396 | u32 dma_count, int write, u8 cmd); | ||
397 | |||
398 | /* Return non-zero if the DMA engine is reporting an error | ||
399 | * currently. | ||
400 | */ | ||
401 | int (*dma_error)(struct esp *esp); | ||
402 | }; | ||
403 | |||
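/* Editor's sketch (not part of the driver): a front-end whose ESP registers
 * sit at 4-byte intervals in MMIO space could implement the first two hooks
 * roughly as below; the example_* names are hypothetical stand-ins for real
 * bus-specific accessors.  The SBUS front-end added later in this diff
 * (sun_esp.c) does exactly this via sbus_writeb()/sbus_readb().
 *
 *	static void example_esp_write8(struct esp *esp, u8 val, unsigned long reg)
 *	{
 *		writeb(val, esp->regs + (reg * 4UL));
 *	}
 *
 *	static u8 example_esp_read8(struct esp *esp, unsigned long reg)
 *	{
 *		return readb(esp->regs + (reg * 4UL));
 *	}
 */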
404 | #define ESP_MAX_MSG_SZ 8 | ||
405 | #define ESP_EVENT_LOG_SZ 32 | ||
406 | |||
407 | #define ESP_QUICKIRQ_LIMIT 100 | ||
408 | #define ESP_RESELECT_TAG_LIMIT 2500 | ||
409 | |||
410 | struct esp { | ||
411 | void __iomem *regs; | ||
412 | void __iomem *dma_regs; | ||
413 | |||
414 | const struct esp_driver_ops *ops; | ||
415 | |||
416 | struct Scsi_Host *host; | ||
417 | void *dev; | ||
418 | |||
419 | struct esp_cmd_entry *active_cmd; | ||
420 | |||
421 | struct list_head queued_cmds; | ||
422 | struct list_head active_cmds; | ||
423 | |||
424 | u8 *command_block; | ||
425 | dma_addr_t command_block_dma; | ||
426 | |||
427 | unsigned int data_dma_len; | ||
428 | |||
429 | /* The following are used to determine the cause of an IRQ. Upon every | ||
430 | * IRQ entry we synchronize these with the hardware registers. | ||
431 | */ | ||
432 | u8 sreg; | ||
433 | u8 seqreg; | ||
434 | u8 sreg2; | ||
435 | u8 ireg; | ||
436 | |||
437 | u32 prev_hme_dmacsr; | ||
438 | u8 prev_soff; | ||
439 | u8 prev_stp; | ||
440 | u8 prev_cfg3; | ||
441 | u8 __pad; | ||
442 | |||
443 | struct list_head esp_cmd_pool; | ||
444 | |||
445 | struct esp_target_data target[ESP_MAX_TARGET]; | ||
446 | |||
447 | int fifo_cnt; | ||
448 | u8 fifo[16]; | ||
449 | |||
450 | struct esp_event_ent esp_event_log[ESP_EVENT_LOG_SZ]; | ||
451 | int esp_event_cur; | ||
452 | |||
453 | u8 msg_out[ESP_MAX_MSG_SZ]; | ||
454 | int msg_out_len; | ||
455 | |||
456 | u8 msg_in[ESP_MAX_MSG_SZ]; | ||
457 | int msg_in_len; | ||
458 | |||
459 | u8 bursts; | ||
460 | u8 config1; | ||
461 | u8 config2; | ||
462 | |||
463 | u8 scsi_id; | ||
464 | u32 scsi_id_mask; | ||
465 | |||
466 | enum esp_rev rev; | ||
467 | |||
468 | u32 flags; | ||
469 | #define ESP_FLAG_DIFFERENTIAL 0x00000001 | ||
470 | #define ESP_FLAG_RESETTING 0x00000002 | ||
471 | #define ESP_FLAG_DOING_SLOWCMD 0x00000004 | ||
472 | #define ESP_FLAG_WIDE_CAPABLE 0x00000008 | ||
473 | #define ESP_FLAG_QUICKIRQ_CHECK 0x00000010 | ||
474 | |||
475 | u8 select_state; | ||
476 | #define ESP_SELECT_NONE 0x00 /* Not selecting */ | ||
477 | #define ESP_SELECT_BASIC 0x01 /* Select w/o MSGOUT phase */ | ||
478 | #define ESP_SELECT_MSGOUT 0x02 /* Select with MSGOUT */ | ||
479 | |||
480 | /* When we are not selecting, we are expecting an event. */ | ||
481 | u8 event; | ||
482 | #define ESP_EVENT_NONE 0x00 | ||
483 | #define ESP_EVENT_CMD_START 0x01 | ||
484 | #define ESP_EVENT_CMD_DONE 0x02 | ||
485 | #define ESP_EVENT_DATA_IN 0x03 | ||
486 | #define ESP_EVENT_DATA_OUT 0x04 | ||
487 | #define ESP_EVENT_DATA_DONE 0x05 | ||
488 | #define ESP_EVENT_MSGIN 0x06 | ||
489 | #define ESP_EVENT_MSGIN_MORE 0x07 | ||
490 | #define ESP_EVENT_MSGIN_DONE 0x08 | ||
491 | #define ESP_EVENT_MSGOUT 0x09 | ||
492 | #define ESP_EVENT_MSGOUT_DONE 0x0a | ||
493 | #define ESP_EVENT_STATUS 0x0b | ||
494 | #define ESP_EVENT_FREE_BUS 0x0c | ||
495 | #define ESP_EVENT_CHECK_PHASE 0x0d | ||
496 | #define ESP_EVENT_RESET 0x10 | ||
497 | |||
498 | /* Probed in esp_get_clock_params() */ | ||
499 | u32 cfact; | ||
500 | u32 cfreq; | ||
501 | u32 ccycle; | ||
502 | u32 ctick; | ||
503 | u32 neg_defp; | ||
504 | u32 sync_defp; | ||
505 | |||
506 | /* Computed in esp_reset_esp() */ | ||
507 | u32 max_period; | ||
508 | u32 min_period; | ||
509 | u32 radelay; | ||
510 | |||
511 | /* Slow command state. */ | ||
512 | u8 *cmd_bytes_ptr; | ||
513 | int cmd_bytes_left; | ||
514 | |||
515 | struct completion *eh_reset; | ||
516 | |||
517 | struct sbus_dma *dma; | ||
518 | }; | ||
519 | |||
520 | #define host_to_esp(host) ((struct esp *)(host)->hostdata) | ||
521 | |||
522 | /* A front-end driver for the ESP chip should do the following in | ||
523 | * its device probe routine (a sketch follows this header): | ||
524 | * 1) Allocate the host and private area using scsi_host_alloc() | ||
525 | * with size 'sizeof(struct esp)'. The first argument to | ||
526 | * scsi_host_alloc() should be &scsi_esp_template. | ||
527 | * 2) Set host->max_id as appropriate. | ||
528 | * 3) Set esp->host to the scsi_host itself, and esp->dev | ||
529 | * to the device object pointer. | ||
530 | * 4) Hook up esp->ops to the front-end implementation. | ||
531 | * 5) If the ESP chip supports wide transfers, set ESP_FLAG_WIDE_CAPABLE | ||
532 | * in esp->flags. | ||
533 | * 6) Map the DMA and ESP chip registers. | ||
534 | * 7) DMA map the ESP command block, store the DMA address | ||
535 | * in esp->command_block_dma. | ||
536 | * 8) Register the scsi_esp_intr() interrupt handler. | ||
537 | * 9) Probe for and provide the following chip properties: | ||
538 | * esp->scsi_id (assign to esp->host->this_id too) | ||
539 | * esp->scsi_id_mask | ||
540 | * If ESP bus is differential, set ESP_FLAG_DIFFERENTIAL | ||
541 | * esp->cfreq | ||
542 | * DMA burst bit mask in esp->bursts, if necessary | ||
543 | * 10) Perform any actions necessary before the ESP device can | ||
544 | * be programmed for the first time. On some configs, for | ||
545 | * example, the DMA engine has to be reset before ESP can | ||
546 | * be programmed. | ||
547 | * 11) If necessary, call dev_set_drvdata(). | ||
548 | * 12) Call scsi_esp_register() with prepared 'esp' structure | ||
549 | * and a device pointer if possible. | ||
550 | * 13) Check scsi_esp_register() return value, release all resources | ||
551 | * if an error was returned. | ||
552 | */ | ||
553 | extern struct scsi_host_template scsi_esp_template; | ||
554 | extern int scsi_esp_register(struct esp *, struct device *); | ||
555 | |||
556 | extern void scsi_esp_unregister(struct esp *); | ||
557 | extern irqreturn_t scsi_esp_intr(int, void *); | ||
558 | extern void scsi_esp_cmd(struct esp *, u8); | ||
559 | |||
560 | #endif /* !(_ESP_SCSI_H) */ | ||
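[Editor's sketch] The 13-step checklist at the end of esp_scsi.h maps onto a front-end probe routine roughly as follows. This is an illustration only: the helpers example_ops, example_map_regs(), example_map_command_block(), example_read_chip_properties() and their unwind counterparts are hypothetical stand-ins for bus-specific code, and the sun_esp.c front-end added later in this diff is the real reference implementation.

	static int example_esp_probe(struct device *dev)
	{
		struct Scsi_Host *host;
		struct esp *esp;
		int err;

		/* 1) Allocate the host using the ESP core's template. */
		host = scsi_host_alloc(&scsi_esp_template, sizeof(struct esp));
		if (!host)
			return -ENOMEM;

		/* 2)-5) Wire up the host and esp, flag wide-capable chips. */
		host->max_id = 8;
		esp = host_to_esp(host);
		esp->host = host;
		esp->dev = dev;
		esp->ops = &example_ops;	/* hypothetical esp_driver_ops */
		/* esp->flags |= ESP_FLAG_WIDE_CAPABLE; when the chip is wide */

		/* 6)-7) Map chip/DMA registers and the command block. */
		err = example_map_regs(esp);	/* fills esp->regs, esp->dma_regs, host->irq */
		if (err)
			goto out_put_host;
		err = example_map_command_block(esp);	/* fills esp->command_block{,_dma} */
		if (err)
			goto out_unmap_regs;

		/* 8) Register the core interrupt handler. */
		err = request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
		if (err)
			goto out_unmap_command_block;

		/* 9)-11) Probe chip properties, do any pre-reset fixups, stash drvdata. */
		example_read_chip_properties(esp);	/* scsi_id, scsi_id_mask, cfreq, bursts */
		host->this_id = esp->scsi_id;
		dev_set_drvdata(dev, esp);

		/* 12)-13) Hand over to the core, unwinding on failure. */
		err = scsi_esp_register(esp, dev);
		if (err)
			goto out_free_irq;
		return 0;

	out_free_irq:
		free_irq(host->irq, esp);
	out_unmap_command_block:
		example_unmap_command_block(esp);
	out_unmap_regs:
		example_unmap_regs(esp);
	out_put_host:
		scsi_host_put(host);
		return err;
	}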
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c index 8c81cec85298..60446b88f721 100644 --- a/drivers/scsi/gdth.c +++ b/drivers/scsi/gdth.c | |||
@@ -3091,6 +3091,7 @@ static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b) | |||
3091 | cmdp->u.raw64.direction = | 3091 | cmdp->u.raw64.direction = |
3092 | gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN; | 3092 | gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN; |
3093 | memcpy(cmdp->u.raw64.cmd,scp->cmnd,16); | 3093 | memcpy(cmdp->u.raw64.cmd,scp->cmnd,16); |
3094 | cmdp->u.raw64.sg_ranz = 0; | ||
3094 | } else { | 3095 | } else { |
3095 | cmdp->u.raw.reserved = 0; | 3096 | cmdp->u.raw.reserved = 0; |
3096 | cmdp->u.raw.mdisc_time = 0; | 3097 | cmdp->u.raw.mdisc_time = 0; |
@@ -3107,6 +3108,7 @@ static int gdth_fill_raw_cmd(int hanum,Scsi_Cmnd *scp,unchar b) | |||
3107 | cmdp->u.raw.direction = | 3108 | cmdp->u.raw.direction = |
3108 | gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN; | 3109 | gdth_direction_tab[scp->cmnd[0]]==DOU ? GDTH_DATA_OUT:GDTH_DATA_IN; |
3109 | memcpy(cmdp->u.raw.cmd,scp->cmnd,12); | 3110 | memcpy(cmdp->u.raw.cmd,scp->cmnd,12); |
3111 | cmdp->u.raw.sg_ranz = 0; | ||
3110 | } | 3112 | } |
3111 | 3113 | ||
3112 | if (scp->use_sg) { | 3114 | if (scp->use_sg) { |
diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index 38c3a291efac..bd8e7f323c69 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c | |||
@@ -435,7 +435,7 @@ struct Scsi_Host *scsi_host_lookup(unsigned short hostnum) | |||
435 | struct class_device *cdev; | 435 | struct class_device *cdev; |
436 | struct Scsi_Host *shost = ERR_PTR(-ENXIO), *p; | 436 | struct Scsi_Host *shost = ERR_PTR(-ENXIO), *p; |
437 | 437 | ||
438 | down_read(&class->subsys.rwsem); | 438 | down(&class->sem); |
439 | list_for_each_entry(cdev, &class->children, node) { | 439 | list_for_each_entry(cdev, &class->children, node) { |
440 | p = class_to_shost(cdev); | 440 | p = class_to_shost(cdev); |
441 | if (p->host_no == hostnum) { | 441 | if (p->host_no == hostnum) { |
@@ -443,7 +443,7 @@ struct Scsi_Host *scsi_host_lookup(unsigned short hostnum) | |||
443 | break; | 443 | break; |
444 | } | 444 | } |
445 | } | 445 | } |
446 | up_read(&class->subsys.rwsem); | 446 | up(&class->sem); |
447 | 447 | ||
448 | return shost; | 448 | return shost; |
449 | } | 449 | } |
diff --git a/drivers/scsi/ibmvscsi/ibmvstgt.c b/drivers/scsi/ibmvscsi/ibmvstgt.c index 84363c181620..6d223dd76440 100644 --- a/drivers/scsi/ibmvscsi/ibmvstgt.c +++ b/drivers/scsi/ibmvscsi/ibmvstgt.c | |||
@@ -886,9 +886,9 @@ static int get_system_info(void) | |||
886 | { | 886 | { |
887 | struct device_node *rootdn; | 887 | struct device_node *rootdn; |
888 | const char *id, *model, *name; | 888 | const char *id, *model, *name; |
889 | unsigned int *num; | 889 | const unsigned int *num; |
890 | 890 | ||
891 | rootdn = find_path_device("/"); | 891 | rootdn = of_find_node_by_path("/"); |
892 | if (!rootdn) | 892 | if (!rootdn) |
893 | return -ENOENT; | 893 | return -ENOENT; |
894 | 894 | ||
@@ -901,10 +901,11 @@ static int get_system_info(void) | |||
901 | if (name) | 901 | if (name) |
902 | strncpy(partition_name, name, sizeof(partition_name)); | 902 | strncpy(partition_name, name, sizeof(partition_name)); |
903 | 903 | ||
904 | num = (unsigned int *) get_property(rootdn, "ibm,partition-no", NULL); | 904 | num = get_property(rootdn, "ibm,partition-no", NULL); |
905 | if (num) | 905 | if (num) |
906 | partition_number = *num; | 906 | partition_number = *num; |
907 | 907 | ||
908 | of_node_put(rootdn); | ||
908 | return 0; | 909 | return 0; |
909 | } | 910 | } |
910 | 911 | ||
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c index 227c0f2f4d74..0a533f398f52 100644 --- a/drivers/scsi/ibmvscsi/rpa_vscsi.c +++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c | |||
@@ -157,7 +157,7 @@ static void gather_partition_info(void) | |||
157 | const unsigned int *p_number_ptr; | 157 | const unsigned int *p_number_ptr; |
158 | 158 | ||
159 | /* Retrieve information about this partition */ | 159 | /* Retrieve information about this partition */ |
160 | rootdn = find_path_device("/"); | 160 | rootdn = of_find_node_by_path("/"); |
161 | if (!rootdn) { | 161 | if (!rootdn) { |
162 | return; | 162 | return; |
163 | } | 163 | } |
@@ -169,6 +169,7 @@ static void gather_partition_info(void) | |||
169 | p_number_ptr = get_property(rootdn, "ibm,partition-no", NULL); | 169 | p_number_ptr = get_property(rootdn, "ibm,partition-no", NULL); |
170 | if (p_number_ptr) | 170 | if (p_number_ptr) |
171 | partition_number = *p_number_ptr; | 171 | partition_number = *p_number_ptr; |
172 | of_node_put(rootdn); | ||
172 | } | 173 | } |
173 | 174 | ||
174 | static void set_adapter_info(struct ibmvscsi_host_data *hostdata) | 175 | static void set_adapter_info(struct ibmvscsi_host_data *hostdata) |
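[Editor's note] Both ibmvscsi hunks above switch from find_path_device() to of_find_node_by_path(), which takes a reference on the returned node; the added of_node_put() drops that reference once the properties have been read. The resulting pattern, condensed for illustration using the same property as the hunks above:

	struct device_node *rootdn;
	const unsigned int *prop;

	rootdn = of_find_node_by_path("/");	/* takes a node reference */
	if (!rootdn)
		return -ENOENT;

	prop = get_property(rootdn, "ibm,partition-no", NULL);
	if (prop)
		partition_number = *prop;

	of_node_put(rootdn);			/* drop the reference */
	return 0;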
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c index 5cf1002283b4..2c7b77e833f9 100644 --- a/drivers/scsi/ipr.c +++ b/drivers/scsi/ipr.c | |||
@@ -3773,7 +3773,8 @@ static int ipr_device_reset(struct ipr_ioa_cfg *ioa_cfg, | |||
3773 | * Return value: | 3773 | * Return value: |
3774 | * 0 on success / non-zero on failure | 3774 | * 0 on success / non-zero on failure |
3775 | **/ | 3775 | **/ |
3776 | static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes) | 3776 | static int ipr_sata_reset(struct ata_port *ap, unsigned int *classes, |
3777 | unsigned long deadline) | ||
3777 | { | 3778 | { |
3778 | struct ipr_sata_port *sata_port = ap->private_data; | 3779 | struct ipr_sata_port *sata_port = ap->private_data; |
3779 | struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; | 3780 | struct ipr_ioa_cfg *ioa_cfg = sata_port->ioa_cfg; |
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c index dc70c180e115..e34442e405e8 100644 --- a/drivers/scsi/libsas/sas_expander.c +++ b/drivers/scsi/libsas/sas_expander.c | |||
@@ -22,7 +22,6 @@ | |||
22 | * | 22 | * |
23 | */ | 23 | */ |
24 | 24 | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/scatterlist.h> | 25 | #include <linux/scatterlist.h> |
27 | 26 | ||
28 | #include "sas_internal.h" | 27 | #include "sas_internal.h" |
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c index 37a0f4dd6186..5631c199a8eb 100644 --- a/drivers/scsi/libsrp.c +++ b/drivers/scsi/libsrp.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/kfifo.h> | 22 | #include <linux/kfifo.h> |
23 | #include <linux/scatterlist.h> | 23 | #include <linux/scatterlist.h> |
24 | #include <linux/dma-mapping.h> | 24 | #include <linux/dma-mapping.h> |
25 | #include <linux/pci.h> | ||
26 | #include <scsi/scsi.h> | 25 | #include <scsi/scsi.h> |
27 | #include <scsi/scsi_cmnd.h> | 26 | #include <scsi/scsi_cmnd.h> |
28 | #include <scsi/scsi_tcq.h> | 27 | #include <scsi/scsi_tcq.h> |
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c index 09a9c8ab2eae..dcf6106f557a 100644 --- a/drivers/scsi/lpfc/lpfc_init.c +++ b/drivers/scsi/lpfc/lpfc_init.c | |||
@@ -1817,10 +1817,9 @@ static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev, | |||
1817 | struct lpfc_sli *psli = &phba->sli; | 1817 | struct lpfc_sli *psli = &phba->sli; |
1818 | struct lpfc_sli_ring *pring; | 1818 | struct lpfc_sli_ring *pring; |
1819 | 1819 | ||
1820 | if (state == pci_channel_io_perm_failure) { | 1820 | if (state == pci_channel_io_perm_failure) |
1821 | lpfc_pci_remove_one(pdev); | ||
1822 | return PCI_ERS_RESULT_DISCONNECT; | 1821 | return PCI_ERS_RESULT_DISCONNECT; |
1823 | } | 1822 | |
1824 | pci_disable_device(pdev); | 1823 | pci_disable_device(pdev); |
1825 | /* | 1824 | /* |
1826 | * There may be I/Os dropped by the firmware. | 1825 | * There may be I/Os dropped by the firmware. |
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c index 0aa3304f6b9b..7fc6e06ea7e1 100644 --- a/drivers/scsi/megaraid.c +++ b/drivers/scsi/megaraid.c | |||
@@ -2088,7 +2088,7 @@ megaraid_abort_and_reset(adapter_t *adapter, Scsi_Cmnd *cmd, int aor) | |||
2088 | static inline int | 2088 | static inline int |
2089 | make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) | 2089 | make_local_pdev(adapter_t *adapter, struct pci_dev **pdev) |
2090 | { | 2090 | { |
2091 | *pdev = kmalloc(sizeof(struct pci_dev), GFP_KERNEL); | 2091 | *pdev = alloc_pci_dev(); |
2092 | 2092 | ||
2093 | if( *pdev == NULL ) return -1; | 2093 | if( *pdev == NULL ) return -1; |
2094 | 2094 | ||
diff --git a/drivers/scsi/qlogicpti.c b/drivers/scsi/qlogicpti.c index 9b827ceec501..c4195ea869e9 100644 --- a/drivers/scsi/qlogicpti.c +++ b/drivers/scsi/qlogicpti.c | |||
@@ -1281,7 +1281,7 @@ static struct scsi_cmnd *qlogicpti_intr_handler(struct qlogicpti *qpti) | |||
1281 | (struct scatterlist *)Cmnd->request_buffer, | 1281 | (struct scatterlist *)Cmnd->request_buffer, |
1282 | Cmnd->use_sg, | 1282 | Cmnd->use_sg, |
1283 | Cmnd->sc_data_direction); | 1283 | Cmnd->sc_data_direction); |
1284 | } else { | 1284 | } else if (Cmnd->request_bufflen) { |
1285 | sbus_unmap_single(qpti->sdev, | 1285 | sbus_unmap_single(qpti->sdev, |
1286 | (__u32)((unsigned long)Cmnd->SCp.ptr), | 1286 | (__u32)((unsigned long)Cmnd->SCp.ptr), |
1287 | Cmnd->request_bufflen, | 1287 | Cmnd->request_bufflen, |
@@ -1403,7 +1403,7 @@ static int __devinit qpti_sbus_probe(struct of_device *dev, const struct of_devi | |||
1403 | struct scsi_host_template *tpnt = match->data; | 1403 | struct scsi_host_template *tpnt = match->data; |
1404 | struct Scsi_Host *host; | 1404 | struct Scsi_Host *host; |
1405 | struct qlogicpti *qpti; | 1405 | struct qlogicpti *qpti; |
1406 | char *fcode; | 1406 | const char *fcode; |
1407 | 1407 | ||
1408 | /* Sometimes Antares cards come up not completely | 1408 | /* Sometimes Antares cards come up not completely |
1409 | * setup, and we get a report of a zero IRQ. | 1409 | * setup, and we get a report of a zero IRQ. |
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c index 28a266c804be..3963e7013bd9 100644 --- a/drivers/scsi/scsi_error.c +++ b/drivers/scsi/scsi_error.c | |||
@@ -725,7 +725,7 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd, | |||
725 | */ | 725 | */ |
726 | if (copy_sense) { | 726 | if (copy_sense) { |
727 | if (!SCSI_SENSE_VALID(scmd)) { | 727 | if (!SCSI_SENSE_VALID(scmd)) { |
728 | memcpy(scmd->sense_buffer, scmd->request_buffer, | 728 | memcpy(scmd->sense_buffer, page_address(sgl.page), |
729 | sizeof(scmd->sense_buffer)); | 729 | sizeof(scmd->sense_buffer)); |
730 | } | 730 | } |
731 | __free_page(sgl.page); | 731 | __free_page(sgl.page); |
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index be8e6558b89e..61fbcdcbb009 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c | |||
@@ -31,7 +31,7 @@ | |||
31 | 31 | ||
32 | 32 | ||
33 | #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) | 33 | #define SG_MEMPOOL_NR ARRAY_SIZE(scsi_sg_pools) |
34 | #define SG_MEMPOOL_SIZE 32 | 34 | #define SG_MEMPOOL_SIZE 2 |
35 | 35 | ||
36 | struct scsi_host_sg_pool { | 36 | struct scsi_host_sg_pool { |
37 | size_t size; | 37 | size_t size; |
diff --git a/drivers/scsi/scsi_netlink.c b/drivers/scsi/scsi_netlink.c index 1b59b27e887f..4bf9aa547c78 100644 --- a/drivers/scsi/scsi_netlink.c +++ b/drivers/scsi/scsi_netlink.c | |||
@@ -50,7 +50,7 @@ scsi_nl_rcv_msg(struct sk_buff *skb) | |||
50 | while (skb->len >= NLMSG_SPACE(0)) { | 50 | while (skb->len >= NLMSG_SPACE(0)) { |
51 | err = 0; | 51 | err = 0; |
52 | 52 | ||
53 | nlh = (struct nlmsghdr *) skb->data; | 53 | nlh = nlmsg_hdr(skb); |
54 | if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || | 54 | if ((nlh->nlmsg_len < (sizeof(*nlh) + sizeof(*hdr))) || |
55 | (skb->len < nlh->nlmsg_len)) { | 55 | (skb->len < nlh->nlmsg_len)) { |
56 | printk(KERN_WARNING "%s: discarding partial skb\n", | 56 | printk(KERN_WARNING "%s: discarding partial skb\n", |
@@ -168,7 +168,8 @@ scsi_netlink_init(void) | |||
168 | } | 168 | } |
169 | 169 | ||
170 | scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, | 170 | scsi_nl_sock = netlink_kernel_create(NETLINK_SCSITRANSPORT, |
171 | SCSI_NL_GRP_CNT, scsi_nl_rcv, THIS_MODULE); | 171 | SCSI_NL_GRP_CNT, scsi_nl_rcv, NULL, |
172 | THIS_MODULE); | ||
172 | if (!scsi_nl_sock) { | 173 | if (!scsi_nl_sock) { |
173 | printk(KERN_ERR "%s: register of receive handler failed\n", | 174 | printk(KERN_ERR "%s: register of receive handler failed\n",
174 | __FUNCTION__); | 175 | __FUNCTION__); |
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c index 5326f5cbeae9..67a38a1409ba 100644 --- a/drivers/scsi/scsi_sysfs.c +++ b/drivers/scsi/scsi_sysfs.c | |||
@@ -489,10 +489,22 @@ store_rescan_field (struct device *dev, struct device_attribute *attr, const cha | |||
489 | } | 489 | } |
490 | static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); | 490 | static DEVICE_ATTR(rescan, S_IWUSR, NULL, store_rescan_field); |
491 | 491 | ||
492 | static void sdev_store_delete_callback(struct device *dev) | ||
493 | { | ||
494 | scsi_remove_device(to_scsi_device(dev)); | ||
495 | } | ||
496 | |||
492 | static ssize_t sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, | 497 | static ssize_t sdev_store_delete(struct device *dev, struct device_attribute *attr, const char *buf, |
493 | size_t count) | 498 | size_t count) |
494 | { | 499 | { |
495 | scsi_remove_device(to_scsi_device(dev)); | 500 | int rc; |
501 | |||
502 | /* An attribute cannot be unregistered by one of its own methods, | ||
503 | * so we have to use this roundabout approach. | ||
504 | */ | ||
505 | rc = device_schedule_callback(dev, sdev_store_delete_callback); | ||
506 | if (rc) | ||
507 | count = rc; | ||
496 | return count; | 508 | return count; |
497 | }; | 509 | }; |
498 | static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); | 510 | static DEVICE_ATTR(delete, S_IWUSR, NULL, sdev_store_delete); |
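[Editor's note] The comment added in this hunk is terse; as far as the editor understands it, the underlying issue is that removing a sysfs attribute waits for its active methods to return, so calling scsi_remove_device() (which deletes the "delete" attribute itself) directly from sdev_store_delete() would deadlock. device_schedule_callback() defers the removal to a separate context, which is why the store method now only schedules sdev_store_delete_callback() and returns.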
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index ff05c84479ca..caf1836bbeca 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c | |||
@@ -1081,7 +1081,7 @@ iscsi_if_rx(struct sock *sk, int len) | |||
1081 | struct nlmsghdr *nlh; | 1081 | struct nlmsghdr *nlh; |
1082 | struct iscsi_uevent *ev; | 1082 | struct iscsi_uevent *ev; |
1083 | 1083 | ||
1084 | nlh = (struct nlmsghdr *)skb->data; | 1084 | nlh = nlmsg_hdr(skb); |
1085 | if (nlh->nlmsg_len < sizeof(*nlh) || | 1085 | if (nlh->nlmsg_len < sizeof(*nlh) || |
1086 | skb->len < nlh->nlmsg_len) { | 1086 | skb->len < nlh->nlmsg_len) { |
1087 | break; | 1087 | break; |
@@ -1437,7 +1437,7 @@ static __init int iscsi_transport_init(void) | |||
1437 | if (err) | 1437 | if (err) |
1438 | goto unregister_conn_class; | 1438 | goto unregister_conn_class; |
1439 | 1439 | ||
1440 | nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, | 1440 | nls = netlink_kernel_create(NETLINK_ISCSI, 1, iscsi_if_rx, NULL, |
1441 | THIS_MODULE); | 1441 | THIS_MODULE); |
1442 | if (!nls) { | 1442 | if (!nls) { |
1443 | err = -ENOBUFS; | 1443 | err = -ENOBUFS; |
diff --git a/drivers/scsi/sun_esp.c b/drivers/scsi/sun_esp.c new file mode 100644 index 000000000000..8c766bcd1095 --- /dev/null +++ b/drivers/scsi/sun_esp.c | |||
@@ -0,0 +1,634 @@ | |||
1 | /* sun_esp.c: ESP front-end for Sparc SBUS systems. | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller (davem@davemloft.net) | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/types.h> | ||
8 | #include <linux/module.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <asm/irq.h> | ||
12 | #include <asm/io.h> | ||
13 | #include <asm/dma.h> | ||
14 | |||
15 | #include <asm/sbus.h> | ||
16 | |||
17 | #include <scsi/scsi_host.h> | ||
18 | |||
19 | #include "esp_scsi.h" | ||
20 | |||
21 | #define DRV_MODULE_NAME "sun_esp" | ||
22 | #define PFX DRV_MODULE_NAME ": " | ||
23 | #define DRV_VERSION "1.000" | ||
24 | #define DRV_MODULE_RELDATE "April 19, 2007" | ||
25 | |||
26 | #define dma_read32(REG) \ | ||
27 | sbus_readl(esp->dma_regs + (REG)) | ||
28 | #define dma_write32(VAL, REG) \ | ||
29 | sbus_writel((VAL), esp->dma_regs + (REG)) | ||
30 | |||
31 | static int __devinit esp_sbus_find_dma(struct esp *esp, struct sbus_dev *dma_sdev) | ||
32 | { | ||
33 | struct sbus_dev *sdev = esp->dev; | ||
34 | struct sbus_dma *dma; | ||
35 | |||
36 | if (dma_sdev != NULL) { | ||
37 | for_each_dvma(dma) { | ||
38 | if (dma->sdev == dma_sdev) | ||
39 | break; | ||
40 | } | ||
41 | } else { | ||
42 | for_each_dvma(dma) { | ||
43 | if (dma->sdev == NULL) | ||
44 | break; | ||
45 | |||
46 | /* If bus + slot are the same and it has the | ||
47 | * correct OBP name, it's ours. | ||
48 | */ | ||
49 | if (sdev->bus == dma->sdev->bus && | ||
50 | sdev->slot == dma->sdev->slot && | ||
51 | (!strcmp(dma->sdev->prom_name, "dma") || | ||
52 | !strcmp(dma->sdev->prom_name, "espdma"))) | ||
53 | break; | ||
54 | } | ||
55 | } | ||
56 | |||
57 | if (dma == NULL) { | ||
58 | printk(KERN_ERR PFX "[%s] Cannot find dma.\n", | ||
59 | sdev->ofdev.node->full_name); | ||
60 | return -ENODEV; | ||
61 | } | ||
62 | esp->dma = dma; | ||
63 | esp->dma_regs = dma->regs; | ||
64 | |||
65 | return 0; | ||
66 | |||
67 | } | ||
68 | |||
69 | static int __devinit esp_sbus_map_regs(struct esp *esp, int hme) | ||
70 | { | ||
71 | struct sbus_dev *sdev = esp->dev; | ||
72 | struct resource *res; | ||
73 | |||
74 | /* On HME, two reg sets exist, first is DVMA, | ||
75 | * second is ESP registers. | ||
76 | */ | ||
77 | if (hme) | ||
78 | res = &sdev->resource[1]; | ||
79 | else | ||
80 | res = &sdev->resource[0]; | ||
81 | |||
82 | esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP"); | ||
83 | if (!esp->regs) | ||
84 | return -ENOMEM; | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | static int __devinit esp_sbus_map_command_block(struct esp *esp) | ||
90 | { | ||
91 | struct sbus_dev *sdev = esp->dev; | ||
92 | |||
93 | esp->command_block = sbus_alloc_consistent(sdev, 16, | ||
94 | &esp->command_block_dma); | ||
95 | if (!esp->command_block) | ||
96 | return -ENOMEM; | ||
97 | return 0; | ||
98 | } | ||
99 | |||
100 | static int __devinit esp_sbus_register_irq(struct esp *esp) | ||
101 | { | ||
102 | struct Scsi_Host *host = esp->host; | ||
103 | struct sbus_dev *sdev = esp->dev; | ||
104 | |||
105 | host->irq = sdev->irqs[0]; | ||
106 | return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp); | ||
107 | } | ||
108 | |||
109 | static void __devinit esp_get_scsi_id(struct esp *esp) | ||
110 | { | ||
111 | struct sbus_dev *sdev = esp->dev; | ||
112 | struct device_node *dp = sdev->ofdev.node; | ||
113 | |||
114 | esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff); | ||
115 | if (esp->scsi_id != 0xff) | ||
116 | goto done; | ||
117 | |||
118 | esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff); | ||
119 | if (esp->scsi_id != 0xff) | ||
120 | goto done; | ||
121 | |||
122 | if (!sdev->bus) { | ||
123 | /* SUN4 */ | ||
124 | esp->scsi_id = 7; | ||
125 | goto done; | ||
126 | } | ||
127 | |||
128 | esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node, | ||
129 | "scsi-initiator-id", 7); | ||
130 | |||
131 | done: | ||
132 | esp->host->this_id = esp->scsi_id; | ||
133 | esp->scsi_id_mask = (1 << esp->scsi_id); | ||
134 | } | ||
135 | |||
136 | static void __devinit esp_get_differential(struct esp *esp) | ||
137 | { | ||
138 | struct sbus_dev *sdev = esp->dev; | ||
139 | struct device_node *dp = sdev->ofdev.node; | ||
140 | |||
141 | if (of_find_property(dp, "differential", NULL)) | ||
142 | esp->flags |= ESP_FLAG_DIFFERENTIAL; | ||
143 | else | ||
144 | esp->flags &= ~ESP_FLAG_DIFFERENTIAL; | ||
145 | } | ||
146 | |||
147 | static void __devinit esp_get_clock_params(struct esp *esp) | ||
148 | { | ||
149 | struct sbus_dev *sdev = esp->dev; | ||
150 | struct device_node *dp = sdev->ofdev.node; | ||
151 | struct device_node *bus_dp; | ||
152 | int fmhz; | ||
153 | |||
154 | bus_dp = NULL; | ||
155 | if (sdev != NULL && sdev->bus != NULL) | ||
156 | bus_dp = sdev->bus->ofdev.node; | ||
157 | |||
158 | fmhz = of_getintprop_default(dp, "clock-frequency", 0); | ||
159 | if (fmhz == 0) | ||
160 | fmhz = (!bus_dp) ? 0 : | ||
161 | of_getintprop_default(bus_dp, "clock-frequency", 0); | ||
162 | |||
163 | esp->cfreq = fmhz; | ||
164 | } | ||
165 | |||
166 | static void __devinit esp_get_bursts(struct esp *esp, struct sbus_dev *dma) | ||
167 | { | ||
168 | struct sbus_dev *sdev = esp->dev; | ||
169 | struct device_node *dp = sdev->ofdev.node; | ||
170 | u8 bursts; | ||
171 | |||
172 | bursts = of_getintprop_default(dp, "burst-sizes", 0xff); | ||
173 | if (dma) { | ||
174 | struct device_node *dma_dp = dma->ofdev.node; | ||
175 | u8 val = of_getintprop_default(dma_dp, "burst-sizes", 0xff); | ||
176 | if (val != 0xff) | ||
177 | bursts &= val; | ||
178 | } | ||
179 | |||
180 | if (sdev->bus) { | ||
181 | u8 val = of_getintprop_default(sdev->bus->ofdev.node, | ||
182 | "burst-sizes", 0xff); | ||
183 | if (val != 0xff) | ||
184 | bursts &= val; | ||
185 | } | ||
186 | |||
187 | if (bursts == 0xff || | ||
188 | (bursts & DMA_BURST16) == 0 || | ||
189 | (bursts & DMA_BURST32) == 0) | ||
190 | bursts = (DMA_BURST32 - 1); | ||
191 | |||
192 | esp->bursts = bursts; | ||
193 | } | ||
194 | |||
195 | static void __devinit esp_sbus_get_props(struct esp *esp, struct sbus_dev *espdma) | ||
196 | { | ||
197 | esp_get_scsi_id(esp); | ||
198 | esp_get_differential(esp); | ||
199 | esp_get_clock_params(esp); | ||
200 | esp_get_bursts(esp, espdma); | ||
201 | } | ||
202 | |||
203 | static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg) | ||
204 | { | ||
205 | sbus_writeb(val, esp->regs + (reg * 4UL)); | ||
206 | } | ||
207 | |||
208 | static u8 sbus_esp_read8(struct esp *esp, unsigned long reg) | ||
209 | { | ||
210 | return sbus_readb(esp->regs + (reg * 4UL)); | ||
211 | } | ||
212 | |||
213 | static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf, | ||
214 | size_t sz, int dir) | ||
215 | { | ||
216 | return sbus_map_single(esp->dev, buf, sz, dir); | ||
217 | } | ||
218 | |||
219 | static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg, | ||
220 | int num_sg, int dir) | ||
221 | { | ||
222 | return sbus_map_sg(esp->dev, sg, num_sg, dir); | ||
223 | } | ||
224 | |||
225 | static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr, | ||
226 | size_t sz, int dir) | ||
227 | { | ||
228 | sbus_unmap_single(esp->dev, addr, sz, dir); | ||
229 | } | ||
230 | |||
231 | static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg, | ||
232 | int num_sg, int dir) | ||
233 | { | ||
234 | sbus_unmap_sg(esp->dev, sg, num_sg, dir); | ||
235 | } | ||
236 | |||
237 | static int sbus_esp_irq_pending(struct esp *esp) | ||
238 | { | ||
239 | if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR)) | ||
240 | return 1; | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | static void sbus_esp_reset_dma(struct esp *esp) | ||
245 | { | ||
246 | int can_do_burst16, can_do_burst32, can_do_burst64; | ||
247 | int can_do_sbus64, lim; | ||
248 | u32 val; | ||
249 | |||
250 | can_do_burst16 = (esp->bursts & DMA_BURST16) != 0; | ||
251 | can_do_burst32 = (esp->bursts & DMA_BURST32) != 0; | ||
252 | can_do_burst64 = 0; | ||
253 | can_do_sbus64 = 0; | ||
254 | if (sbus_can_dma_64bit(esp->dev)) | ||
255 | can_do_sbus64 = 1; | ||
256 | if (sbus_can_burst64(esp->dev)) | ||
257 | can_do_burst64 = (esp->bursts & DMA_BURST64) != 0; | ||
258 | |||
259 | /* Put the DVMA into a known state. */ | ||
260 | if (esp->dma->revision != dvmahme) { | ||
261 | val = dma_read32(DMA_CSR); | ||
262 | dma_write32(val | DMA_RST_SCSI, DMA_CSR); | ||
263 | dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); | ||
264 | } | ||
265 | switch (esp->dma->revision) { | ||
266 | case dvmahme: | ||
267 | dma_write32(DMA_RESET_FAS366, DMA_CSR); | ||
268 | dma_write32(DMA_RST_SCSI, DMA_CSR); | ||
269 | |||
270 | esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS | | ||
271 | DMA_SCSI_DISAB | DMA_INT_ENAB); | ||
272 | |||
273 | esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE | | ||
274 | DMA_BRST_SZ); | ||
275 | |||
276 | if (can_do_burst64) | ||
277 | esp->prev_hme_dmacsr |= DMA_BRST64; | ||
278 | else if (can_do_burst32) | ||
279 | esp->prev_hme_dmacsr |= DMA_BRST32; | ||
280 | |||
281 | if (can_do_sbus64) { | ||
282 | esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64; | ||
283 | sbus_set_sbus64(esp->dev, esp->bursts); | ||
284 | } | ||
285 | |||
286 | lim = 1000; | ||
287 | while (dma_read32(DMA_CSR) & DMA_PEND_READ) { | ||
288 | if (--lim == 0) { | ||
289 | printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ " | ||
290 | "will not clear!\n", | ||
291 | esp->host->unique_id); | ||
292 | break; | ||
293 | } | ||
294 | udelay(1); | ||
295 | } | ||
296 | |||
297 | dma_write32(0, DMA_CSR); | ||
298 | dma_write32(esp->prev_hme_dmacsr, DMA_CSR); | ||
299 | |||
300 | dma_write32(0, DMA_ADDR); | ||
301 | break; | ||
302 | |||
303 | case dvmarev2: | ||
304 | if (esp->rev != ESP100) { | ||
305 | val = dma_read32(DMA_CSR); | ||
306 | dma_write32(val | DMA_3CLKS, DMA_CSR); | ||
307 | } | ||
308 | break; | ||
309 | |||
310 | case dvmarev3: | ||
311 | val = dma_read32(DMA_CSR); | ||
312 | val &= ~DMA_3CLKS; | ||
313 | val |= DMA_2CLKS; | ||
314 | if (can_do_burst32) { | ||
315 | val &= ~DMA_BRST_SZ; | ||
316 | val |= DMA_BRST32; | ||
317 | } | ||
318 | dma_write32(val, DMA_CSR); | ||
319 | break; | ||
320 | |||
321 | case dvmaesc1: | ||
322 | val = dma_read32(DMA_CSR); | ||
323 | val |= DMA_ADD_ENABLE; | ||
324 | val &= ~DMA_BCNT_ENAB; | ||
325 | if (!can_do_burst32 && can_do_burst16) { | ||
326 | val |= DMA_ESC_BURST; | ||
327 | } else { | ||
328 | val &= ~(DMA_ESC_BURST); | ||
329 | } | ||
330 | dma_write32(val, DMA_CSR); | ||
331 | break; | ||
332 | |||
333 | default: | ||
334 | break; | ||
335 | } | ||
336 | |||
337 | /* Enable interrupts. */ | ||
338 | val = dma_read32(DMA_CSR); | ||
339 | dma_write32(val | DMA_INT_ENAB, DMA_CSR); | ||
340 | } | ||
341 | |||
342 | static void sbus_esp_dma_drain(struct esp *esp) | ||
343 | { | ||
344 | u32 csr; | ||
345 | int lim; | ||
346 | |||
347 | if (esp->dma->revision == dvmahme) | ||
348 | return; | ||
349 | |||
350 | csr = dma_read32(DMA_CSR); | ||
351 | if (!(csr & DMA_FIFO_ISDRAIN)) | ||
352 | return; | ||
353 | |||
354 | if (esp->dma->revision != dvmarev3 && esp->dma->revision != dvmaesc1) | ||
355 | dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR); | ||
356 | |||
357 | lim = 1000; | ||
358 | while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) { | ||
359 | if (--lim == 0) { | ||
360 | printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n", | ||
361 | esp->host->unique_id); | ||
362 | break; | ||
363 | } | ||
364 | udelay(1); | ||
365 | } | ||
366 | } | ||
367 | |||
368 | static void sbus_esp_dma_invalidate(struct esp *esp) | ||
369 | { | ||
370 | if (esp->dma->revision == dvmahme) { | ||
371 | dma_write32(DMA_RST_SCSI, DMA_CSR); | ||
372 | |||
373 | esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr | | ||
374 | (DMA_PARITY_OFF | DMA_2CLKS | | ||
375 | DMA_SCSI_DISAB | DMA_INT_ENAB)) & | ||
376 | ~(DMA_ST_WRITE | DMA_ENABLE)); | ||
377 | |||
378 | dma_write32(0, DMA_CSR); | ||
379 | dma_write32(esp->prev_hme_dmacsr, DMA_CSR); | ||
380 | |||
381 | /* This is necessary to avoid having the SCSI channel | ||
382 | * engine lock up on us. | ||
383 | */ | ||
384 | dma_write32(0, DMA_ADDR); | ||
385 | } else { | ||
386 | u32 val; | ||
387 | int lim; | ||
388 | |||
389 | lim = 1000; | ||
390 | while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) { | ||
391 | if (--lim == 0) { | ||
392 | printk(KERN_ALERT PFX "esp%d: DMA will not " | ||
393 | "invalidate!\n", esp->host->unique_id); | ||
394 | break; | ||
395 | } | ||
396 | udelay(1); | ||
397 | } | ||
398 | |||
399 | val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB); | ||
400 | val |= DMA_FIFO_INV; | ||
401 | dma_write32(val, DMA_CSR); | ||
402 | val &= ~DMA_FIFO_INV; | ||
403 | dma_write32(val, DMA_CSR); | ||
404 | } | ||
405 | } | ||
406 | |||
407 | static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count, | ||
408 | u32 dma_count, int write, u8 cmd) | ||
409 | { | ||
410 | u32 csr; | ||
411 | |||
412 | BUG_ON(!(cmd & ESP_CMD_DMA)); | ||
413 | |||
414 | sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW); | ||
415 | sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED); | ||
416 | if (esp->rev == FASHME) { | ||
417 | sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO); | ||
418 | sbus_esp_write8(esp, 0, FAS_RHI); | ||
419 | |||
420 | scsi_esp_cmd(esp, cmd); | ||
421 | |||
422 | csr = esp->prev_hme_dmacsr; | ||
423 | csr |= DMA_SCSI_DISAB | DMA_ENABLE; | ||
424 | if (write) | ||
425 | csr |= DMA_ST_WRITE; | ||
426 | else | ||
427 | csr &= ~DMA_ST_WRITE; | ||
428 | esp->prev_hme_dmacsr = csr; | ||
429 | |||
430 | dma_write32(dma_count, DMA_COUNT); | ||
431 | dma_write32(addr, DMA_ADDR); | ||
432 | dma_write32(csr, DMA_CSR); | ||
433 | } else { | ||
434 | csr = dma_read32(DMA_CSR); | ||
435 | csr |= DMA_ENABLE; | ||
436 | if (write) | ||
437 | csr |= DMA_ST_WRITE; | ||
438 | else | ||
439 | csr &= ~DMA_ST_WRITE; | ||
440 | dma_write32(csr, DMA_CSR); | ||
441 | if (esp->dma->revision == dvmaesc1) { | ||
442 | u32 end = PAGE_ALIGN(addr + dma_count + 16U); | ||
443 | dma_write32(end - addr, DMA_COUNT); | ||
444 | } | ||
445 | dma_write32(addr, DMA_ADDR); | ||
446 | |||
447 | scsi_esp_cmd(esp, cmd); | ||
448 | } | ||
449 | |||
450 | } | ||
451 | |||
452 | static int sbus_esp_dma_error(struct esp *esp) | ||
453 | { | ||
454 | u32 csr = dma_read32(DMA_CSR); | ||
455 | |||
456 | if (csr & DMA_HNDL_ERROR) | ||
457 | return 1; | ||
458 | |||
459 | return 0; | ||
460 | } | ||
461 | |||
462 | static const struct esp_driver_ops sbus_esp_ops = { | ||
463 | .esp_write8 = sbus_esp_write8, | ||
464 | .esp_read8 = sbus_esp_read8, | ||
465 | .map_single = sbus_esp_map_single, | ||
466 | .map_sg = sbus_esp_map_sg, | ||
467 | .unmap_single = sbus_esp_unmap_single, | ||
468 | .unmap_sg = sbus_esp_unmap_sg, | ||
469 | .irq_pending = sbus_esp_irq_pending, | ||
470 | .reset_dma = sbus_esp_reset_dma, | ||
471 | .dma_drain = sbus_esp_dma_drain, | ||
472 | .dma_invalidate = sbus_esp_dma_invalidate, | ||
473 | .send_dma_cmd = sbus_esp_send_dma_cmd, | ||
474 | .dma_error = sbus_esp_dma_error, | ||
475 | }; | ||
476 | |||
477 | static int __devinit esp_sbus_probe_one(struct device *dev, | ||
478 | struct sbus_dev *esp_dev, | ||
479 | struct sbus_dev *espdma, | ||
480 | struct sbus_bus *sbus, | ||
481 | int hme) | ||
482 | { | ||
483 | struct scsi_host_template *tpnt = &scsi_esp_template; | ||
484 | struct Scsi_Host *host; | ||
485 | struct esp *esp; | ||
486 | int err; | ||
487 | |||
488 | host = scsi_host_alloc(tpnt, sizeof(struct esp)); | ||
489 | |||
490 | err = -ENOMEM; | ||
491 | if (!host) | ||
492 | goto fail; | ||
493 | |||
494 | host->max_id = (hme ? 16 : 8); | ||
495 | esp = host_to_esp(host); | ||
496 | |||
497 | esp->host = host; | ||
498 | esp->dev = esp_dev; | ||
499 | esp->ops = &sbus_esp_ops; | ||
500 | |||
501 | if (hme) | ||
502 | esp->flags |= ESP_FLAG_WIDE_CAPABLE; | ||
503 | |||
504 | err = esp_sbus_find_dma(esp, espdma); | ||
505 | if (err < 0) | ||
506 | goto fail_unlink; | ||
507 | |||
508 | err = esp_sbus_map_regs(esp, hme); | ||
509 | if (err < 0) | ||
510 | goto fail_unlink; | ||
511 | |||
512 | err = esp_sbus_map_command_block(esp); | ||
513 | if (err < 0) | ||
514 | goto fail_unmap_regs; | ||
515 | |||
516 | err = esp_sbus_register_irq(esp); | ||
517 | if (err < 0) | ||
518 | goto fail_unmap_command_block; | ||
519 | |||
520 | esp_sbus_get_props(esp, espdma); | ||
521 | |||
522 | /* Before we try to touch the ESP chip, ESC1 dma can | ||
523 | * come up with the reset bit set, so make sure that | ||
524 | * is clear first. | ||
525 | */ | ||
526 | if (esp->dma->revision == dvmaesc1) { | ||
527 | u32 val = dma_read32(DMA_CSR); | ||
528 | |||
529 | dma_write32(val & ~DMA_RST_SCSI, DMA_CSR); | ||
530 | } | ||
531 | |||
532 | dev_set_drvdata(&esp_dev->ofdev.dev, esp); | ||
533 | |||
534 | err = scsi_esp_register(esp, dev); | ||
535 | if (err) | ||
536 | goto fail_free_irq; | ||
537 | |||
538 | return 0; | ||
539 | |||
540 | fail_free_irq: | ||
541 | free_irq(host->irq, esp); | ||
542 | fail_unmap_command_block: | ||
543 | sbus_free_consistent(esp->dev, 16, | ||
544 | esp->command_block, | ||
545 | esp->command_block_dma); | ||
546 | fail_unmap_regs: | ||
547 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | ||
548 | fail_unlink: | ||
549 | scsi_host_put(host); | ||
550 | fail: | ||
551 | return err; | ||
552 | } | ||
553 | |||
554 | static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match) | ||
555 | { | ||
556 | struct sbus_dev *sdev = to_sbus_device(&dev->dev); | ||
557 | struct device_node *dp = dev->node; | ||
558 | struct sbus_dev *dma_sdev = NULL; | ||
559 | int hme = 0; | ||
560 | |||
561 | if (dp->parent && | ||
562 | (!strcmp(dp->parent->name, "espdma") || | ||
563 | !strcmp(dp->parent->name, "dma"))) | ||
564 | dma_sdev = sdev->parent; | ||
565 | else if (!strcmp(dp->name, "SUNW,fas")) { | ||
566 | dma_sdev = sdev; | ||
567 | hme = 1; | ||
568 | } | ||
569 | |||
570 | return esp_sbus_probe_one(&dev->dev, sdev, dma_sdev, | ||
571 | sdev->bus, hme); | ||
572 | } | ||
573 | |||
574 | static int __devexit esp_sbus_remove(struct of_device *dev) | ||
575 | { | ||
576 | struct esp *esp = dev_get_drvdata(&dev->dev); | ||
577 | unsigned int irq = esp->host->irq; | ||
578 | u32 val; | ||
579 | |||
580 | scsi_esp_unregister(esp); | ||
581 | |||
582 | /* Disable interrupts. */ | ||
583 | val = dma_read32(DMA_CSR); | ||
584 | dma_write32(val & ~DMA_INT_ENAB, DMA_CSR); | ||
585 | |||
586 | free_irq(irq, esp); | ||
587 | sbus_free_consistent(esp->dev, 16, | ||
588 | esp->command_block, | ||
589 | esp->command_block_dma); | ||
590 | sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE); | ||
591 | |||
592 | scsi_host_put(esp->host); | ||
593 | |||
594 | return 0; | ||
595 | } | ||
596 | |||
597 | static struct of_device_id esp_match[] = { | ||
598 | { | ||
599 | .name = "SUNW,esp", | ||
600 | }, | ||
601 | { | ||
602 | .name = "SUNW,fas", | ||
603 | }, | ||
604 | { | ||
605 | .name = "esp", | ||
606 | }, | ||
607 | {}, | ||
608 | }; | ||
609 | MODULE_DEVICE_TABLE(of, esp_match); | ||
610 | |||
611 | static struct of_platform_driver esp_sbus_driver = { | ||
612 | .name = "esp", | ||
613 | .match_table = esp_match, | ||
614 | .probe = esp_sbus_probe, | ||
615 | .remove = __devexit_p(esp_sbus_remove), | ||
616 | }; | ||
617 | |||
618 | static int __init sunesp_init(void) | ||
619 | { | ||
620 | return of_register_driver(&esp_sbus_driver, &sbus_bus_type); | ||
621 | } | ||
622 | |||
623 | static void __exit sunesp_exit(void) | ||
624 | { | ||
625 | of_unregister_driver(&esp_sbus_driver); | ||
626 | } | ||
627 | |||
628 | MODULE_DESCRIPTION("Sun ESP SCSI driver"); | ||
629 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
630 | MODULE_LICENSE("GPL"); | ||
631 | MODULE_VERSION(DRV_VERSION); | ||
632 | |||
633 | module_init(sunesp_init); | ||
634 | module_exit(sunesp_exit); | ||