aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/scsi/scsi_fc_transport.txt450
-rw-r--r--drivers/block/cciss_scsi.c75
-rw-r--r--drivers/ieee1394/sbp2.c75
-rw-r--r--drivers/message/fusion/mptscsih.c78
-rw-r--r--drivers/scsi/3w-9xxx.c139
-rw-r--r--drivers/scsi/3w-xxxx.c104
-rw-r--r--drivers/scsi/53c700.c55
-rw-r--r--drivers/scsi/53c7xx.c6102
-rw-r--r--drivers/scsi/53c7xx.h1608
-rw-r--r--drivers/scsi/53c7xx.scr1591
-rw-r--r--drivers/scsi/53c7xx_d.h_shipped2874
-rw-r--r--drivers/scsi/53c7xx_u.h_shipped102
-rw-r--r--drivers/scsi/BusLogic.c51
-rw-r--r--drivers/scsi/Kconfig2
-rw-r--r--drivers/scsi/Makefile12
-rw-r--r--drivers/scsi/NCR5380.c14
-rw-r--r--drivers/scsi/NCR5380.h6
-rw-r--r--drivers/scsi/NCR53c406a.c45
-rw-r--r--drivers/scsi/a100u2w.c50
-rw-r--r--drivers/scsi/aacraid/aachba.c62
-rw-r--r--drivers/scsi/aacraid/aacraid.h11
-rw-r--r--drivers/scsi/aacraid/linit.c4
-rw-r--r--drivers/scsi/advansys.c101
-rw-r--r--drivers/scsi/advansys.h36
-rw-r--r--drivers/scsi/aha152x.c50
-rw-r--r--drivers/scsi/aha1740.c48
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.c51
-rw-r--r--drivers/scsi/aic7xxx/aic79xx_osm.h4
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.c59
-rw-r--r--drivers/scsi/aic7xxx/aic7xxx_osm.h4
-rw-r--r--drivers/scsi/aic7xxx_old.c55
-rw-r--r--drivers/scsi/amiga7xx.c138
-rw-r--r--drivers/scsi/amiga7xx.h23
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c103
-rw-r--r--drivers/scsi/bvme6000.c76
-rw-r--r--drivers/scsi/bvme6000.h24
-rw-r--r--drivers/scsi/eata.c48
-rw-r--r--drivers/scsi/fdomain.c70
-rw-r--r--drivers/scsi/gdth.c4
-rw-r--r--drivers/scsi/ibmmca.c1267
-rw-r--r--drivers/scsi/ibmmca.h21
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c18
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.h1
-rw-r--r--drivers/scsi/initio.c3819
-rw-r--r--drivers/scsi/initio.h313
-rw-r--r--drivers/scsi/ipr.c144
-rw-r--r--drivers/scsi/ips.c145
-rw-r--r--drivers/scsi/ips.h44
-rw-r--r--drivers/scsi/jazz_esp.c2
-rw-r--r--drivers/scsi/libsas/sas_scsi_host.c10
-rw-r--r--drivers/scsi/mac53c94.c62
-rw-r--r--drivers/scsi/megaraid.c10
-rw-r--r--drivers/scsi/mvme16x.c78
-rw-r--r--drivers/scsi/mvme16x.h24
-rw-r--r--drivers/scsi/nsp32.c194
-rw-r--r--drivers/scsi/pcmcia/sym53c500_cs.c42
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c2
-rw-r--r--drivers/scsi/qla4xxx/ql4_dbg.c174
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h78
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h426
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h7
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c92
-rw-r--r--drivers/scsi/qla4xxx/ql4_iocb.c101
-rw-r--r--drivers/scsi/qla4xxx/ql4_isr.c63
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c274
-rw-r--r--drivers/scsi/qla4xxx/ql4_nvram.c3
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c26
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h3
-rw-r--r--drivers/scsi/qlogicfas408.c30
-rw-r--r--drivers/scsi/scsi_error.c26
-rw-r--r--drivers/scsi/scsi_lib.c38
-rw-r--r--drivers/scsi/scsi_sysfs.c25
-rw-r--r--drivers/scsi/scsi_transport_fc.c828
-rw-r--r--drivers/scsi/sd.c2
-rw-r--r--drivers/scsi/sg.c2
-rw-r--r--drivers/scsi/stex.c109
-rw-r--r--drivers/scsi/sym53c416.c44
-rw-r--r--drivers/scsi/tmscsim.c59
-rw-r--r--drivers/scsi/u14-34f.c60
-rw-r--r--drivers/scsi/ultrastor.c19
-rw-r--r--drivers/scsi/wd7000.c20
-rw-r--r--include/scsi/scsi_cmnd.h20
-rw-r--r--include/scsi/scsi_host.h6
-rw-r--r--include/scsi/scsi_transport_fc.h186
84 files changed, 5228 insertions, 18093 deletions
diff --git a/Documentation/scsi/scsi_fc_transport.txt b/Documentation/scsi/scsi_fc_transport.txt
new file mode 100644
index 000000000000..d403e46d8463
--- /dev/null
+++ b/Documentation/scsi/scsi_fc_transport.txt
@@ -0,0 +1,450 @@
1 SCSI FC Transport
2 =============================================
3
4Date: 4/12/2007
5Kernel Revisions for features:
6 rports : <<TBS>>
7 vports : 2.6.22 (? TBD)
8
9
10Introduction
11============
12This file documents the features and components of the SCSI FC Transport.
13It also documents the API between the transport and FC LLDDs.
14The FC transport can be found at:
15 drivers/scsi/scsi_transport_fc.c
16 include/scsi/scsi_transport_fc.h
17 include/scsi/scsi_netlink_fc.h
18
19This file is found at Documentation/scsi/scsi_fc_transport.txt
20
21
22FC Remote Ports (rports)
23========================================================================
24<< To Be Supplied >>
25
26
27FC Virtual Ports (vports)
28========================================================================
29
30Overview:
31-------------------------------
32
33 New FC standards have defined mechanisms which allow for a single physical
34 port to appear as multiple communication ports. Using the N_Port Id
35 Virtualization (NPIV) mechanism, a point-to-point connection to a Fabric
36 can be assigned more than 1 N_Port_ID. Each N_Port_ID appears as a
37 separate port to other endpoints on the fabric, even though it shares one
38 physical link to the switch for communication. Each N_Port_ID can have a
39 unique view of the fabric based on fabric zoning and array lun-masking
40 (just like a normal non-NPIV adapter). Using the Virtual Fabric (VF)
41 mechanism, adding a fabric header to each frame allows the port to
42 interact with the Fabric Port to join multiple fabrics. The port will
43 obtain an N_Port_ID on each fabric it joins. Each fabric will have its
44 own unique view of endpoints and configuration parameters. NPIV may be
45 used together with VF so that the port can obtain multiple N_Port_IDs
46 on each virtual fabric.
47
48 The FC transport is now recognizing a new object - a vport. A vport is
49 an entity that has a world-wide unique World Wide Port Name (wwpn) and
50 World Wide Node Name (wwnn). The transport also allows for the FC4's to
51 be specified for the vport, with FCP_Initiator being the primary role
52 expected. Once instantiated by one of the above methods, it will have a
53 distinct N_Port_ID and view of fabric endpoints and storage entities.
54 The fc_host associated with the physical adapter will export the ability
55 to create vports. The transport will create the vport object within the
56 Linux device tree, and instruct the fc_host's driver to instantiate the
57 virtual port. Typically, the driver will create a new scsi_host instance
58 on the vport, resulting in a unique <H,C,T,L> namespace for the vport.
59 Thus, whether a FC port is based on a physical port or on a virtual port,
60 each will appear as a unique scsi_host with its own target and lun space.
61
62 Note: At this time, the transport is written to create only NPIV-based
63 vports. However, consideration was given to VF-based vports and it
64 should be a minor change to add support if needed. The remaining
65 discussion will concentrate on NPIV.
66
67 Note: World Wide Name assignment (and uniqueness guarantees) are left
68 up to an administrative entity controlling the vport. For example,
69 if vports are to be associated with virtual machines, a XEN mgmt
70 utility would be responsible for creating wwpn/wwnn's for the vport,
71 using its own naming authority and OUI. (Note: it already does this
72 for virtual MAC addresses).
73
74
75Device Trees and Vport Objects:
76-------------------------------
77
78 Today, the device tree typically contains the scsi_host object,
79 with rports and scsi target objects underneath it. Currently the FC
80 transport creates the vport object and places it under the scsi_host
81 object corresponding to the physical adapter. The LLDD will allocate
82 a new scsi_host for the vport and link its object under the vport.
83 The remainder of the tree under the vports scsi_host is the same
84 as the non-NPIV case. The transport is written currently to easily
85 allow the parent of the vport to be something other than the scsi_host.
86 This could be used in the future to link the object onto a vm-specific
87 device tree. If the vport's parent is not the physical port's scsi_host,
88 a symbolic link to the vport object will be placed in the physical
89 port's scsi_host.
90
91 Here's what to expect in the device tree :
92 The typical Physical Port's Scsi_Host:
93 /sys/devices/.../host17/
94 and it has the typical descendant tree:
95 /sys/devices/.../host17/rport-17:0-0/target17:0:0/17:0:0:0:
96 and then the vport is created on the Physical Port:
97 /sys/devices/.../host17/vport-17:0-0
98 and the vport's Scsi_Host is then created:
99 /sys/devices/.../host17/vport-17:0-0/host18
100 and then the rest of the tree progresses, such as:
101 /sys/devices/.../host17/vport-17:0-0/host18/rport-18:0-0/target18:0:0/18:0:0:0:
102
103 Here's what to expect in the sysfs tree :
104 scsi_hosts:
105 /sys/class/scsi_host/host17 physical port's scsi_host
106 /sys/class/scsi_host/host18 vport's scsi_host
107 fc_hosts:
108 /sys/class/fc_host/host17 physical port's fc_host
109 /sys/class/fc_host/host18 vport's fc_host
110 fc_vports:
111 /sys/class/fc_vports/vport-17:0-0 the vport's fc_vport
112 fc_rports:
113 /sys/class/fc_remote_ports/rport-17:0-0 rport on the physical port
114 /sys/class/fc_remote_ports/rport-18:0-0 rport on the vport
115
116
117Vport Attributes:
118-------------------------------
119
120 The new fc_vport class object has the following attributes
121
122 node_name: Read_Only
123 The WWNN of the vport
124
125 port_name: Read_Only
126 The WWPN of the vport
127
128 roles: Read_Only
129 Indicates the FC4 roles enabled on the vport.
130
131 symbolic_name: Read_Write
132 A string, appended to the driver's symbolic port name string, which
133 is registered with the switch to identify the vport. For example,
134 a hypervisor could set this string to "Xen Domain 2 VM 5 Vport 2",
135 and this set of identifiers can be seen on switch management screens
136 to identify the port.
137
138 vport_delete: Write_Only
139 When written with a "1", will tear down the vport.
140
141 vport_disable: Write_Only
142 When written with a "1", will transition the vport to a disabled
143 state. The vport will still be instantiated with the Linux kernel,
144 but it will not be active on the FC link.
145 When written with a "0", will enable the vport.
146
147 vport_last_state: Read_Only
148 Indicates the previous state of the vport. See the section below on
149 "Vport States".
150
151 vport_state: Read_Only
152 Indicates the state of the vport. See the section below on
153 "Vport States".
154
155 vport_type: Read_Only
156 Reflects the FC mechanism used to create the virtual port.
157 Only NPIV is supported currently.
158
159
160 For the fc_host class object, the following attributes are added for vports:
161
162 max_npiv_vports: Read_Only
163 Indicates the maximum number of NPIV-based vports that the
164 driver/adapter can support on the fc_host.
165
166 npiv_vports_inuse: Read_Only
167 Indicates how many NPIV-based vports have been instantiated on the
168 fc_host.
169
170 vport_create: Write_Only
171 A "simple" create interface to instantiate a vport on an fc_host.
172 A "<WWPN>:<WWNN>" string is written to the attribute. The transport
173 then instantiates the vport object and calls the LLDD to create the
174 vport with the role of FCP_Initiator. Each WWN is specified as 16
175 hex characters and may *not* contain any prefixes (e.g. 0x, x, etc).
176
177 vport_delete: Write_Only
178 A "simple" delete interface to teardown a vport. A "<WWPN>:<WWNN>"
179 string is written to the attribute. The transport will locate the
180 vport on the fc_host with the same WWNs and tear it down. Each WWN
181 is specified as 16 hex characters and may *not* contain any prefixes
182 (e.g. 0x, x, etc).
183
184
185Vport States:
186-------------------------------
187
188 Vport instantiation consists of two parts:
189 - Creation with the kernel and LLDD. This means all transport and
190 driver data structures are built up, and device objects created.
191 This is equivalent to a driver "attach" on an adapter, which is
192 independent of the adapter's link state.
193 - Instantiation of the vport on the FC link via ELS traffic, etc.
194 This is equivalent to a "link up" and successful link initialization.
195 Further information can be found in the interfaces section below for
196 Vport Creation.
197
198 Once a vport has been instantiated with the kernel/LLDD, a vport state
199 can be reported via the sysfs attribute. The following states exist:
200
201 FC_VPORT_UNKNOWN - Unknown
202 A temporary state, typically set only while the vport is being
203 instantiated with the kernel and LLDD.
204
205 FC_VPORT_ACTIVE - Active
206 The vport has been successfully created on the FC link.
207 It is fully functional.
208
209 FC_VPORT_DISABLED - Disabled
210 The vport is instantiated, but "disabled". The vport is not instantiated
211 on the FC link. This is equivalent to a physical port with the
212 link "down".
213
214 FC_VPORT_LINKDOWN - Linkdown
215 The vport is not operational as the physical link is not operational.
216
217 FC_VPORT_INITIALIZING - Initializing
218 The vport is in the process of instantiating on the FC link.
219 The LLDD will set this state just prior to starting the ELS traffic
220 to create the vport. This state will persist until the vport is
221 successfully created (state becomes FC_VPORT_ACTIVE) or it fails
222 (state is one of the values below). As this state is transitory,
223 it will not be preserved in the "vport_last_state".
224
225 FC_VPORT_NO_FABRIC_SUPP - No Fabric Support
226 The vport is not operational. One of the following conditions were
227 encountered:
228 - The FC topology is not Point-to-Point
229 - The FC port is not connected to an F_Port
230 - The F_Port has indicated that NPIV is not supported.
231
232 FC_VPORT_NO_FABRIC_RSCS - No Fabric Resources
233 The vport is not operational. The Fabric failed FDISC with a status
234 indicating that it does not have sufficient resources to complete
235 the operation.
236
237 FC_VPORT_FABRIC_LOGOUT - Fabric Logout
238 The vport is not operational. The Fabric has LOGO'd the N_Port_ID
239 associated with the vport.
240
241 FC_VPORT_FABRIC_REJ_WWN - Fabric Rejected WWN
242 The vport is not operational. The Fabric failed FDISC with a status
243 indicating that the WWN's are not valid.
244
245 FC_VPORT_FAILED - VPort Failed
246 The vport is not operational. This is a catchall for all other
247 error conditions.
248
249
250 The following state table indicates the different state transitions:
251
252 State Event New State
253 --------------------------------------------------------------------
254 n/a Initialization Unknown
255 Unknown: Link Down Linkdown
256 Link Up & Loop No Fabric Support
257 Link Up & no Fabric No Fabric Support
258 Link Up & FLOGI response No Fabric Support
259 indicates no NPIV support
260 Link Up & FDISC being sent Initializing
261 Disable request Disable
262 Linkdown: Link Up Unknown
263 Initializing: FDISC ACC Active
264 FDISC LS_RJT w/ no resources No Fabric Resources
265 FDISC LS_RJT w/ invalid Fabric Rejected WWN
266 pname or invalid nport_id
267 FDISC LS_RJT failed for Vport Failed
268 other reasons
269 Link Down Linkdown
270 Disable request Disable
271 Disable: Enable request Unknown
272 Active: LOGO received from fabric Fabric Logout
273 Link Down Linkdown
274 Disable request Disable
275 Fabric Logout: Link still up Unknown
276
277 The following 4 error states all have the same transitions:
278 No Fabric Support:
279 No Fabric Resources:
280 Fabric Rejected WWN:
281 Vport Failed:
282 Disable request Disable
283 Link goes down Linkdown
284
285
286Transport <-> LLDD Interfaces :
287-------------------------------
288
289Vport support by LLDD:
290
291 The LLDD indicates support for vports by supplying a vport_create()
292 function in the transport template. The presence of this function will
293 cause the creation of the new attributes on the fc_host. As part of
294 the physical port completing its initialization relative to the
295 transport, it should set the max_npiv_vports attribute to indicate the
296 maximum number of vports the driver and/or adapter supports.
297
298
299Vport Creation:
300
301 The LLDD vport_create() syntax is:
302
303 int vport_create(struct fc_vport *vport, bool disable)
304
305 where:
306 vport: Is the newly allocated vport object
307 disable: If "true", the vport is to be created in a disabled state.
308 If "false", the vport is to be enabled upon creation.
309
310 When a request is made to create a new vport (via sgio/netlink, or the
311 vport_create fc_host attribute), the transport will validate that the LLDD
312 can support another vport (e.g. max_npiv_vports > npiv_vports_inuse).
313 If not, the create request will be failed. If space remains, the transport
314 will increment the vport count, create the vport object, and then call the
315 LLDD's vport_create() function with the newly allocated vport object.
316
317 As mentioned above, vport creation is divided into two parts:
318 - Creation with the kernel and LLDD. This means all transport and
319 driver data structures are built up, and device objects created.
320 This is equivalent to a driver "attach" on an adapter, which is
321 independent of the adapter's link state.
322 - Instantiation of the vport on the FC link via ELS traffic, etc.
323 This is equivalent to a "link up" and successful link initialization.
324
325 The LLDD's vport_create() function will not synchronously wait for both
326 parts to be fully completed before returning. It must validate that the
327 infrastructure exists to support NPIV, and complete the first part of
328 vport creation (data structure build up) before returning. We do not
329 hinge vport_create() on the link-side operation mainly because:
330 - The link may be down. It is not a failure if it is. It simply
331 means the vport is in an inoperable state until the link comes up.
332 This is consistent with the link bouncing post vport creation.
333 - The vport may be created in a disabled state.
334 - This is consistent with a model where: the vport equates to a
335 FC adapter. The vport_create is synonymous with driver attachment
336 to the adapter, which is independent of link state.
337
338 Note: special error codes have been defined to delineate infrastructure
339 failure cases for quicker resolution.
340
341 The expected behavior for the LLDD's vport_create() function is:
342 - Validate Infrastructure:
343 - If the driver or adapter cannot support another vport, whether
344 due to improper firmware, (a lie about) max_npiv, or a lack of
345 some other resource - return VPCERR_UNSUPPORTED.
346 - If the driver validates the WWN's against those already active on
347 the adapter and detects an overlap - return VPCERR_BAD_WWN.
348 - If the driver detects the topology is loop, non-fabric, or the
349 FLOGI did not support NPIV - return VPCERR_NO_FABRIC_SUPP.
350 - Allocate data structures. If errors are encountered, such as out
351 of memory conditions, return the respective negative Exxx error code.
352 - If the role is FCP Initiator, the LLDD is to :
353 - Call scsi_host_alloc() to allocate a scsi_host for the vport.
354 - Call scsi_add_host(new_shost, &vport->dev) to start the scsi_host
355 and bind it as a child of the vport device.
356 - Initializes the fc_host attribute values.
357 - Kick off further vport state transitions based on the disable flag and
358 link state - and return success (zero).
359
360 LLDD Implementers Notes:
361 - It is suggested that there be a different fc_function_templates for
362 the physical port and the virtual port. The physical port's template
363 would have the vport_create, vport_delete, and vport_disable functions,
364 while the vports would not.
365 - It is suggested that there be different scsi_host_templates
366 for the physical port and virtual port. Likely, there are driver
367 attributes, embedded into the scsi_host_template, that are applicable
368 for the physical port only (link speed, topology setting, etc). This
369 ensures that the attributes are applicable to the respective scsi_host.
370
371
372Vport Disable/Enable:
373
374 The LLDD vport_disable() syntax is:
375
376 int vport_disable(struct fc_vport *vport, bool disable)
377
378 where:
379 vport: Is vport to be enabled or disabled
380 disable: If "true", the vport is to be disabled.
381 If "false", the vport is to be enabled.
382
383 When a request is made to change the disabled state on a vport, the
384 transport will validate the request against the existing vport state.
385 If the request is to disable and the vport is already disabled, the
386 request will fail. Similarly, if the request is to enable, and the
387 vport is not in a disabled state, the request will fail. If the request
388 is valid for the vport state, the transport will call the LLDD to
389 change the vport's state.
390
391 Within the LLDD, if a vport is disabled, it remains instantiated with
392 the kernel and LLDD, but it is not active or visible on the FC link in
393 any way. (see Vport Creation and the 2 part instantiation discussion).
394 The vport will remain in this state until it is deleted or re-enabled.
395 When enabling a vport, the LLDD reinstantiates the vport on the FC
396 link - essentially restarting the LLDD statemachine (see Vport States
397 above).
398
399
400Vport Deletion:
401
402 The LLDD vport_delete() syntax is:
403
404 int vport_delete(struct fc_vport *vport)
405
406 where:
407 vport: Is vport to delete
408
409 When a request is made to delete a vport (via sgio/netlink, or via the
410 fc_host or fc_vport vport_delete attributes), the transport will call
411 the LLDD to terminate the vport on the FC link, and teardown all other
412 datastructures and references. If the LLDD completes successfully,
413 the transport will teardown the vport objects and complete the vport
414 removal. If the LLDD delete request fails, the vport object will remain,
415 but will be in an indeterminate state.
416
417 Within the LLDD, the normal code paths for a scsi_host teardown should
418 be followed. E.g. If the vport has a FCP Initiator role, the LLDD
419 will call fc_remove_host() for the vport's scsi_host, followed by
420 scsi_remove_host() and scsi_host_put() for the vport's scsi_host.
421
422
423Other:
424 fc_host port_type attribute:
425 There is a new fc_host port_type value - FC_PORTTYPE_NPIV. This value
426 must be set on all vport-based fc_hosts. Normally, on a physical port,
427 the port_type attribute would be set to NPORT, NLPORT, etc based on the
428 topology type and existence of the fabric. As this is not applicable to
429 a vport, it makes more sense to report the FC mechanism used to create
430 the vport.
431
432 Driver unload:
433 FC drivers are required to call fc_remove_host() prior to calling
434 scsi_remove_host(). This allows the fc_host to tear down all remote
435 ports prior to the scsi_host being torn down. The fc_remove_host() call
436 was updated to remove all vports for the fc_host as well.
437
438
439Credits
440=======
441The following people have contributed to this document:
442
443
444
445
446
447
448James Smart
449james.smart@emulex.com
450
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index 90961a8ea895..4aca7ddfdddf 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -555,7 +555,6 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
555{ 555{
556 struct scsi_cmnd *cmd; 556 struct scsi_cmnd *cmd;
557 ctlr_info_t *ctlr; 557 ctlr_info_t *ctlr;
558 u64bit addr64;
559 ErrorInfo_struct *ei; 558 ErrorInfo_struct *ei;
560 559
561 ei = cp->err_info; 560 ei = cp->err_info;
@@ -569,20 +568,7 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
569 cmd = (struct scsi_cmnd *) cp->scsi_cmd; 568 cmd = (struct scsi_cmnd *) cp->scsi_cmd;
570 ctlr = hba[cp->ctlr]; 569 ctlr = hba[cp->ctlr];
571 570
572 /* undo the DMA mappings */ 571 scsi_dma_unmap(cmd);
573
574 if (cmd->use_sg) {
575 pci_unmap_sg(ctlr->pdev,
576 cmd->request_buffer, cmd->use_sg,
577 cmd->sc_data_direction);
578 }
579 else if (cmd->request_bufflen) {
580 addr64.val32.lower = cp->SG[0].Addr.lower;
581 addr64.val32.upper = cp->SG[0].Addr.upper;
582 pci_unmap_single(ctlr->pdev, (dma_addr_t) addr64.val,
583 cmd->request_bufflen,
584 cmd->sc_data_direction);
585 }
586 572
587 cmd->result = (DID_OK << 16); /* host byte */ 573 cmd->result = (DID_OK << 16); /* host byte */
588 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ 574 cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */
@@ -597,7 +583,7 @@ complete_scsi_command( CommandList_struct *cp, int timeout, __u32 tag)
597 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ? 583 ei->SenseLen > SCSI_SENSE_BUFFERSIZE ?
598 SCSI_SENSE_BUFFERSIZE : 584 SCSI_SENSE_BUFFERSIZE :
599 ei->SenseLen); 585 ei->SenseLen);
600 cmd->resid = ei->ResidualCnt; 586 scsi_set_resid(cmd, ei->ResidualCnt);
601 587
602 if(ei->CommandStatus != 0) 588 if(ei->CommandStatus != 0)
603 { /* an error has occurred */ 589 { /* an error has occurred */
@@ -1204,46 +1190,29 @@ cciss_scatter_gather(struct pci_dev *pdev,
1204 CommandList_struct *cp, 1190 CommandList_struct *cp,
1205 struct scsi_cmnd *cmd) 1191 struct scsi_cmnd *cmd)
1206{ 1192{
1207 unsigned int use_sg, nsegs=0, len; 1193 unsigned int len;
1208 struct scatterlist *scatter = (struct scatterlist *) cmd->request_buffer; 1194 struct scatterlist *sg;
1209 __u64 addr64; 1195 __u64 addr64;
1210 1196 int use_sg, i;
1211 /* is it just one virtual address? */ 1197
1212 if (!cmd->use_sg) { 1198 BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
1213 if (cmd->request_bufflen) { /* anything to xfer? */ 1199
1214 1200 use_sg = scsi_dma_map(cmd);
1215 addr64 = (__u64) pci_map_single(pdev, 1201 if (use_sg) { /* not too many addrs? */
1216 cmd->request_buffer, 1202 scsi_for_each_sg(cmd, sg, use_sg, i) {
1217 cmd->request_bufflen, 1203 addr64 = (__u64) sg_dma_address(sg);
1218 cmd->sc_data_direction); 1204 len = sg_dma_len(sg);
1219 1205 cp->SG[i].Addr.lower =
1220 cp->SG[0].Addr.lower = 1206 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1221 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); 1207 cp->SG[i].Addr.upper =
1222 cp->SG[0].Addr.upper = 1208 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1223 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); 1209 cp->SG[i].Len = len;
1224 cp->SG[0].Len = cmd->request_bufflen; 1210 cp->SG[i].Ext = 0; // we are not chaining
1225 nsegs=1;
1226 }
1227 } /* else, must be a list of virtual addresses.... */
1228 else if (cmd->use_sg <= MAXSGENTRIES) { /* not too many addrs? */
1229
1230 use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg,
1231 cmd->sc_data_direction);
1232
1233 for (nsegs=0; nsegs < use_sg; nsegs++) {
1234 addr64 = (__u64) sg_dma_address(&scatter[nsegs]);
1235 len = sg_dma_len(&scatter[nsegs]);
1236 cp->SG[nsegs].Addr.lower =
1237 (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF);
1238 cp->SG[nsegs].Addr.upper =
1239 (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF);
1240 cp->SG[nsegs].Len = len;
1241 cp->SG[nsegs].Ext = 0; // we are not chaining
1242 } 1211 }
1243 } else BUG(); 1212 }
1244 1213
1245 cp->Header.SGList = (__u8) nsegs; /* no. SGs contig in this cmd */ 1214 cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */
1246 cp->Header.SGTotal = (__u16) nsegs; /* total sgs in this cmd list */ 1215 cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */
1247 return; 1216 return;
1248} 1217}
1249 1218
diff --git a/drivers/ieee1394/sbp2.c b/drivers/ieee1394/sbp2.c
index 875eadd5e8f5..ce86ff226a28 100644
--- a/drivers/ieee1394/sbp2.c
+++ b/drivers/ieee1394/sbp2.c
@@ -1489,69 +1489,6 @@ static void sbp2_prep_command_orb_sg(struct sbp2_command_orb *orb,
1489 } 1489 }
1490} 1490}
1491 1491
1492static void sbp2_prep_command_orb_no_sg(struct sbp2_command_orb *orb,
1493 struct sbp2_fwhost_info *hi,
1494 struct sbp2_command_info *cmd,
1495 struct scatterlist *sgpnt,
1496 u32 orb_direction,
1497 unsigned int scsi_request_bufflen,
1498 void *scsi_request_buffer,
1499 enum dma_data_direction dma_dir)
1500{
1501 cmd->dma_dir = dma_dir;
1502 cmd->dma_size = scsi_request_bufflen;
1503 cmd->dma_type = CMD_DMA_SINGLE;
1504 cmd->cmd_dma = dma_map_single(hi->host->device.parent,
1505 scsi_request_buffer,
1506 cmd->dma_size, cmd->dma_dir);
1507 orb->data_descriptor_hi = ORB_SET_NODE_ID(hi->host->node_id);
1508 orb->misc |= ORB_SET_DIRECTION(orb_direction);
1509
1510 /* handle case where we get a command w/o s/g enabled
1511 * (but check for transfers larger than 64K) */
1512 if (scsi_request_bufflen <= SBP2_MAX_SG_ELEMENT_LENGTH) {
1513
1514 orb->data_descriptor_lo = cmd->cmd_dma;
1515 orb->misc |= ORB_SET_DATA_SIZE(scsi_request_bufflen);
1516
1517 } else {
1518 /* The buffer is too large. Turn this into page tables. */
1519
1520 struct sbp2_unrestricted_page_table *sg_element =
1521 &cmd->scatter_gather_element[0];
1522 u32 sg_count, sg_len;
1523 dma_addr_t sg_addr;
1524
1525 orb->data_descriptor_lo = cmd->sge_dma;
1526 orb->misc |= ORB_SET_PAGE_TABLE_PRESENT(0x1);
1527
1528 /* fill out our SBP-2 page tables; split up the large buffer */
1529 sg_count = 0;
1530 sg_len = scsi_request_bufflen;
1531 sg_addr = cmd->cmd_dma;
1532 while (sg_len) {
1533 sg_element[sg_count].segment_base_lo = sg_addr;
1534 if (sg_len > SBP2_MAX_SG_ELEMENT_LENGTH) {
1535 sg_element[sg_count].length_segment_base_hi =
1536 PAGE_TABLE_SET_SEGMENT_LENGTH(SBP2_MAX_SG_ELEMENT_LENGTH);
1537 sg_addr += SBP2_MAX_SG_ELEMENT_LENGTH;
1538 sg_len -= SBP2_MAX_SG_ELEMENT_LENGTH;
1539 } else {
1540 sg_element[sg_count].length_segment_base_hi =
1541 PAGE_TABLE_SET_SEGMENT_LENGTH(sg_len);
1542 sg_len = 0;
1543 }
1544 sg_count++;
1545 }
1546
1547 orb->misc |= ORB_SET_DATA_SIZE(sg_count);
1548
1549 sbp2util_cpu_to_be32_buffer(sg_element,
1550 (sizeof(struct sbp2_unrestricted_page_table)) *
1551 sg_count);
1552 }
1553}
1554
1555static void sbp2_create_command_orb(struct sbp2_lu *lu, 1492static void sbp2_create_command_orb(struct sbp2_lu *lu,
1556 struct sbp2_command_info *cmd, 1493 struct sbp2_command_info *cmd,
1557 unchar *scsi_cmd, 1494 unchar *scsi_cmd,
@@ -1595,13 +1532,9 @@ static void sbp2_create_command_orb(struct sbp2_lu *lu,
1595 orb->data_descriptor_hi = 0x0; 1532 orb->data_descriptor_hi = 0x0;
1596 orb->data_descriptor_lo = 0x0; 1533 orb->data_descriptor_lo = 0x0;
1597 orb->misc |= ORB_SET_DIRECTION(1); 1534 orb->misc |= ORB_SET_DIRECTION(1);
1598 } else if (scsi_use_sg) 1535 } else
1599 sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt, 1536 sbp2_prep_command_orb_sg(orb, hi, cmd, scsi_use_sg, sgpnt,
1600 orb_direction, dma_dir); 1537 orb_direction, dma_dir);
1601 else
1602 sbp2_prep_command_orb_no_sg(orb, hi, cmd, sgpnt, orb_direction,
1603 scsi_request_bufflen,
1604 scsi_request_buffer, dma_dir);
1605 1538
1606 sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb)); 1539 sbp2util_cpu_to_be32_buffer(orb, sizeof(*orb));
1607 1540
@@ -1690,15 +1623,15 @@ static int sbp2_send_command(struct sbp2_lu *lu, struct scsi_cmnd *SCpnt,
1690 void (*done)(struct scsi_cmnd *)) 1623 void (*done)(struct scsi_cmnd *))
1691{ 1624{
1692 unchar *scsi_cmd = (unchar *)SCpnt->cmnd; 1625 unchar *scsi_cmd = (unchar *)SCpnt->cmnd;
1693 unsigned int request_bufflen = SCpnt->request_bufflen; 1626 unsigned int request_bufflen = scsi_bufflen(SCpnt);
1694 struct sbp2_command_info *cmd; 1627 struct sbp2_command_info *cmd;
1695 1628
1696 cmd = sbp2util_allocate_command_orb(lu, SCpnt, done); 1629 cmd = sbp2util_allocate_command_orb(lu, SCpnt, done);
1697 if (!cmd) 1630 if (!cmd)
1698 return -EIO; 1631 return -EIO;
1699 1632
1700 sbp2_create_command_orb(lu, cmd, scsi_cmd, SCpnt->use_sg, 1633 sbp2_create_command_orb(lu, cmd, scsi_cmd, scsi_sg_count(SCpnt),
1701 request_bufflen, SCpnt->request_buffer, 1634 request_bufflen, scsi_sglist(SCpnt),
1702 SCpnt->sc_data_direction); 1635 SCpnt->sc_data_direction);
1703 sbp2_link_orb_command(lu, cmd); 1636 sbp2_link_orb_command(lu, cmd);
1704 1637
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 3bd94f11e7d6..bc740a6dd93c 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -260,30 +260,13 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
260 /* Map the data portion, if any. 260 /* Map the data portion, if any.
261 * sges_left = 0 if no data transfer. 261 * sges_left = 0 if no data transfer.
262 */ 262 */
263 if ( (sges_left = SCpnt->use_sg) ) { 263 sges_left = scsi_dma_map(SCpnt);
264 sges_left = pci_map_sg(ioc->pcidev, 264 if (sges_left < 0)
265 (struct scatterlist *) SCpnt->request_buffer, 265 return FAILED;
266 SCpnt->use_sg,
267 SCpnt->sc_data_direction);
268 if (sges_left == 0)
269 return FAILED;
270 } else if (SCpnt->request_bufflen) {
271 SCpnt->SCp.dma_handle = pci_map_single(ioc->pcidev,
272 SCpnt->request_buffer,
273 SCpnt->request_bufflen,
274 SCpnt->sc_data_direction);
275 dsgprintk((MYIOC_s_INFO_FMT "SG: non-SG for %p, len=%d\n",
276 ioc->name, SCpnt, SCpnt->request_bufflen));
277 mptscsih_add_sge((char *) &pReq->SGL,
278 0xD1000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|SCpnt->request_bufflen,
279 SCpnt->SCp.dma_handle);
280
281 return SUCCESS;
282 }
283 266
284 /* Handle the SG case. 267 /* Handle the SG case.
285 */ 268 */
286 sg = (struct scatterlist *) SCpnt->request_buffer; 269 sg = scsi_sglist(SCpnt);
287 sg_done = 0; 270 sg_done = 0;
288 sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION); 271 sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION);
289 chainSge = NULL; 272 chainSge = NULL;
@@ -662,7 +645,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
662 scsi_state = pScsiReply->SCSIState; 645 scsi_state = pScsiReply->SCSIState;
663 scsi_status = pScsiReply->SCSIStatus; 646 scsi_status = pScsiReply->SCSIStatus;
664 xfer_cnt = le32_to_cpu(pScsiReply->TransferCount); 647 xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
665 sc->resid = sc->request_bufflen - xfer_cnt; 648 scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
666 log_info = le32_to_cpu(pScsiReply->IOCLogInfo); 649 log_info = le32_to_cpu(pScsiReply->IOCLogInfo);
667 650
668 /* 651 /*
@@ -767,7 +750,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
767 break; 750 break;
768 751
769 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */ 752 case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
770 sc->resid = sc->request_bufflen - xfer_cnt; 753 scsi_set_resid(sc, scsi_bufflen(sc) - xfer_cnt);
771 if((xfer_cnt==0)||(sc->underflow > xfer_cnt)) 754 if((xfer_cnt==0)||(sc->underflow > xfer_cnt))
772 sc->result=DID_SOFT_ERROR << 16; 755 sc->result=DID_SOFT_ERROR << 16;
773 else /* Sufficient data transfer occurred */ 756 else /* Sufficient data transfer occurred */
@@ -816,7 +799,7 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
816 break; 799 break;
817 800
818 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */ 801 case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
819 sc->resid=0; 802 scsi_set_resid(sc, 0);
820 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */ 803 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
821 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */ 804 case MPI_IOCSTATUS_SUCCESS: /* 0x0000 */
822 sc->result = (DID_OK << 16) | scsi_status; 805 sc->result = (DID_OK << 16) | scsi_status;
@@ -899,23 +882,18 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
899 scsi_state, scsi_status, log_info)); 882 scsi_state, scsi_status, log_info));
900 883
901 dreplyprintk(("%s: [%d:%d:%d:%d] resid=%d " 884 dreplyprintk(("%s: [%d:%d:%d:%d] resid=%d "
902 "bufflen=%d xfer_cnt=%d\n", __FUNCTION__, 885 "bufflen=%d xfer_cnt=%d\n", __FUNCTION__,
903 sc->device->host->host_no, sc->device->channel, sc->device->id, 886 sc->device->host->host_no,
904 sc->device->lun, sc->resid, sc->request_bufflen, 887 sc->device->channel, sc->device->id,
905 xfer_cnt)); 888 sc->device->lun, scsi_get_resid(sc),
889 scsi_bufflen(sc), xfer_cnt));
906 } 890 }
907#endif 891#endif
908 892
909 } /* end of address reply case */ 893 } /* end of address reply case */
910 894
911 /* Unmap the DMA buffers, if any. */ 895 /* Unmap the DMA buffers, if any. */
912 if (sc->use_sg) { 896 scsi_dma_unmap(sc);
913 pci_unmap_sg(ioc->pcidev, (struct scatterlist *) sc->request_buffer,
914 sc->use_sg, sc->sc_data_direction);
915 } else if (sc->request_bufflen) {
916 pci_unmap_single(ioc->pcidev, sc->SCp.dma_handle,
917 sc->request_bufflen, sc->sc_data_direction);
918 }
919 897
920 sc->scsi_done(sc); /* Issue the command callback */ 898 sc->scsi_done(sc); /* Issue the command callback */
921 899
@@ -970,17 +948,8 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
970 /* Set status, free OS resources (SG DMA buffers) 948 /* Set status, free OS resources (SG DMA buffers)
971 * Do OS callback 949 * Do OS callback
972 */ 950 */
973 if (SCpnt->use_sg) { 951 scsi_dma_unmap(SCpnt);
974 pci_unmap_sg(ioc->pcidev, 952
975 (struct scatterlist *) SCpnt->request_buffer,
976 SCpnt->use_sg,
977 SCpnt->sc_data_direction);
978 } else if (SCpnt->request_bufflen) {
979 pci_unmap_single(ioc->pcidev,
980 SCpnt->SCp.dma_handle,
981 SCpnt->request_bufflen,
982 SCpnt->sc_data_direction);
983 }
984 SCpnt->result = DID_RESET << 16; 953 SCpnt->result = DID_RESET << 16;
985 SCpnt->host_scribble = NULL; 954 SCpnt->host_scribble = NULL;
986 955
@@ -1039,17 +1008,8 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
1039 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf); 1008 mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
1040 if ((unsigned char *)mf != sc->host_scribble) 1009 if ((unsigned char *)mf != sc->host_scribble)
1041 continue; 1010 continue;
1042 if (sc->use_sg) { 1011 scsi_dma_unmap(sc);
1043 pci_unmap_sg(hd->ioc->pcidev, 1012
1044 (struct scatterlist *) sc->request_buffer,
1045 sc->use_sg,
1046 sc->sc_data_direction);
1047 } else if (sc->request_bufflen) {
1048 pci_unmap_single(hd->ioc->pcidev,
1049 sc->SCp.dma_handle,
1050 sc->request_bufflen,
1051 sc->sc_data_direction);
1052 }
1053 sc->host_scribble = NULL; 1013 sc->host_scribble = NULL;
1054 sc->result = DID_NO_CONNECT << 16; 1014 sc->result = DID_NO_CONNECT << 16;
1055 sc->scsi_done(sc); 1015 sc->scsi_done(sc);
@@ -1380,10 +1340,10 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1380 * will be no data transfer! GRRRRR... 1340 * will be no data transfer! GRRRRR...
1381 */ 1341 */
1382 if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) { 1342 if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
1383 datalen = SCpnt->request_bufflen; 1343 datalen = scsi_bufflen(SCpnt);
1384 scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */ 1344 scsidir = MPI_SCSIIO_CONTROL_READ; /* DATA IN (host<--ioc<--dev) */
1385 } else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) { 1345 } else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
1386 datalen = SCpnt->request_bufflen; 1346 datalen = scsi_bufflen(SCpnt);
1387 scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */ 1347 scsidir = MPI_SCSIIO_CONTROL_WRITE; /* DATA OUT (host-->ioc-->dev) */
1388 } else { 1348 } else {
1389 datalen = 0; 1349 datalen = 0;
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index eb766c3af1c8..113aaed490d4 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -1306,22 +1306,26 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1306 wake_up(&tw_dev->ioctl_wqueue); 1306 wake_up(&tw_dev->ioctl_wqueue);
1307 } 1307 }
1308 } else { 1308 } else {
1309 struct scsi_cmnd *cmd;
1310
1311 cmd = tw_dev->srb[request_id];
1312
1309 twa_scsiop_execute_scsi_complete(tw_dev, request_id); 1313 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1310 /* If no error command was a success */ 1314 /* If no error command was a success */
1311 if (error == 0) { 1315 if (error == 0) {
1312 tw_dev->srb[request_id]->result = (DID_OK << 16); 1316 cmd->result = (DID_OK << 16);
1313 } 1317 }
1314 1318
1315 /* If error, command failed */ 1319 /* If error, command failed */
1316 if (error == 1) { 1320 if (error == 1) {
1317 /* Ask for a host reset */ 1321 /* Ask for a host reset */
1318 tw_dev->srb[request_id]->result = (DID_OK << 16) | (CHECK_CONDITION << 1); 1322 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1319 } 1323 }
1320 1324
1321 /* Report residual bytes for single sgl */ 1325 /* Report residual bytes for single sgl */
1322 if ((tw_dev->srb[request_id]->use_sg <= 1) && (full_command_packet->command.newcommand.status == 0)) { 1326 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1323 if (full_command_packet->command.newcommand.sg_list[0].length < tw_dev->srb[request_id]->request_bufflen) 1327 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1324 tw_dev->srb[request_id]->resid = tw_dev->srb[request_id]->request_bufflen - full_command_packet->command.newcommand.sg_list[0].length; 1328 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1325 } 1329 }
1326 1330
1327 /* Now complete the io */ 1331 /* Now complete the io */
@@ -1384,52 +1388,20 @@ static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1384{ 1388{
1385 int use_sg; 1389 int use_sg;
1386 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1390 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1387 struct pci_dev *pdev = tw_dev->tw_pci_dev;
1388 int retval = 0;
1389
1390 if (cmd->use_sg == 0)
1391 goto out;
1392
1393 use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
1394 1391
1395 if (use_sg == 0) { 1392 use_sg = scsi_dma_map(cmd);
1393 if (!use_sg)
1394 return 0;
1395 else if (use_sg < 0) {
1396 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list"); 1396 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1397 goto out; 1397 return 0;
1398 } 1398 }
1399 1399
1400 cmd->SCp.phase = TW_PHASE_SGLIST; 1400 cmd->SCp.phase = TW_PHASE_SGLIST;
1401 cmd->SCp.have_data_in = use_sg; 1401 cmd->SCp.have_data_in = use_sg;
1402 retval = use_sg;
1403out:
1404 return retval;
1405} /* End twa_map_scsi_sg_data() */
1406 1402
1407/* This function will perform a pci-dma map for a single buffer */ 1403 return use_sg;
1408static dma_addr_t twa_map_scsi_single_data(TW_Device_Extension *tw_dev, int request_id) 1404} /* End twa_map_scsi_sg_data() */
1409{
1410 dma_addr_t mapping;
1411 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1412 struct pci_dev *pdev = tw_dev->tw_pci_dev;
1413 dma_addr_t retval = 0;
1414
1415 if (cmd->request_bufflen == 0) {
1416 retval = 0;
1417 goto out;
1418 }
1419
1420 mapping = pci_map_single(pdev, cmd->request_buffer, cmd->request_bufflen, DMA_BIDIRECTIONAL);
1421
1422 if (mapping == 0) {
1423 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1d, "Failed to map page");
1424 goto out;
1425 }
1426
1427 cmd->SCp.phase = TW_PHASE_SINGLE;
1428 cmd->SCp.have_data_in = mapping;
1429 retval = mapping;
1430out:
1431 return retval;
1432} /* End twa_map_scsi_single_data() */
1433 1405
1434/* This function will poll for a response interrupt of a request */ 1406/* This function will poll for a response interrupt of a request */
1435static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds) 1407static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
@@ -1815,15 +1787,13 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1815 u32 num_sectors = 0x0; 1787 u32 num_sectors = 0x0;
1816 int i, sg_count; 1788 int i, sg_count;
1817 struct scsi_cmnd *srb = NULL; 1789 struct scsi_cmnd *srb = NULL;
1818 struct scatterlist *sglist = NULL; 1790 struct scatterlist *sglist = NULL, *sg;
1819 dma_addr_t buffaddr = 0x0;
1820 int retval = 1; 1791 int retval = 1;
1821 1792
1822 if (tw_dev->srb[request_id]) { 1793 if (tw_dev->srb[request_id]) {
1823 if (tw_dev->srb[request_id]->request_buffer) {
1824 sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
1825 }
1826 srb = tw_dev->srb[request_id]; 1794 srb = tw_dev->srb[request_id];
1795 if (scsi_sglist(srb))
1796 sglist = scsi_sglist(srb);
1827 } 1797 }
1828 1798
1829 /* Initialize command packet */ 1799 /* Initialize command packet */
@@ -1856,32 +1826,12 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1856 1826
1857 if (!sglistarg) { 1827 if (!sglistarg) {
1858 /* Map sglist from scsi layer to cmd packet */ 1828 /* Map sglist from scsi layer to cmd packet */
1859 if (tw_dev->srb[request_id]->use_sg == 0) {
1860 if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH) {
1861 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1862 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1863 if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)
1864 memcpy(tw_dev->generic_buffer_virt[request_id], tw_dev->srb[request_id]->request_buffer, tw_dev->srb[request_id]->request_bufflen);
1865 } else {
1866 buffaddr = twa_map_scsi_single_data(tw_dev, request_id);
1867 if (buffaddr == 0)
1868 goto out;
1869
1870 command_packet->sg_list[0].address = TW_CPU_TO_SGL(buffaddr);
1871 command_packet->sg_list[0].length = cpu_to_le32(tw_dev->srb[request_id]->request_bufflen);
1872 }
1873 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), 1));
1874
1875 if (command_packet->sg_list[0].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1876 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2d, "Found unaligned address during execute scsi");
1877 goto out;
1878 }
1879 }
1880 1829
1881 if (tw_dev->srb[request_id]->use_sg > 0) { 1830 if (scsi_sg_count(srb)) {
1882 if ((tw_dev->srb[request_id]->use_sg == 1) && (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH)) { 1831 if ((scsi_sg_count(srb) == 1) &&
1883 if (tw_dev->srb[request_id]->sc_data_direction == DMA_TO_DEVICE || tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL) { 1832 (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1884 struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer; 1833 if (srb->sc_data_direction == DMA_TO_DEVICE || srb->sc_data_direction == DMA_BIDIRECTIONAL) {
1834 struct scatterlist *sg = scsi_sglist(srb);
1885 char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 1835 char *buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1886 memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length); 1836 memcpy(tw_dev->generic_buffer_virt[request_id], buf, sg->length);
1887 kunmap_atomic(buf - sg->offset, KM_IRQ0); 1837 kunmap_atomic(buf - sg->offset, KM_IRQ0);
@@ -1893,16 +1843,16 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1893 if (sg_count == 0) 1843 if (sg_count == 0)
1894 goto out; 1844 goto out;
1895 1845
1896 for (i = 0; i < sg_count; i++) { 1846 scsi_for_each_sg(srb, sg, sg_count, i) {
1897 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(&sglist[i])); 1847 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1898 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(&sglist[i])); 1848 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1899 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) { 1849 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1900 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi"); 1850 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1901 goto out; 1851 goto out;
1902 } 1852 }
1903 } 1853 }
1904 } 1854 }
1905 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), tw_dev->srb[request_id]->use_sg)); 1855 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1906 } 1856 }
1907 } else { 1857 } else {
1908 /* Internal cdb post */ 1858 /* Internal cdb post */
@@ -1932,7 +1882,7 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1932 1882
1933 /* Update SG statistics */ 1883 /* Update SG statistics */
1934 if (srb) { 1884 if (srb) {
1935 tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg; 1885 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1936 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) 1886 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1937 tw_dev->max_sgl_entries = tw_dev->sgl_entries; 1887 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1938 } 1888 }
@@ -1951,16 +1901,19 @@ out:
1951/* This function completes an execute scsi operation */ 1901/* This function completes an execute scsi operation */
1952static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id) 1902static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1953{ 1903{
1954 if (tw_dev->srb[request_id]->request_bufflen < TW_MIN_SGL_LENGTH && 1904 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1955 (tw_dev->srb[request_id]->sc_data_direction == DMA_FROM_DEVICE || 1905 int use_sg = scsi_sg_count(cmd);
1956 tw_dev->srb[request_id]->sc_data_direction == DMA_BIDIRECTIONAL)) { 1906
1957 if (tw_dev->srb[request_id]->use_sg == 0) { 1907 if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1958 memcpy(tw_dev->srb[request_id]->request_buffer, 1908 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1909 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1910 if (!use_sg)
1911 memcpy(scsi_sglist(cmd),
1959 tw_dev->generic_buffer_virt[request_id], 1912 tw_dev->generic_buffer_virt[request_id],
1960 tw_dev->srb[request_id]->request_bufflen); 1913 scsi_bufflen(cmd));
1961 } 1914
1962 if (tw_dev->srb[request_id]->use_sg == 1) { 1915 if (use_sg == 1) {
1963 struct scatterlist *sg = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer; 1916 struct scatterlist *sg = scsi_sglist(tw_dev->srb[request_id]);
1964 char *buf; 1917 char *buf;
1965 unsigned long flags = 0; 1918 unsigned long flags = 0;
1966 local_irq_save(flags); 1919 local_irq_save(flags);
@@ -2017,16 +1970,8 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
2017static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id) 1970static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
2018{ 1971{
2019 struct scsi_cmnd *cmd = tw_dev->srb[request_id]; 1972 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
2020 struct pci_dev *pdev = tw_dev->tw_pci_dev;
2021 1973
2022 switch(cmd->SCp.phase) { 1974 scsi_dma_unmap(cmd);
2023 case TW_PHASE_SINGLE:
2024 pci_unmap_single(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
2025 break;
2026 case TW_PHASE_SGLIST:
2027 pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
2028 break;
2029 }
2030} /* End twa_unmap_scsi_data() */ 1975} /* End twa_unmap_scsi_data() */
2031 1976
2032/* scsi_host_template initializer */ 1977/* scsi_host_template initializer */
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index 656bdb1352d8..c7995fc216e8 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1273,57 +1273,24 @@ static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
1273 int use_sg; 1273 int use_sg;
1274 1274
1275 dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n"); 1275 dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
1276
1277 if (cmd->use_sg == 0)
1278 return 0;
1279 1276
1280 use_sg = pci_map_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL); 1277 use_sg = scsi_dma_map(cmd);
1281 1278 if (use_sg < 0) {
1282 if (use_sg == 0) {
1283 printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n"); 1279 printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
1284 return 0; 1280 return 0;
1285 } 1281 }
1286 1282
1287 cmd->SCp.phase = TW_PHASE_SGLIST; 1283 cmd->SCp.phase = TW_PHASE_SGLIST;
1288 cmd->SCp.have_data_in = use_sg; 1284 cmd->SCp.have_data_in = use_sg;
1289 1285
1290 return use_sg; 1286 return use_sg;
1291} /* End tw_map_scsi_sg_data() */ 1287} /* End tw_map_scsi_sg_data() */
1292 1288
1293static u32 tw_map_scsi_single_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
1294{
1295 dma_addr_t mapping;
1296
1297 dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data()\n");
1298
1299 if (cmd->request_bufflen == 0)
1300 return 0;
1301
1302 mapping = pci_map_page(pdev, virt_to_page(cmd->request_buffer), offset_in_page(cmd->request_buffer), cmd->request_bufflen, DMA_BIDIRECTIONAL);
1303
1304 if (mapping == 0) {
1305 printk(KERN_WARNING "3w-xxxx: tw_map_scsi_single_data(): pci_map_page() failed.\n");
1306 return 0;
1307 }
1308
1309 cmd->SCp.phase = TW_PHASE_SINGLE;
1310 cmd->SCp.have_data_in = mapping;
1311
1312 return mapping;
1313} /* End tw_map_scsi_single_data() */
1314
1315static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd) 1289static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
1316{ 1290{
1317 dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n"); 1291 dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
1318 1292
1319 switch(cmd->SCp.phase) { 1293 scsi_dma_unmap(cmd);
1320 case TW_PHASE_SINGLE:
1321 pci_unmap_page(pdev, cmd->SCp.have_data_in, cmd->request_bufflen, DMA_BIDIRECTIONAL);
1322 break;
1323 case TW_PHASE_SGLIST:
1324 pci_unmap_sg(pdev, cmd->request_buffer, cmd->use_sg, DMA_BIDIRECTIONAL);
1325 break;
1326 }
1327} /* End tw_unmap_scsi_data() */ 1294} /* End tw_unmap_scsi_data() */
1328 1295
1329/* This function will reset a device extension */ 1296/* This function will reset a device extension */
@@ -1499,27 +1466,16 @@ static void tw_transfer_internal(TW_Device_Extension *tw_dev, int request_id,
1499 void *buf; 1466 void *buf;
1500 unsigned int transfer_len; 1467 unsigned int transfer_len;
1501 unsigned long flags = 0; 1468 unsigned long flags = 0;
1469 struct scatterlist *sg = scsi_sglist(cmd);
1502 1470
1503 if (cmd->use_sg) { 1471 local_irq_save(flags);
1504 struct scatterlist *sg = 1472 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1505 (struct scatterlist *)cmd->request_buffer; 1473 transfer_len = min(sg->length, len);
1506 local_irq_save(flags);
1507 buf = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1508 transfer_len = min(sg->length, len);
1509 } else {
1510 buf = cmd->request_buffer;
1511 transfer_len = min(cmd->request_bufflen, len);
1512 }
1513 1474
1514 memcpy(buf, data, transfer_len); 1475 memcpy(buf, data, transfer_len);
1515
1516 if (cmd->use_sg) {
1517 struct scatterlist *sg;
1518 1476
1519 sg = (struct scatterlist *)cmd->request_buffer; 1477 kunmap_atomic(buf - sg->offset, KM_IRQ0);
1520 kunmap_atomic(buf - sg->offset, KM_IRQ0); 1478 local_irq_restore(flags);
1521 local_irq_restore(flags);
1522 }
1523} 1479}
1524 1480
1525/* This function is called by the isr to complete an inquiry command */ 1481/* This function is called by the isr to complete an inquiry command */
@@ -1764,19 +1720,20 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
1764{ 1720{
1765 TW_Command *command_packet; 1721 TW_Command *command_packet;
1766 unsigned long command_que_value; 1722 unsigned long command_que_value;
1767 u32 lba = 0x0, num_sectors = 0x0, buffaddr = 0x0; 1723 u32 lba = 0x0, num_sectors = 0x0;
1768 int i, use_sg; 1724 int i, use_sg;
1769 struct scsi_cmnd *srb; 1725 struct scsi_cmnd *srb;
1770 struct scatterlist *sglist; 1726 struct scatterlist *sglist, *sg;
1771 1727
1772 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write()\n"); 1728 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write()\n");
1773 1729
1774 if (tw_dev->srb[request_id]->request_buffer == NULL) { 1730 srb = tw_dev->srb[request_id];
1731
1732 sglist = scsi_sglist(srb);
1733 if (!sglist) {
1775 printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Request buffer NULL.\n"); 1734 printk(KERN_WARNING "3w-xxxx: tw_scsiop_read_write(): Request buffer NULL.\n");
1776 return 1; 1735 return 1;
1777 } 1736 }
1778 sglist = (struct scatterlist *)tw_dev->srb[request_id]->request_buffer;
1779 srb = tw_dev->srb[request_id];
1780 1737
1781 /* Initialize command packet */ 1738 /* Initialize command packet */
1782 command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id]; 1739 command_packet = (TW_Command *)tw_dev->command_packet_virtual_address[request_id];
@@ -1819,33 +1776,18 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
1819 command_packet->byte8.io.lba = lba; 1776 command_packet->byte8.io.lba = lba;
1820 command_packet->byte6.block_count = num_sectors; 1777 command_packet->byte6.block_count = num_sectors;
1821 1778
1822 /* Do this if there are no sg list entries */ 1779 use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
1823 if (tw_dev->srb[request_id]->use_sg == 0) { 1780 if (!use_sg)
1824 dprintk(KERN_NOTICE "3w-xxxx: tw_scsiop_read_write(): SG = 0\n"); 1781 return 1;
1825 buffaddr = tw_map_scsi_single_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
1826 if (buffaddr == 0)
1827 return 1;
1828 1782
1829 command_packet->byte8.io.sgl[0].address = buffaddr; 1783 scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
1830 command_packet->byte8.io.sgl[0].length = tw_dev->srb[request_id]->request_bufflen; 1784 command_packet->byte8.io.sgl[i].address = sg_dma_address(sg);
1785 command_packet->byte8.io.sgl[i].length = sg_dma_len(sg);
1831 command_packet->size+=2; 1786 command_packet->size+=2;
1832 } 1787 }
1833 1788
1834 /* Do this if we have multiple sg list entries */
1835 if (tw_dev->srb[request_id]->use_sg > 0) {
1836 use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
1837 if (use_sg == 0)
1838 return 1;
1839
1840 for (i=0;i<use_sg; i++) {
1841 command_packet->byte8.io.sgl[i].address = sg_dma_address(&sglist[i]);
1842 command_packet->byte8.io.sgl[i].length = sg_dma_len(&sglist[i]);
1843 command_packet->size+=2;
1844 }
1845 }
1846
1847 /* Update SG statistics */ 1789 /* Update SG statistics */
1848 tw_dev->sgl_entries = tw_dev->srb[request_id]->use_sg; 1790 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1849 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries) 1791 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1850 tw_dev->max_sgl_entries = tw_dev->sgl_entries; 1792 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1851 1793
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index cb02656eb54c..405d9d6f9653 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -585,16 +585,8 @@ NCR_700_unmap(struct NCR_700_Host_Parameters *hostdata, struct scsi_cmnd *SCp,
585 struct NCR_700_command_slot *slot) 585 struct NCR_700_command_slot *slot)
586{ 586{
587 if(SCp->sc_data_direction != DMA_NONE && 587 if(SCp->sc_data_direction != DMA_NONE &&
588 SCp->sc_data_direction != DMA_BIDIRECTIONAL) { 588 SCp->sc_data_direction != DMA_BIDIRECTIONAL)
589 if(SCp->use_sg) { 589 scsi_dma_unmap(SCp);
590 dma_unmap_sg(hostdata->dev, SCp->request_buffer,
591 SCp->use_sg, SCp->sc_data_direction);
592 } else {
593 dma_unmap_single(hostdata->dev, slot->dma_handle,
594 SCp->request_bufflen,
595 SCp->sc_data_direction);
596 }
597 }
598} 590}
599 591
600STATIC inline void 592STATIC inline void
@@ -1263,14 +1255,13 @@ process_script_interrupt(__u32 dsps, __u32 dsp, struct scsi_cmnd *SCp,
1263 host->host_no, pun, lun, NCR_700_condition[i], 1255 host->host_no, pun, lun, NCR_700_condition[i],
1264 NCR_700_phase[j], dsp - hostdata->pScript); 1256 NCR_700_phase[j], dsp - hostdata->pScript);
1265 if(SCp != NULL) { 1257 if(SCp != NULL) {
1266 scsi_print_command(SCp); 1258 struct scatterlist *sg;
1267 1259
1268 if(SCp->use_sg) { 1260 scsi_print_command(SCp);
1269 for(i = 0; i < SCp->use_sg + 1; i++) { 1261 scsi_for_each_sg(SCp, sg, scsi_sg_count(SCp) + 1, i) {
1270 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, ((struct scatterlist *)SCp->request_buffer)[i].length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr); 1262 printk(KERN_INFO " SG[%d].length = %d, move_insn=%08x, addr %08x\n", i, sg->length, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].ins, ((struct NCR_700_command_slot *)SCp->host_scribble)->SG[i].pAddr);
1271 }
1272 } 1263 }
1273 } 1264 }
1274 NCR_700_internal_bus_reset(host); 1265 NCR_700_internal_bus_reset(host);
1275 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) { 1266 } else if((dsps & 0xfffff000) == A_DEBUG_INTERRUPT) {
1276 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n", 1267 printk(KERN_NOTICE "scsi%d (%d:%d) DEBUG INTERRUPT %d AT %08x[%04x], continuing\n",
@@ -1844,8 +1835,8 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1844 } 1835 }
1845 /* sanity check: some of the commands generated by the mid-layer 1836 /* sanity check: some of the commands generated by the mid-layer
1846 * have an eccentric idea of their sc_data_direction */ 1837 * have an eccentric idea of their sc_data_direction */
1847 if(!SCp->use_sg && !SCp->request_bufflen 1838 if(!scsi_sg_count(SCp) && !scsi_bufflen(SCp) &&
1848 && SCp->sc_data_direction != DMA_NONE) { 1839 SCp->sc_data_direction != DMA_NONE) {
1849#ifdef NCR_700_DEBUG 1840#ifdef NCR_700_DEBUG
1850 printk("53c700: Command"); 1841 printk("53c700: Command");
1851 scsi_print_command(SCp); 1842 scsi_print_command(SCp);
@@ -1887,31 +1878,15 @@ NCR_700_queuecommand(struct scsi_cmnd *SCp, void (*done)(struct scsi_cmnd *))
1887 int i; 1878 int i;
1888 int sg_count; 1879 int sg_count;
1889 dma_addr_t vPtr = 0; 1880 dma_addr_t vPtr = 0;
1881 struct scatterlist *sg;
1890 __u32 count = 0; 1882 __u32 count = 0;
1891 1883
1892 if(SCp->use_sg) { 1884 sg_count = scsi_dma_map(SCp);
1893 sg_count = dma_map_sg(hostdata->dev, 1885 BUG_ON(sg_count < 0);
1894 SCp->request_buffer, SCp->use_sg,
1895 direction);
1896 } else {
1897 vPtr = dma_map_single(hostdata->dev,
1898 SCp->request_buffer,
1899 SCp->request_bufflen,
1900 direction);
1901 count = SCp->request_bufflen;
1902 slot->dma_handle = vPtr;
1903 sg_count = 1;
1904 }
1905
1906 1886
1907 for(i = 0; i < sg_count; i++) { 1887 scsi_for_each_sg(SCp, sg, sg_count, i) {
1908 1888 vPtr = sg_dma_address(sg);
1909 if(SCp->use_sg) { 1889 count = sg_dma_len(sg);
1910 struct scatterlist *sg = SCp->request_buffer;
1911
1912 vPtr = sg_dma_address(&sg[i]);
1913 count = sg_dma_len(&sg[i]);
1914 }
1915 1890
1916 slot->SG[i].ins = bS_to_host(move_ins | count); 1891 slot->SG[i].ins = bS_to_host(move_ins | count);
1917 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n", 1892 DEBUG((" scatter block %d: move %d[%08x] from 0x%lx\n",
diff --git a/drivers/scsi/53c7xx.c b/drivers/scsi/53c7xx.c
deleted file mode 100644
index 93b41f45638a..000000000000
--- a/drivers/scsi/53c7xx.c
+++ /dev/null
@@ -1,6102 +0,0 @@
1/*
2 * 53c710 driver. Modified from Drew Eckhardts driver
3 * for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
4 * Check out PERM_OPTIONS and EXPECTED_CLOCK, which may be defined in the
5 * relevant machine specific file (eg. mvme16x.[ch], amiga7xx.[ch]).
6 * There are also currently some defines at the top of 53c7xx.scr.
7 * The chip type is #defined in script_asm.pl, as well as the Makefile.
8 * Host scsi ID expected to be 7 - see NCR53c7x0_init().
9 *
10 * I have removed the PCI code and some of the 53c8xx specific code -
11 * simply to make this file smaller and easier to manage.
12 *
13 * MVME16x issues:
14 * Problems trying to read any chip registers in NCR53c7x0_init(), as they
15 * may never have been set by 16xBug (eg. If kernel has come in over tftp).
16 */
17
18/*
19 * Adapted for Linux/m68k Amiga platforms for the A4000T/A4091 and
20 * WarpEngine SCSI controllers.
21 * By Alan Hourihane <alanh@fairlite.demon.co.uk>
22 * Thanks to Richard Hirst for making it possible with the MVME additions
23 */
24
25/*
26 * 53c710 rev 0 doesn't support add with carry. Rev 1 and 2 does. To
27 * overcome this problem you can define FORCE_DSA_ALIGNMENT, which ensures
28 * that the DSA address is always xxxxxx00. If disconnection is not allowed,
29 * then the script only ever tries to add small (< 256) positive offsets to
30 * DSA, so lack of carry isn't a problem. FORCE_DSA_ALIGNMENT can, of course,
31 * be defined for all chip revisions at a small cost in memory usage.
32 */
33
34#define FORCE_DSA_ALIGNMENT
35
36/*
37 * Selection timer does not always work on the 53c710, depending on the
38 * timing at the last disconnect, if this is a problem for you, try
39 * using validids as detailed below.
40 *
41 * Options for the NCR7xx driver
42 *
43 * noasync:0 - disables sync and asynchronous negotiation
44 * nosync:0 - disables synchronous negotiation (does async)
45 * nodisconnect:0 - disables disconnection
46 * validids:0x?? - Bitmask field that disallows certain ID's.
47 * - e.g. 0x03 allows ID 0,1
48 * - 0x1F allows ID 0,1,2,3,4
49 * opthi:n - replace top word of options with 'n'
50 * optlo:n - replace bottom word of options with 'n'
51 * - ALWAYS SPECIFY opthi THEN optlo <<<<<<<<<<
52 */
53
54/*
55 * PERM_OPTIONS are driver options which will be enabled for all NCR boards
56 * in the system at driver initialization time.
57 *
58 * Don't THINK about touching these in PERM_OPTIONS :
59 * OPTION_MEMORY_MAPPED
60 * 680x0 doesn't have an IO map!
61 *
62 * OPTION_DEBUG_TEST1
63 * Test 1 does bus mastering and interrupt tests, which will help weed
64 * out brain damaged main boards.
65 *
66 * Other PERM_OPTIONS settings are listed below. Note the actual options
67 * required are set in the relevant file (mvme16x.c, amiga7xx.c, etc):
68 *
69 * OPTION_NO_ASYNC
70 * Don't negotiate for asynchronous transfers on the first command
71 * when OPTION_ALWAYS_SYNCHRONOUS is set. Useful for dain bramaged
72 * devices which do something bad rather than sending a MESSAGE
73 * REJECT back to us like they should if they can't cope.
74 *
75 * OPTION_SYNCHRONOUS
76 * Enable support for synchronous transfers. Target negotiated
77 * synchronous transfers will be responded to. To initiate
78 * a synchronous transfer request, call
79 *
80 * request_synchronous (hostno, target)
81 *
82 * from within KGDB.
83 *
84 * OPTION_ALWAYS_SYNCHRONOUS
85 * Negotiate for synchronous transfers with every target after
86 * driver initialization or a SCSI bus reset. This is a bit dangerous,
87 * since there are some dain bramaged SCSI devices which will accept
88 * SDTR messages but keep talking asynchronously.
89 *
90 * OPTION_DISCONNECT
91 * Enable support for disconnect/reconnect. To change the
92 * default setting on a given host adapter, call
93 *
94 * request_disconnect (hostno, allow)
95 *
96 * where allow is non-zero to allow, 0 to disallow.
97 *
98 * If you really want to run 10MHz FAST SCSI-II transfers, you should
99 * know that the NCR driver currently ignores parity information. Most
100 * systems do 5MHz SCSI fine. I've seen a lot that have problems faster
101 * than 8MHz. To play it safe, we only request 5MHz transfers.
102 *
103 * If you'd rather get 10MHz transfers, edit sdtr_message and change
104 * the fourth byte from 50 to 25.
105 */
106
107/*
108 * Sponsored by
109 * iX Multiuser Multitasking Magazine
110 * Hannover, Germany
111 * hm@ix.de
112 *
113 * Copyright 1993, 1994, 1995 Drew Eckhardt
114 * Visionary Computing
115 * (Unix and Linux consulting and custom programming)
116 * drew@PoohSticks.ORG
117 * +1 (303) 786-7975
118 *
119 * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
120 *
121 * For more information, please consult
122 *
123 * NCR53C810
124 * SCSI I/O Processor
125 * Programmer's Guide
126 *
127 * NCR 53C810
128 * PCI-SCSI I/O Processor
129 * Data Manual
130 *
131 * NCR 53C810/53C820
132 * PCI-SCSI I/O Processor Design In Guide
133 *
134 * For literature on Symbios Logic Inc. formerly NCR, SCSI,
135 * and Communication products please call (800) 334-5454 or
136 * (719) 536-3300.
137 *
138 * PCI BIOS Specification Revision
139 * PCI Local Bus Specification
140 * PCI System Design Guide
141 *
142 * PCI Special Interest Group
143 * M/S HF3-15A
144 * 5200 N.E. Elam Young Parkway
145 * Hillsboro, Oregon 97124-6497
146 * +1 (503) 696-2000
147 * +1 (800) 433-5177
148 */
149
150/*
151 * Design issues :
152 * The cumulative latency needed to propagate a read/write request
153 * through the file system, buffer cache, driver stacks, SCSI host, and
154 * SCSI device is ultimately the limiting factor in throughput once we
155 * have a sufficiently fast host adapter.
156 *
157 * So, to maximize performance we want to keep the ratio of latency to data
158 * transfer time to a minimum by
159 * 1. Minimizing the total number of commands sent (typical command latency
160 * including drive and bus mastering host overhead is as high as 4.5ms)
161 * to transfer a given amount of data.
162 *
163 * This is accomplished by placing no arbitrary limit on the number
164 * of scatter/gather buffers supported, since we can transfer 1K
165 * per scatter/gather buffer without Eric's cluster patches,
166 * 4K with.
167 *
168 * 2. Minimizing the number of fatal interrupts serviced, since
169 * fatal interrupts halt the SCSI I/O processor. Basically,
170 * this means offloading the practical maximum amount of processing
171 * to the SCSI chip.
172 *
173 * On the NCR53c810/820/720, this is accomplished by using
174 * interrupt-on-the-fly signals when commands complete,
175 * and only handling fatal errors and SDTR / WDTR messages
176 * in the host code.
177 *
178 * On the NCR53c710, interrupts are generated as on the NCR53c8x0,
179 * only the lack of a interrupt-on-the-fly facility complicates
180 * things. Also, SCSI ID registers and commands are
181 * bit fielded rather than binary encoded.
182 *
183 * On the NCR53c700 and NCR53c700-66, operations that are done via
184 * indirect, table mode on the more advanced chips must be
185 * replaced by calls through a jump table which
186 * acts as a surrogate for the DSA. Unfortunately, this
187 * will mean that we must service an interrupt for each
188 * disconnect/reconnect.
189 *
190 * 3. Eliminating latency by pipelining operations at the different levels.
191 *
192 * This driver allows a configurable number of commands to be enqueued
193 * for each target/lun combination (experimentally, I have discovered
194 * that two seems to work best) and will ultimately allow for
195 * SCSI-II tagged queuing.
196 *
197 *
198 * Architecture :
199 * This driver is built around a Linux queue of commands waiting to
200 * be executed, and a shared Linux/NCR array of commands to start. Commands
201 * are transferred to the array by the run_process_issue_queue() function
202 * which is called whenever a command completes.
203 *
204 * As commands are completed, the interrupt routine is triggered,
205 * looks for commands in the linked list of completed commands with
206 * valid status, removes these commands from a list of running commands,
207 * calls the done routine, and flags their target/luns as not busy.
208 *
209 * Due to limitations in the intelligence of the NCR chips, certain
210 * concessions are made. In many cases, it is easier to dynamically
211 * generate/fix-up code rather than calculate on the NCR at run time.
212 * So, code is generated or fixed up for
213 *
214 * - Handling data transfers, using a variable number of MOVE instructions
215 * interspersed with CALL MSG_IN, WHEN MSGIN instructions.
216 *
217 * The DATAIN and DATAOUT routines are separate, so that an incorrect
218 * direction can be trapped, and space isn't wasted.
219 *
220 * It may turn out that we're better off using some sort
221 * of table indirect instruction in a loop with a variable
222 * sized table on the NCR53c710 and newer chips.
223 *
224 * - Checking for reselection (NCR53c710 and better)
225 *
226 * - Handling the details of SCSI context switches (NCR53c710 and better),
227 * such as reprogramming appropriate synchronous parameters,
228 * removing the dsa structure from the NCR's queue of outstanding
229 * commands, etc.
230 *
231 */
232
233#include <linux/module.h>
234
235
236#include <linux/types.h>
237#include <asm/setup.h>
238#include <asm/dma.h>
239#include <asm/io.h>
240#include <asm/system.h>
241#include <linux/delay.h>
242#include <linux/signal.h>
243#include <linux/sched.h>
244#include <linux/errno.h>
245#include <linux/string.h>
246#include <linux/slab.h>
247#include <linux/vmalloc.h>
248#include <linux/mm.h>
249#include <linux/ioport.h>
250#include <linux/time.h>
251#include <linux/blkdev.h>
252#include <linux/spinlock.h>
253#include <linux/interrupt.h>
254#include <asm/pgtable.h>
255
256#ifdef CONFIG_AMIGA
257#include <asm/amigahw.h>
258#include <asm/amigaints.h>
259#include <asm/irq.h>
260
261#define BIG_ENDIAN
262#define NO_IO_SPACE
263#endif
264
265#ifdef CONFIG_MVME16x
266#include <asm/mvme16xhw.h>
267
268#define BIG_ENDIAN
269#define NO_IO_SPACE
270#define VALID_IDS
271#endif
272
273#ifdef CONFIG_BVME6000
274#include <asm/bvme6000hw.h>
275
276#define BIG_ENDIAN
277#define NO_IO_SPACE
278#define VALID_IDS
279#endif
280
281#include "scsi.h"
282#include <scsi/scsi_dbg.h>
283#include <scsi/scsi_host.h>
284#include <scsi/scsi_transport_spi.h>
285#include "53c7xx.h"
286#include <linux/stat.h>
287#include <linux/stddef.h>
288
289#ifdef NO_IO_SPACE
290/*
291 * The following make the definitions in 53c7xx.h (write8, etc) smaller,
292 * we don't have separate i/o space anyway.
293 */
294#undef inb
295#undef outb
296#undef inw
297#undef outw
298#undef inl
299#undef outl
300#define inb(x) 1
301#define inw(x) 1
302#define inl(x) 1
303#define outb(x,y) 1
304#define outw(x,y) 1
305#define outl(x,y) 1
306#endif
307
308static int check_address (unsigned long addr, int size);
309static void dump_events (struct Scsi_Host *host, int count);
310static Scsi_Cmnd * return_outstanding_commands (struct Scsi_Host *host,
311 int free, int issue);
312static void hard_reset (struct Scsi_Host *host);
313static void ncr_scsi_reset (struct Scsi_Host *host);
314static void print_lots (struct Scsi_Host *host);
315static void set_synchronous (struct Scsi_Host *host, int target, int sxfer,
316 int scntl3, int now_connected);
317static int datapath_residual (struct Scsi_Host *host);
318static const char * sbcl_to_phase (int sbcl);
319static void print_progress (Scsi_Cmnd *cmd);
320static void print_queues (struct Scsi_Host *host);
321static void process_issue_queue (unsigned long flags);
322static int shutdown (struct Scsi_Host *host);
323static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int result);
324static int disable (struct Scsi_Host *host);
325static int NCR53c7xx_run_tests (struct Scsi_Host *host);
326static irqreturn_t NCR53c7x0_intr(int irq, void *dev_id);
327static void NCR53c7x0_intfly (struct Scsi_Host *host);
328static int ncr_halt (struct Scsi_Host *host);
329static void intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd
330 *cmd);
331static void intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);
332static void print_dsa (struct Scsi_Host *host, u32 *dsa,
333 const char *prefix);
334static int print_insn (struct Scsi_Host *host, const u32 *insn,
335 const char *prefix, int kernel);
336
337static void NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd);
338static void NCR53c7x0_init_fixup (struct Scsi_Host *host);
339static int NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host, struct
340 NCR53c7x0_cmd *cmd);
341static void NCR53c7x0_soft_reset (struct Scsi_Host *host);
342
343/* Size of event list (per host adapter) */
344static int track_events = 0;
345static struct Scsi_Host *first_host = NULL; /* Head of list of NCR boards */
346static struct scsi_host_template *the_template = NULL;
347
348/* NCR53c710 script handling code */
349
350#include "53c7xx_d.h"
351#ifdef A_int_debug_sync
352#define DEBUG_SYNC_INTR A_int_debug_sync
353#endif
354int NCR53c7xx_script_len = sizeof (SCRIPT);
355int NCR53c7xx_dsa_len = A_dsa_end + Ent_dsa_zero - Ent_dsa_code_template;
356#ifdef FORCE_DSA_ALIGNMENT
357int CmdPageStart = (0 - Ent_dsa_zero - sizeof(struct NCR53c7x0_cmd)) & 0xff;
358#endif
359
360static char *setup_strings[] =
361 {"","","","","","","",""};
362
363#define MAX_SETUP_STRINGS ARRAY_SIZE(setup_strings)
364#define SETUP_BUFFER_SIZE 200
365static char setup_buffer[SETUP_BUFFER_SIZE];
366static char setup_used[MAX_SETUP_STRINGS];
367
368void ncr53c7xx_setup (char *str, int *ints)
369{
370 int i;
371 char *p1, *p2;
372
373 p1 = setup_buffer;
374 *p1 = '\0';
375 if (str)
376 strncpy(p1, str, SETUP_BUFFER_SIZE - strlen(setup_buffer));
377 setup_buffer[SETUP_BUFFER_SIZE - 1] = '\0';
378 p1 = setup_buffer;
379 i = 0;
380 while (*p1 && (i < MAX_SETUP_STRINGS)) {
381 p2 = strchr(p1, ',');
382 if (p2) {
383 *p2 = '\0';
384 if (p1 != p2)
385 setup_strings[i] = p1;
386 p1 = p2 + 1;
387 i++;
388 }
389 else {
390 setup_strings[i] = p1;
391 break;
392 }
393 }
394 for (i=0; i<MAX_SETUP_STRINGS; i++)
395 setup_used[i] = 0;
396}
397
398
399/* check_setup_strings() returns index if key found, 0 if not
400 */
401
402static int check_setup_strings(char *key, int *flags, int *val, char *buf)
403{
404int x;
405char *cp;
406
407 for (x=0; x<MAX_SETUP_STRINGS; x++) {
408 if (setup_used[x])
409 continue;
410 if (!strncmp(setup_strings[x], key, strlen(key)))
411 break;
412 if (!strncmp(setup_strings[x], "next", strlen("next")))
413 return 0;
414 }
415 if (x == MAX_SETUP_STRINGS)
416 return 0;
417 setup_used[x] = 1;
418 cp = setup_strings[x] + strlen(key);
419 *val = -1;
420 if (*cp != ':')
421 return ++x;
422 cp++;
423 if ((*cp >= '0') && (*cp <= '9')) {
424 *val = simple_strtoul(cp,NULL,0);
425 }
426 return ++x;
427}
428
429
430
431/*
432 * KNOWN BUGS :
433 * - There is some sort of conflict when the PPP driver is compiled with
434 * support for 16 channels?
435 *
436 * - On systems which predate the 1.3.x initialization order change,
437 * the NCR driver will cause Cannot get free page messages to appear.
438 * These are harmless, but I don't know of an easy way to avoid them.
439 *
440 * - With OPTION_DISCONNECT, on two systems under unknown circumstances,
441 * we get a PHASE MISMATCH with DSA set to zero (suggests that we
442 * are occurring somewhere in the reselection code) where
443 * DSP=some value DCMD|DBC=same value.
444 *
445 * Closer inspection suggests that we may be trying to execute
446 * some portion of the DSA?
447 * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
448 * scsi0 : handling residual transfer (+ 0 bytes from DMA FIFO)
449 * scsi0 : no current command : unexpected phase MSGIN.
450 * DSP=0x1c46cc, DCMD|DBC=0x1c46ac, DSA=0x0
451 * DSPS=0x0, TEMP=0x1c3e70, DMODE=0x80
452 * scsi0 : DSP->
453 * 001c46cc : 0x001c46cc 0x00000000
454 * 001c46d4 : 0x001c5ea0 0x000011f8
455 *
456 * Changed the print code in the phase_mismatch handler so
457 * that we call print_lots to try to diagnose this.
458 *
459 */
460
461/*
462 * Possible future direction of architecture for max performance :
463 *
464 * We're using a single start array for the NCR chip. This is
465 * sub-optimal, because we cannot add a command which would conflict with
466 * an executing command to this start queue, and therefore must insert the
467 * next command for a given I/T/L combination after the first has completed;
468 * incurring our interrupt latency between SCSI commands.
469 *
470 * To allow further pipelining of the NCR and host CPU operation, we want
471 * to set things up so that immediately on termination of a command destined
472 * for a given LUN, we get that LUN busy again.
473 *
474 * To do this, we need to add a 32 bit pointer to which is jumped to
475 * on completion of a command. If no new command is available, this
476 * would point to the usual DSA issue queue select routine.
477 *
478 * If one were, it would point to a per-NCR53c7x0_cmd select routine
479 * which starts execution immediately, inserting the command at the head
480 * of the start queue if the NCR chip is selected or reselected.
481 *
482 * We would change so that we keep a list of outstanding commands
483 * for each unit, rather than a single running_list. We'd insert
484 * a new command into the right running list; if the NCR didn't
485 * have something running for that yet, we'd put it in the
486 * start queue as well. Some magic needs to happen to handle the
487 * race condition between the first command terminating before the
488 * new one is written.
489 *
490 * Potential for profiling :
491 * Call do_gettimeofday(struct timeval *tv) to get 800ns resolution.
492 */
493
494
495/*
496 * TODO :
497 * 1. To support WIDE transfers, not much needs to happen. We
498 * should do CHMOVE instructions instead of MOVEs when
499 * we have scatter/gather segments of uneven length. When
500 * we do this, we need to handle the case where we disconnect
501 * between segments.
502 *
503 * 2. Currently, when Icky things happen we do a FATAL(). Instead,
504 * we want to do an integrity check on the parts of the NCR hostdata
505 * structure which were initialized at boot time; FATAL() if that
506 * fails, and otherwise try to recover. Keep track of how many
507 * times this has happened within a single SCSI command; if it
508 * gets excessive, then FATAL().
509 *
510 * 3. Parity checking is currently disabled, and a few things should
511 * happen here now that we support synchronous SCSI transfers :
512 * 1. On soft-reset, we shoould set the EPC (Enable Parity Checking)
513 * and AAP (Assert SATN/ on parity error) bits in SCNTL0.
514 *
515 * 2. We should enable the parity interrupt in the SIEN0 register.
516 *
517 * 3. intr_phase_mismatch() needs to believe that message out is
518 * always an "acceptable" phase to have a mismatch in. If
519 * the old phase was MSG_IN, we should send a MESSAGE PARITY
520 * error. If the old phase was something else, we should send
521 * a INITIATOR_DETECTED_ERROR message. Note that this could
522 * cause a RESTORE POINTERS message; so we should handle that
523 * correctly first. Instead, we should probably do an
524 * initiator_abort.
525 *
526 * 4. MPEE bit of CTEST4 should be set so we get interrupted if
527 * we detect an error.
528 *
529 *
530 * 5. The initial code has been tested on the NCR53c810. I don't
531 * have access to NCR53c700, 700-66 (Forex boards), NCR53c710
532 * (NCR Pentium systems), NCR53c720, NCR53c820, or NCR53c825 boards to
533 * finish development on those platforms.
534 *
535 * NCR53c820/825/720 - need to add wide transfer support, including WDTR
536 * negotiation, programming of wide transfer capabilities
537 * on reselection and table indirect selection.
538 *
539 * NCR53c710 - need to add fatal interrupt or GEN code for
540 * command completion signaling. Need to modify all
541 * SDID, SCID, etc. registers, and table indirect select code
542 * since these use bit fielded (ie 1<<target) instead of
543 * binary encoded target ids. Need to accommodate
544 * different register mappings, probably scan through
545 * the SCRIPT code and change the non SFBR register operand
546 * of all MOVE instructions.
547 *
548 * It is rather worse than this actually, the 710 corrupts
549 * both TEMP and DSA when you do a MOVE MEMORY. This
550 * screws you up all over the place. MOVE MEMORY 4 with a
551 * destination of DSA seems to work OK, which helps some.
552 * Richard Hirst richard@sleepie.demon.co.uk
553 *
554 * NCR53c700/700-66 - need to add code to refix addresses on
555 * every nexus change, eliminate all table indirect code,
556 * very messy.
557 *
558 * 6. The NCR53c7x0 series is very popular on other platforms that
559 * could be running Linux - ie, some high performance AMIGA SCSI
560 * boards use it.
561 *
562 * So, I should include #ifdef'd code so that it is
563 * compatible with these systems.
564 *
565 * Specifically, the little Endian assumptions I made in my
566 * bit fields need to change, and if the NCR doesn't see memory
567 * the right way, we need to provide options to reverse words
568 * when the scripts are relocated.
569 *
570 * 7. Use vremap() to access memory mapped boards.
571 */
572
573/*
574 * Allow for simultaneous existence of multiple SCSI scripts so we
575 * can have a single driver binary for all of the family.
576 *
577 * - one for NCR53c700 and NCR53c700-66 chips (not yet supported)
578 * - one for rest (only the NCR53c810, 815, 820, and 825 are currently
579 * supported)
580 *
581 * So that we only need two SCSI scripts, we need to modify things so
582 * that we fixup register accesses in READ/WRITE instructions, and
583 * we'll also have to accommodate the bit vs. binary encoding of IDs
584 * with the 7xx chips.
585 */
586
587#define ROUNDUP(adr,type) \
588 ((void *) (((long) (adr) + sizeof(type) - 1) & ~(sizeof(type) - 1)))
589
590
591/*
592 * Function: issue_to_cmd
593 *
594 * Purpose: convert jump instruction in issue array to NCR53c7x0_cmd
595 * structure pointer.
596 *
597 * Inputs; issue - pointer to start of NOP or JUMP instruction
598 * in issue array.
599 *
600 * Returns: pointer to command on success; 0 if opcode is NOP.
601 */
602
603static inline struct NCR53c7x0_cmd *
604issue_to_cmd (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
605 u32 *issue)
606{
607 return (issue[0] != hostdata->NOP_insn) ?
608 /*
609 * If the IF TRUE bit is set, it's a JUMP instruction. The
610 * operand is a bus pointer to the dsa_begin routine for this DSA. The
611 * dsa field of the NCR53c7x0_cmd structure starts with the
612 * DSA code template. By converting to a virtual address,
613 * subtracting the code template size, and offset of the
614 * dsa field, we end up with a pointer to the start of the
615 * structure (alternatively, we could use the
616 * dsa_cmnd field, an anachronism from when we weren't
617 * sure what the relationship between the NCR structures
618 * and host structures were going to be.
619 */
620 (struct NCR53c7x0_cmd *) ((char *) bus_to_virt (issue[1]) -
621 (hostdata->E_dsa_code_begin - hostdata->E_dsa_code_template) -
622 offsetof(struct NCR53c7x0_cmd, dsa))
623 /* If the IF TRUE bit is not set, it's a NOP */
624 : NULL;
625}
626
627
628/*
629 * FIXME: we should junk these, in favor of synchronous_want and
630 * wide_want in the NCR53c7x0_hostdata structure.
631 */
632
633/* Template for "preferred" synchronous transfer parameters. */
634
635static const unsigned char sdtr_message[] = {
636#ifdef CONFIG_SCSI_NCR53C7xx_FAST
637 EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 25 /* *4ns */, 8 /* off */
638#else
639 EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 50 /* *4ns */, 8 /* off */
640#endif
641};
642
643/* Template to request asynchronous transfers */
644
645static const unsigned char async_message[] = {
646 EXTENDED_MESSAGE, 3 /* length */, EXTENDED_SDTR, 0, 0 /* asynchronous */
647};
648
649/* Template for "preferred" WIDE transfer parameters */
650
651static const unsigned char wdtr_message[] = {
652 EXTENDED_MESSAGE, 2 /* length */, EXTENDED_WDTR, 1 /* 2^1 bytes */
653};
654
655#if 0
656/*
657 * Function : struct Scsi_Host *find_host (int host)
658 *
659 * Purpose : KGDB support function which translates a host number
660 * to a host structure.
661 *
662 * Inputs : host - number of SCSI host
663 *
664 * Returns : NULL on failure, pointer to host structure on success.
665 */
666
667static struct Scsi_Host *
668find_host (int host) {
669 struct Scsi_Host *h;
670 for (h = first_host; h && h->host_no != host; h = h->next);
671 if (!h) {
672 printk (KERN_ALERT "scsi%d not found\n", host);
673 return NULL;
674 } else if (h->hostt != the_template) {
675 printk (KERN_ALERT "scsi%d is not a NCR board\n", host);
676 return NULL;
677 }
678 return h;
679}
680
681#if 0
682/*
683 * Function : request_synchronous (int host, int target)
684 *
685 * Purpose : KGDB interface which will allow us to negotiate for
686 * synchronous transfers. This ill be replaced with a more
687 * integrated function; perhaps a new entry in the scsi_host
688 * structure, accessible via an ioctl() or perhaps /proc/scsi.
689 *
690 * Inputs : host - number of SCSI host; target - number of target.
691 *
692 * Returns : 0 when negotiation has been setup for next SCSI command,
693 * -1 on failure.
694 */
695
696static int
697request_synchronous (int host, int target) {
698 struct Scsi_Host *h;
699 struct NCR53c7x0_hostdata *hostdata;
700 unsigned long flags;
701 if (target < 0) {
702 printk (KERN_ALERT "target %d is bogus\n", target);
703 return -1;
704 }
705 if (!(h = find_host (host)))
706 return -1;
707 else if (h->this_id == target) {
708 printk (KERN_ALERT "target %d is host ID\n", target);
709 return -1;
710 }
711 else if (target >= h->max_id) {
712 printk (KERN_ALERT "target %d exceeds maximum of %d\n", target,
713 h->max_id);
714 return -1;
715 }
716 hostdata = (struct NCR53c7x0_hostdata *)h->hostdata[0];
717
718 local_irq_save(flags);
719 if (hostdata->initiate_sdtr & (1 << target)) {
720 local_irq_restore(flags);
721 printk (KERN_ALERT "target %d already doing SDTR\n", target);
722 return -1;
723 }
724 hostdata->initiate_sdtr |= (1 << target);
725 local_irq_restore(flags);
726 return 0;
727}
728#endif
729
730/*
731 * Function : request_disconnect (int host, int on_or_off)
732 *
733 * Purpose : KGDB support function, tells us to allow or disallow
734 * disconnections.
735 *
736 * Inputs : host - number of SCSI host; on_or_off - non-zero to allow,
737 * zero to disallow.
738 *
739 * Returns : 0 on success, * -1 on failure.
740 */
741
742static int
743request_disconnect (int host, int on_or_off) {
744 struct Scsi_Host *h;
745 struct NCR53c7x0_hostdata *hostdata;
746 if (!(h = find_host (host)))
747 return -1;
748 hostdata = (struct NCR53c7x0_hostdata *) h->hostdata[0];
749 if (on_or_off)
750 hostdata->options |= OPTION_DISCONNECT;
751 else
752 hostdata->options &= ~OPTION_DISCONNECT;
753 return 0;
754}
755#endif
756
757/*
758 * Function : static void NCR53c7x0_driver_init (struct Scsi_Host *host)
759 *
760 * Purpose : Initialize internal structures, as required on startup, or
761 * after a SCSI bus reset.
762 *
763 * Inputs : host - pointer to this host adapter's structure
764 */
765
766static void
767NCR53c7x0_driver_init (struct Scsi_Host *host) {
768 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
769 host->hostdata[0];
770 int i, j;
771 u32 *ncrcurrent;
772
773 for (i = 0; i < 16; ++i) {
774 hostdata->request_sense[i] = 0;
775 for (j = 0; j < 8; ++j)
776 hostdata->busy[i][j] = 0;
777 set_synchronous (host, i, /* sxfer */ 0, hostdata->saved_scntl3, 0);
778 }
779 hostdata->issue_queue = NULL;
780 hostdata->running_list = hostdata->finished_queue =
781 hostdata->ncrcurrent = NULL;
782 for (i = 0, ncrcurrent = (u32 *) hostdata->schedule;
783 i < host->can_queue; ++i, ncrcurrent += 2) {
784 ncrcurrent[0] = hostdata->NOP_insn;
785 ncrcurrent[1] = 0xdeadbeef;
786 }
787 ncrcurrent[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) | DBC_TCI_TRUE;
788 ncrcurrent[1] = (u32) virt_to_bus (hostdata->script) +
789 hostdata->E_wait_reselect;
790 hostdata->reconnect_dsa_head = 0;
791 hostdata->addr_reconnect_dsa_head = (u32)
792 virt_to_bus((void *) &(hostdata->reconnect_dsa_head));
793 hostdata->expecting_iid = 0;
794 hostdata->expecting_sto = 0;
795 if (hostdata->options & OPTION_ALWAYS_SYNCHRONOUS)
796 hostdata->initiate_sdtr = 0xffff;
797 else
798 hostdata->initiate_sdtr = 0;
799 hostdata->talked_to = 0;
800 hostdata->idle = 1;
801}
802
803/*
804 * Function : static int clock_to_ccf_710 (int clock)
805 *
806 * Purpose : Return the clock conversion factor for a given SCSI clock.
807 *
808 * Inputs : clock - SCSI clock expressed in Hz.
809 *
810 * Returns : ccf on success, -1 on failure.
811 */
812
813static int
814clock_to_ccf_710 (int clock) {
815 if (clock <= 16666666)
816 return -1;
817 if (clock <= 25000000)
818 return 2; /* Divide by 1.0 */
819 else if (clock <= 37500000)
820 return 1; /* Divide by 1.5 */
821 else if (clock <= 50000000)
822 return 0; /* Divide by 2.0 */
823 else if (clock <= 66000000)
824 return 3; /* Divide by 3.0 */
825 else
826 return -1;
827}
828
829/*
830 * Function : static int NCR53c7x0_init (struct Scsi_Host *host)
831 *
832 * Purpose : initialize the internal structures for a given SCSI host
833 *
834 * Inputs : host - pointer to this host adapter's structure
835 *
836 * Preconditions : when this function is called, the chip_type
837 * field of the hostdata structure MUST have been set.
838 *
839 * Returns : 0 on success, -1 on failure.
840 */
841
842int
843NCR53c7x0_init (struct Scsi_Host *host) {
844 NCR53c7x0_local_declare();
845 int i, ccf;
846 unsigned char revision;
847 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
848 host->hostdata[0];
849 /*
850 * There are some things which we need to know about in order to provide
851 * a semblance of support. Print 'em if they aren't what we expect,
852 * otherwise don't add to the noise.
853 *
854 * -1 means we don't know what to expect.
855 */
856 int val, flags;
857 char buf[32];
858 int expected_id = -1;
859 int expected_clock = -1;
860 int uninitialized = 0;
861#ifdef NO_IO_SPACE
862 int expected_mapping = OPTION_MEMORY_MAPPED;
863#else
864 int expected_mapping = OPTION_IO_MAPPED;
865#endif
866 for (i=0;i<7;i++)
867 hostdata->valid_ids[i] = 1; /* Default all ID's to scan */
868
869 /* Parse commandline flags */
870 if (check_setup_strings("noasync",&flags,&val,buf))
871 {
872 hostdata->options |= OPTION_NO_ASYNC;
873 hostdata->options &= ~(OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS);
874 }
875
876 if (check_setup_strings("nosync",&flags,&val,buf))
877 {
878 hostdata->options &= ~(OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS);
879 }
880
881 if (check_setup_strings("nodisconnect",&flags,&val,buf))
882 hostdata->options &= ~OPTION_DISCONNECT;
883
884 if (check_setup_strings("validids",&flags,&val,buf))
885 {
886 for (i=0;i<7;i++)
887 hostdata->valid_ids[i] = val & (1<<i);
888 }
889
890 if ((i = check_setup_strings("next",&flags,&val,buf)))
891 {
892 while (i)
893 setup_used[--i] = 1;
894 }
895
896 if (check_setup_strings("opthi",&flags,&val,buf))
897 hostdata->options = (long long)val << 32;
898 if (check_setup_strings("optlo",&flags,&val,buf))
899 hostdata->options |= val;
900
901 NCR53c7x0_local_setup(host);
902 switch (hostdata->chip) {
903 case 710:
904 case 770:
905 hostdata->dstat_sir_intr = NCR53c7x0_dstat_sir_intr;
906 hostdata->init_save_regs = NULL;
907 hostdata->dsa_fixup = NCR53c7xx_dsa_fixup;
908 hostdata->init_fixup = NCR53c7x0_init_fixup;
909 hostdata->soft_reset = NCR53c7x0_soft_reset;
910 hostdata->run_tests = NCR53c7xx_run_tests;
911 expected_clock = hostdata->scsi_clock;
912 expected_id = 7;
913 break;
914 default:
915 printk ("scsi%d : chip type of %d is not supported yet, detaching.\n",
916 host->host_no, hostdata->chip);
917 scsi_unregister (host);
918 return -1;
919 }
920
921 /* Assign constants accessed by NCR */
922 hostdata->NCR53c7xx_zero = 0;
923 hostdata->NCR53c7xx_msg_reject = MESSAGE_REJECT;
924 hostdata->NCR53c7xx_msg_abort = ABORT;
925 hostdata->NCR53c7xx_msg_nop = NOP;
926 hostdata->NOP_insn = (DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24;
927 if (expected_mapping == -1 ||
928 (hostdata->options & (OPTION_MEMORY_MAPPED)) !=
929 (expected_mapping & OPTION_MEMORY_MAPPED))
930 printk ("scsi%d : using %s mapped access\n", host->host_no,
931 (hostdata->options & OPTION_MEMORY_MAPPED) ? "memory" :
932 "io");
933
934 hostdata->dmode = (hostdata->chip == 700 || hostdata->chip == 70066) ?
935 DMODE_REG_00 : DMODE_REG_10;
936 hostdata->istat = ((hostdata->chip / 100) == 8) ?
937 ISTAT_REG_800 : ISTAT_REG_700;
938
939/* We have to assume that this may be the first access to the chip, so
940 * we must set EA in DCNTL. */
941
942 NCR53c7x0_write8 (DCNTL_REG, DCNTL_10_EA|DCNTL_10_COM);
943
944
945/* Only the ISTAT register is readable when the NCR is running, so make
946 sure it's halted. */
947 ncr_halt(host);
948
949/*
950 * XXX - the NCR53c700 uses bitfielded registers for SCID, SDID, etc,
951 * as does the 710 with one bit per SCSI ID. Conversely, the NCR
952 * uses a normal, 3 bit binary representation of these values.
953 *
954 * Get the rest of the NCR documentation, and FIND OUT where the change
955 * was.
956 */
957
958#if 0
    /* May not be able to do this - chip may not have been set up yet */
960 tmp = hostdata->this_id_mask = NCR53c7x0_read8(SCID_REG);
961 for (host->this_id = 0; tmp != 1; tmp >>=1, ++host->this_id);
962#else
963 host->this_id = 7;
964#endif
965
966/*
967 * Note : we should never encounter a board setup for ID0. So,
968 * if we see ID0, assume that it was uninitialized and set it
969 * to the industry standard 7.
970 */
971 if (!host->this_id) {
972 printk("scsi%d : initiator ID was %d, changing to 7\n",
973 host->host_no, host->this_id);
974 host->this_id = 7;
975 hostdata->this_id_mask = 1 << 7;
976 uninitialized = 1;
977 };
978
979 if (expected_id == -1 || host->this_id != expected_id)
980 printk("scsi%d : using initiator ID %d\n", host->host_no,
981 host->this_id);
982
983 /*
984 * Save important registers to allow a soft reset.
985 */
986
987 /*
988 * CTEST7 controls cache snooping, burst mode, and support for
989 * external differential drivers. This isn't currently used - the
990 * default value may not be optimal anyway.
991 * Even worse, it may never have been set up since reset.
992 */
993 hostdata->saved_ctest7 = NCR53c7x0_read8(CTEST7_REG) & CTEST7_SAVE;
994 revision = (NCR53c7x0_read8(CTEST8_REG) & 0xF0) >> 4;
995 switch (revision) {
996 case 1: revision = 0; break;
997 case 2: revision = 1; break;
998 case 4: revision = 2; break;
999 case 8: revision = 3; break;
1000 default: revision = 255; break;
1001 }
1002 printk("scsi%d: Revision 0x%x\n",host->host_no,revision);
1003
1004 if ((revision == 0 || revision == 255) && (hostdata->options & (OPTION_SYNCHRONOUS|OPTION_DISCONNECT|OPTION_ALWAYS_SYNCHRONOUS)))
1005 {
1006 printk ("scsi%d: Disabling sync working and disconnect/reselect\n",
1007 host->host_no);
1008 hostdata->options &= ~(OPTION_SYNCHRONOUS|OPTION_DISCONNECT|OPTION_ALWAYS_SYNCHRONOUS);
1009 }
1010
1011 /*
1012 * On NCR53c700 series chips, DCNTL controls the SCSI clock divisor,
1013 * on 800 series chips, it allows for a totem-pole IRQ driver.
1014 * NOTE saved_dcntl currently overwritten in init function.
1015 * The value read here may be garbage anyway, MVME16x board at least
1016 * does not initialise chip if kernel arrived via tftp.
1017 */
1018
1019 hostdata->saved_dcntl = NCR53c7x0_read8(DCNTL_REG);
1020
1021 /*
1022 * DMODE controls DMA burst length, and on 700 series chips,
1023 * 286 mode and bus width
1024 * NOTE: On MVME16x, chip may have been reset, so this could be a
1025 * power-on/reset default value.
1026 */
1027 hostdata->saved_dmode = NCR53c7x0_read8(hostdata->dmode);
1028
1029 /*
1030 * Now that burst length and enabled/disabled status is known,
1031 * clue the user in on it.
1032 */
1033
1034 ccf = clock_to_ccf_710 (expected_clock);
1035
1036 for (i = 0; i < 16; ++i)
1037 hostdata->cmd_allocated[i] = 0;
1038
1039 if (hostdata->init_save_regs)
1040 hostdata->init_save_regs (host);
1041 if (hostdata->init_fixup)
1042 hostdata->init_fixup (host);
1043
1044 if (!the_template) {
1045 the_template = host->hostt;
1046 first_host = host;
1047 }
1048
1049 /*
1050 * Linux SCSI drivers have always been plagued with initialization
1051 * problems - some didn't work with the BIOS disabled since they expected
1052 * initialization from it, some didn't work when the networking code
1053 * was enabled and registers got scrambled, etc.
1054 *
1055 * To avoid problems like this, in the future, we will do a soft
1056 * reset on the SCSI chip, taking it back to a sane state.
1057 */
1058
1059 hostdata->soft_reset (host);
1060
1061#if 1
1062 hostdata->debug_count_limit = -1;
1063#else
1064 hostdata->debug_count_limit = 1;
1065#endif
1066 hostdata->intrs = -1;
1067 hostdata->resets = -1;
1068 memcpy ((void *) hostdata->synchronous_want, (void *) sdtr_message,
1069 sizeof (hostdata->synchronous_want));
1070
1071 NCR53c7x0_driver_init (host);
1072
1073 if (request_irq(host->irq, NCR53c7x0_intr, IRQF_SHARED, "53c7xx", host))
1074 {
1075 printk("scsi%d : IRQ%d not free, detaching\n",
1076 host->host_no, host->irq);
1077 goto err_unregister;
1078 }
1079
1080 if ((hostdata->run_tests && hostdata->run_tests(host) == -1) ||
1081 (hostdata->options & OPTION_DEBUG_TESTS_ONLY)) {
1082 /* XXX Should disable interrupts, etc. here */
1083 goto err_free_irq;
1084 } else {
1085 if (host->io_port) {
1086 host->n_io_port = 128;
1087 if (!request_region (host->io_port, host->n_io_port, "ncr53c7xx"))
1088 goto err_free_irq;
1089 }
1090 }
1091
1092 if (NCR53c7x0_read8 (SBCL_REG) & SBCL_BSY) {
1093 printk ("scsi%d : bus wedge, doing SCSI reset\n", host->host_no);
1094 hard_reset (host);
1095 }
1096 return 0;
1097
1098 err_free_irq:
1099 free_irq(host->irq, NCR53c7x0_intr);
1100 err_unregister:
1101 scsi_unregister(host);
1102 return -1;
1103}
1104
1105/*
1106 * Function : int ncr53c7xx_init(struct scsi_host_template *tpnt, int board, int chip,
1107 * unsigned long base, int io_port, int irq, int dma, long long options,
1108 * int clock);
1109 *
1110 * Purpose : initializes a NCR53c7,8x0 based on base addresses,
1111 * IRQ, and DMA channel.
1112 *
1113 * Inputs : tpnt - Template for this SCSI adapter, board - board level
1114 * product, chip - 710
1115 *
1116 * Returns : 0 on success, -1 on failure.
1117 *
1118 */
1119
1120int
1121ncr53c7xx_init (struct scsi_host_template *tpnt, int board, int chip,
1122 unsigned long base, int io_port, int irq, int dma,
1123 long long options, int clock)
1124{
1125 struct Scsi_Host *instance;
1126 struct NCR53c7x0_hostdata *hostdata;
1127 char chip_str[80];
1128 int script_len = 0, dsa_len = 0, size = 0, max_cmd_size = 0,
1129 schedule_size = 0, ok = 0;
1130 void *tmp;
1131 unsigned long page;
1132
1133 switch (chip) {
1134 case 710:
1135 case 770:
1136 schedule_size = (tpnt->can_queue + 1) * 8 /* JUMP instruction size */;
1137 script_len = NCR53c7xx_script_len;
1138 dsa_len = NCR53c7xx_dsa_len;
1139 options |= OPTION_INTFLY;
1140 sprintf (chip_str, "NCR53c%d", chip);
1141 break;
1142 default:
1143 printk("scsi-ncr53c7xx : unsupported SCSI chip %d\n", chip);
1144 return -1;
1145 }
1146
1147 printk("scsi-ncr53c7xx : %s at memory 0x%lx, io 0x%x, irq %d",
1148 chip_str, base, io_port, irq);
1149 if (dma == DMA_NONE)
1150 printk("\n");
1151 else
1152 printk(", dma %d\n", dma);
1153
1154 if (options & OPTION_DEBUG_PROBE_ONLY) {
1155 printk ("scsi-ncr53c7xx : probe only enabled, aborting initialization\n");
1156 return -1;
1157 }
1158
1159 max_cmd_size = sizeof(struct NCR53c7x0_cmd) + dsa_len +
1160 /* Size of dynamic part of command structure : */
1161 2 * /* Worst case : we don't know if we need DATA IN or DATA out */
1162 ( 2 * /* Current instructions per scatter/gather segment */
1163 tpnt->sg_tablesize +
1164 3 /* Current startup / termination required per phase */
1165 ) *
1166 8 /* Each instruction is eight bytes */;
1167
1168 /* Allocate fixed part of hostdata, dynamic part to hold appropriate
1169 SCSI SCRIPT(tm) plus a single, maximum-sized NCR53c7x0_cmd structure.
1170
1171 We need a NCR53c7x0_cmd structure for scan_scsis() when we are
1172 not loaded as a module, and when we're loaded as a module, we
1173 can't use a non-dynamically allocated structure because modules
1174 are vmalloc()'d, which can allow structures to cross page
1175 boundaries and breaks our physical/virtual address assumptions
1176 for DMA.
1177
1178 So, we stick it past the end of our hostdata structure.
1179
1180 ASSUMPTION :
1181 Regardless of how many simultaneous SCSI commands we allow,
1182 the probe code only executes a _single_ instruction at a time,
1183 so we only need one here, and don't need to allocate NCR53c7x0_cmd
1184 structures for each target until we are no longer in scan_scsis
1185 and kmalloc() has become functional (memory_init() happens
1186 after all device driver initialization).
1187 */
1188
1189 size = sizeof(struct NCR53c7x0_hostdata) + script_len +
1190 /* Note that alignment will be guaranteed, since we put the command
1191 allocated at probe time after the fixed-up SCSI script, which
1192 consists of 32 bit words, aligned on a 32 bit boundary. But
1193 on a 64bit machine we need 8 byte alignment for hostdata->free, so
1194 we add in another 4 bytes to take care of potential misalignment
1195 */
1196 (sizeof(void *) - sizeof(u32)) + max_cmd_size + schedule_size;
1197
1198 page = __get_free_pages(GFP_ATOMIC,1);
1199 if(page==0)
1200 {
1201 printk(KERN_ERR "53c7xx: out of memory.\n");
1202 return -ENOMEM;
1203 }
1204#ifdef FORCE_DSA_ALIGNMENT
1205 /*
1206 * 53c710 rev.0 doesn't have an add-with-carry instruction.
1207 * Ensure we allocate enough memory to force DSA alignment.
1208 */
1209 size += 256;
1210#endif
1211 /* Size should be < 8K, so we can fit it in two pages. */
1212 if (size > 8192) {
1213 printk(KERN_ERR "53c7xx: hostdata > 8K\n");
1214 return -1;
1215 }
1216
1217 instance = scsi_register (tpnt, 4);
1218 if (!instance)
1219 {
1220 free_page(page);
1221 return -1;
1222 }
1223 instance->hostdata[0] = page;
1224 memset((void *)instance->hostdata[0], 0, 8192);
1225 cache_push(virt_to_phys((void *)(instance->hostdata[0])), 8192);
1226 cache_clear(virt_to_phys((void *)(instance->hostdata[0])), 8192);
1227 kernel_set_cachemode((void *)instance->hostdata[0], 8192, IOMAP_NOCACHE_SER);
1228
1229 /* FIXME : if we ever support an ISA NCR53c7xx based board, we
1230 need to check if the chip is running in a 16 bit mode, and if so
1231 unregister it if it is past the 16M (0x1000000) mark */
1232
1233 hostdata = (struct NCR53c7x0_hostdata *)instance->hostdata[0];
1234 hostdata->size = size;
1235 hostdata->script_count = script_len / sizeof(u32);
1236 hostdata->board = board;
1237 hostdata->chip = chip;
1238
1239 /*
1240 * Being memory mapped is more desirable, since
1241 *
1242 * - Memory accesses may be faster.
1243 *
1244 * - The destination and source address spaces are the same for
1245 * all instructions, meaning we don't have to twiddle dmode or
1246 * any other registers.
1247 *
1248 * So, we try for memory mapped, and if we don't get it,
1249 * we go for port mapped, and that failing we tell the user
1250 * it can't work.
1251 */
1252
1253 if (base) {
1254 instance->base = base;
1255 /* Check for forced I/O mapping */
1256 if (!(options & OPTION_IO_MAPPED)) {
1257 options |= OPTION_MEMORY_MAPPED;
1258 ok = 1;
1259 }
1260 } else {
1261 options &= ~OPTION_MEMORY_MAPPED;
1262 }
1263
1264 if (io_port) {
1265 instance->io_port = io_port;
1266 options |= OPTION_IO_MAPPED;
1267 ok = 1;
1268 } else {
1269 options &= ~OPTION_IO_MAPPED;
1270 }
1271
1272 if (!ok) {
1273 printk ("scsi%d : not initializing, no I/O or memory mapping known \n",
1274 instance->host_no);
1275 scsi_unregister (instance);
1276 return -1;
1277 }
1278 instance->irq = irq;
1279 instance->dma_channel = dma;
1280
1281 hostdata->options = options;
1282 hostdata->dsa_len = dsa_len;
1283 hostdata->max_cmd_size = max_cmd_size;
1284 hostdata->num_cmds = 1;
1285 hostdata->scsi_clock = clock;
1286 /* Initialize single command */
1287 tmp = (hostdata->script + hostdata->script_count);
1288#ifdef FORCE_DSA_ALIGNMENT
1289 {
1290 void *t = ROUNDUP(tmp, void *);
1291 if (((u32)t & 0xff) > CmdPageStart)
1292 t = (void *)((u32)t + 255);
1293 t = (void *)(((u32)t & ~0xff) + CmdPageStart);
1294 hostdata->free = t;
1295#if 0
1296 printk ("scsi: Registered size increased by 256 to %d\n", size);
1297 printk ("scsi: CmdPageStart = 0x%02x\n", CmdPageStart);
1298 printk ("scsi: tmp = 0x%08x, hostdata->free set to 0x%08x\n",
1299 (u32)tmp, (u32)t);
1300#endif
1301 }
1302#else
1303 hostdata->free = ROUNDUP(tmp, void *);
1304#endif
1305 hostdata->free->real = tmp;
1306 hostdata->free->size = max_cmd_size;
1307 hostdata->free->free = NULL;
1308 hostdata->free->next = NULL;
1309 hostdata->extra_allocate = 0;
1310
1311 /* Allocate command start code space */
1312 hostdata->schedule = (chip == 700 || chip == 70066) ?
1313 NULL : (u32 *) ((char *)hostdata->free + max_cmd_size);
1314
1315/*
1316 * For diagnostic purposes, we don't really care how fast things blaze.
1317 * For profiling, we want to access the 800ns resolution system clock,
1318 * using a 'C' call on the host processor.
1319 *
1320 * Therefore, there's no need for the NCR chip to directly manipulate
1321 * this data, and we should put it wherever is most convenient for
1322 * Linux.
1323 */
1324 if (track_events)
1325 hostdata->events = (struct NCR53c7x0_event *) (track_events ?
1326 vmalloc (sizeof (struct NCR53c7x0_event) * track_events) : NULL);
1327 else
1328 hostdata->events = NULL;
1329
1330 if (hostdata->events) {
1331 memset ((void *) hostdata->events, 0, sizeof(struct NCR53c7x0_event) *
1332 track_events);
1333 hostdata->event_size = track_events;
1334 hostdata->event_index = 0;
1335 } else
1336 hostdata->event_size = 0;
1337
1338 return NCR53c7x0_init(instance);
1339}
1340
1341
1342/*
1343 * Function : static void NCR53c7x0_init_fixup (struct Scsi_Host *host)
1344 *
1345 * Purpose : copy and fixup the SCSI SCRIPTS(tm) code for this device.
1346 *
1347 * Inputs : host - pointer to this host adapter's structure
1348 *
1349 */
1350
static void
NCR53c7x0_init_fixup (struct Scsi_Host *host) {
    NCR53c7x0_local_declare();
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    unsigned char tmp;
    int i, ncr_to_memory, memory_to_ncr;
    u32 base;
    NCR53c7x0_local_setup(host);


    /* XXX - NOTE : this code MUST be made endian aware */
    /*  Copy code into buffer that was allocated at detection time.  */
    memcpy ((void *) hostdata->script, (void *) SCRIPT,
	sizeof(SCRIPT));
    /* Fixup labels: every LABELPATCHES entry is an offset into the script
       that holds a script-relative address; rebase it to the bus address
       of our copy so the NCR can jump within it. */
    for (i = 0; i < PATCHES; ++i)
	hostdata->script[LABELPATCHES[i]] +=
    	    virt_to_bus(hostdata->script);
    /* Fixup addresses of constants that used to be EXTERNAL; each now
       lives in hostdata so chip and host share one copy. */

    patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_abort,
    	virt_to_bus(&(hostdata->NCR53c7xx_msg_abort)));
    patch_abs_32 (hostdata->script, 0, NCR53c7xx_msg_reject,
    	virt_to_bus(&(hostdata->NCR53c7xx_msg_reject)));
    patch_abs_32 (hostdata->script, 0, NCR53c7xx_zero,
    	virt_to_bus(&(hostdata->NCR53c7xx_zero)));
    patch_abs_32 (hostdata->script, 0, NCR53c7xx_sink,
    	virt_to_bus(&(hostdata->NCR53c7xx_sink)));
    patch_abs_32 (hostdata->script, 0, NOP_insn,
	virt_to_bus(&(hostdata->NOP_insn)));
    patch_abs_32 (hostdata->script, 0, schedule,
	virt_to_bus((void *) hostdata->schedule));

    /* Fixup references to external variables: */
    for (i = 0; i < EXTERNAL_PATCHES_LEN; ++i)
       hostdata->script[EXTERNAL_PATCHES[i].offset] +=
         virt_to_bus(EXTERNAL_PATCHES[i].address);

    /*
     * Fixup absolutes set at boot-time.
     *
     * All non-code absolute variables suffixed with "dsa_" and "int_"
     * are constants, and need no fixup provided the assembler has done
     * it for us (I don't know what the "real" NCR assembler does in
     * this case, my assembler does the right magic).
     */

    patch_abs_rwri_data (hostdata->script, 0, dsa_save_data_pointer,
    	Ent_dsa_code_save_data_pointer - Ent_dsa_zero);
    patch_abs_rwri_data (hostdata->script, 0, dsa_restore_pointers,
    	Ent_dsa_code_restore_pointers - Ent_dsa_zero);
    patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
    	Ent_dsa_code_check_reselect - Ent_dsa_zero);

    /*
     * Just for the hell of it, preserve the settings of
     * Burst Length and Enable Read Line bits from the DMODE
     * register.  Make sure SCRIPTS start automagically.
     */

#if defined(CONFIG_MVME16x) || defined(CONFIG_BVME6000)
    /* We know better what we want than 16xBug does! */
    tmp = DMODE_10_BL_8 | DMODE_10_FC2;
#else
    tmp = NCR53c7x0_read8(DMODE_REG_10);
    tmp &= (DMODE_BL_MASK | DMODE_10_FC2 | DMODE_10_FC1 | DMODE_710_PD |
	DMODE_710_UO);
#endif

    /* Register addresses patched into the script are based on the I/O
       port when port-mapped, or on the bus address of the chip's memory
       window when memory-mapped. */
    if (!(hostdata->options & OPTION_MEMORY_MAPPED)) {
    	base = (u32) host->io_port;
    	memory_to_ncr = tmp|DMODE_800_DIOM;
    	ncr_to_memory = tmp|DMODE_800_SIOM;
    } else {
    	base = virt_to_bus((void *)host->base);
	memory_to_ncr = ncr_to_memory = tmp;
    }

    /* SCRATCHB_REG_10 == SCRATCHA_REG_800, as it happens */
    patch_abs_32 (hostdata->script, 0, addr_scratch, base + SCRATCHA_REG_800);
    patch_abs_32 (hostdata->script, 0, addr_temp, base + TEMP_REG);
    patch_abs_32 (hostdata->script, 0, addr_dsa, base + DSA_REG);

    /*
     * I needed some variables in the script to be accessible to
     * both the NCR chip and the host processor. For these variables,
     * I made the arbitrary decision to store them directly in the
     * hostdata structure rather than in the RELATIVE area of the
     * SCRIPTS.
     */


    patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_memory, tmp);
    patch_abs_rwri_data (hostdata->script, 0, dmode_memory_to_ncr, memory_to_ncr);
    patch_abs_rwri_data (hostdata->script, 0, dmode_ncr_to_memory, ncr_to_memory);

    patch_abs_32 (hostdata->script, 0, msg_buf,
	virt_to_bus((void *)&(hostdata->msg_buf)));
    patch_abs_32 (hostdata->script, 0, reconnect_dsa_head,
    	virt_to_bus((void *)&(hostdata->reconnect_dsa_head)));
    patch_abs_32 (hostdata->script, 0, addr_reconnect_dsa_head,
	virt_to_bus((void *)&(hostdata->addr_reconnect_dsa_head)));
    patch_abs_32 (hostdata->script, 0, reselected_identify,
    	virt_to_bus((void *)&(hostdata->reselected_identify)));
/* reselected_tag is currently unused */
#if 0
    patch_abs_32 (hostdata->script, 0, reselected_tag,
    	virt_to_bus((void *)&(hostdata->reselected_tag)));
#endif

    patch_abs_32 (hostdata->script, 0, test_dest,
	virt_to_bus((void*)&hostdata->test_dest));
    patch_abs_32 (hostdata->script, 0, test_src,
	virt_to_bus(&hostdata->test_source));
    patch_abs_32 (hostdata->script, 0, saved_dsa,
	virt_to_bus((void *)&hostdata->saved2_dsa));
    patch_abs_32 (hostdata->script, 0, emulfly,
	virt_to_bus((void *)&hostdata->emulated_intfly));

    patch_abs_rwri_data (hostdata->script, 0, dsa_check_reselect,
	(unsigned char)(Ent_dsa_code_check_reselect - Ent_dsa_zero));

/* These are for event logging; the ncr_event enum contains the
   actual interrupt numbers. */
#ifdef A_int_EVENT_SELECT
   patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT, (u32) EVENT_SELECT);
#endif
#ifdef A_int_EVENT_DISCONNECT
   patch_abs_32 (hostdata->script, 0, int_EVENT_DISCONNECT, (u32) EVENT_DISCONNECT);
#endif
#ifdef A_int_EVENT_RESELECT
   patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT, (u32) EVENT_RESELECT);
#endif
#ifdef A_int_EVENT_COMPLETE
   patch_abs_32 (hostdata->script, 0, int_EVENT_COMPLETE, (u32) EVENT_COMPLETE);
#endif
#ifdef A_int_EVENT_IDLE
   patch_abs_32 (hostdata->script, 0, int_EVENT_IDLE, (u32) EVENT_IDLE);
#endif
#ifdef A_int_EVENT_SELECT_FAILED
   patch_abs_32 (hostdata->script, 0, int_EVENT_SELECT_FAILED,
	(u32) EVENT_SELECT_FAILED);
#endif
#ifdef A_int_EVENT_BEFORE_SELECT
   patch_abs_32 (hostdata->script, 0, int_EVENT_BEFORE_SELECT,
	(u32) EVENT_BEFORE_SELECT);
#endif
#ifdef A_int_EVENT_RESELECT_FAILED
   patch_abs_32 (hostdata->script, 0, int_EVENT_RESELECT_FAILED,
	(u32) EVENT_RESELECT_FAILED);
#endif

    /*
     * Make sure the NCR and Linux code agree on the location of
     * certain fields.
     */

    /* Cache script entry-point offsets so the interrupt handler can
       compare/compute DSP values without the assembler symbols. */
    hostdata->E_accept_message = Ent_accept_message;
    hostdata->E_command_complete = Ent_command_complete;
    hostdata->E_cmdout_cmdout = Ent_cmdout_cmdout;
    hostdata->E_data_transfer = Ent_data_transfer;
    hostdata->E_debug_break = Ent_debug_break;
    hostdata->E_dsa_code_template = Ent_dsa_code_template;
    hostdata->E_dsa_code_template_end = Ent_dsa_code_template_end;
    hostdata->E_end_data_transfer = Ent_end_data_transfer;
    hostdata->E_initiator_abort = Ent_initiator_abort;
    hostdata->E_msg_in = Ent_msg_in;
    hostdata->E_other_transfer = Ent_other_transfer;
    hostdata->E_other_in = Ent_other_in;
    hostdata->E_other_out = Ent_other_out;
    hostdata->E_reject_message = Ent_reject_message;
    hostdata->E_respond_message = Ent_respond_message;
    hostdata->E_select = Ent_select;
    hostdata->E_select_msgout = Ent_select_msgout;
    hostdata->E_target_abort = Ent_target_abort;
#ifdef Ent_test_0
    hostdata->E_test_0 = Ent_test_0;
#endif
    hostdata->E_test_1 = Ent_test_1;
    hostdata->E_test_2 = Ent_test_2;
#ifdef Ent_test_3
    hostdata->E_test_3 = Ent_test_3;
#endif
    hostdata->E_wait_reselect = Ent_wait_reselect;
    hostdata->E_dsa_code_begin = Ent_dsa_code_begin;

    /* Likewise, cache the DSA field offsets used by the 'C' code. */
    hostdata->dsa_cmdout = A_dsa_cmdout;
    hostdata->dsa_cmnd = A_dsa_cmnd;
    hostdata->dsa_datain = A_dsa_datain;
    hostdata->dsa_dataout = A_dsa_dataout;
    hostdata->dsa_end = A_dsa_end;
    hostdata->dsa_msgin = A_dsa_msgin;
    hostdata->dsa_msgout = A_dsa_msgout;
    hostdata->dsa_msgout_other = A_dsa_msgout_other;
    hostdata->dsa_next = A_dsa_next;
    hostdata->dsa_select = A_dsa_select;
    hostdata->dsa_start = Ent_dsa_code_template - Ent_dsa_zero;
    hostdata->dsa_status = A_dsa_status;
    hostdata->dsa_jump_dest = Ent_dsa_code_fix_jump - Ent_dsa_zero +
	8 /* destination operand */;

    /* sanity check */
    if (A_dsa_fields_start != Ent_dsa_code_template_end -
    	Ent_dsa_zero)
    	printk("scsi%d : NCR dsa_fields start is %d not %d\n",
    	    host->host_no, A_dsa_fields_start, Ent_dsa_code_template_end -
    	    Ent_dsa_zero);

    printk("scsi%d : NCR code relocated to 0x%lx (virt 0x%p)\n", host->host_no,
	virt_to_bus(hostdata->script), hostdata->script);
}
1563
1564/*
1565 * Function : static int NCR53c7xx_run_tests (struct Scsi_Host *host)
1566 *
1567 * Purpose : run various verification tests on the NCR chip,
1568 * including interrupt generation, and proper bus mastering
1569 * operation.
1570 *
1571 * Inputs : host - a properly initialized Scsi_Host structure
1572 *
1573 * Preconditions : the NCR chip must be in a halted state.
1574 *
1575 * Returns : 0 if all tests were successful, -1 on error.
1576 *
1577 */
1578
static int
NCR53c7xx_run_tests (struct Scsi_Host *host) {
    NCR53c7x0_local_declare();
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    unsigned long timeout;
    u32 start;
    int failed, i;
    unsigned long flags;
    NCR53c7x0_local_setup(host);

    /* The NCR chip _must_ be idle to run the test scripts */

    local_irq_save(flags);
    if (!hostdata->idle) {
	printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
	local_irq_restore(flags);
	return -1;
    }

    /*
     * Check for functional interrupts, this could work as an
     * autoprobe routine.
     */

    /* Test 1: run the E_test_1 script, which should copy test_source
       (0xdeadbeef) to test_dest and raise an interrupt that sets
       test_completed.  Verifies IRQ delivery and bus-master DMA. */
    if ((hostdata->options & OPTION_DEBUG_TEST1) &&
	    hostdata->state != STATE_DISABLED) {
	hostdata->idle = 0;
	hostdata->test_running = 1;
	hostdata->test_completed = -1;
	hostdata->test_dest = 0;
	hostdata->test_source = 0xdeadbeef;
	start = virt_to_bus (hostdata->script) + hostdata->E_test_1;
	hostdata->state = STATE_RUNNING;
	printk ("scsi%d : test 1", host->host_no);
	NCR53c7x0_write32 (DSP_REG, start);
	if (hostdata->options & OPTION_DEBUG_TRACE)
	    NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl | DCNTL_SSM |
						DCNTL_STD);
	printk (" started\n");
	local_irq_restore(flags);

	/*
	 * This is currently a .5 second timeout, since (in theory) no slow
	 * board will take that long.  In practice, we've seen one
	 * pentium which occasionally fails with this, but works with
	 * 10 times as much?
	 */

	timeout = jiffies + 5 * HZ / 10;
	while ((hostdata->test_completed == -1) && time_before(jiffies, timeout))
	    barrier();

	failed = 1;
	if (hostdata->test_completed == -1)
	    printk ("scsi%d : driver test 1 timed out%s\n",host->host_no ,
		(hostdata->test_dest == 0xdeadbeef) ?
		    " due to lost interrupt.\n"
		    "         Please verify that the correct IRQ is being used for your board,\n"
		    : "");
	else if (hostdata->test_completed != 1)
	    printk ("scsi%d : test 1 bad interrupt value (%d)\n",
		host->host_no, hostdata->test_completed);
	else
	    failed = (hostdata->test_dest != 0xdeadbeef);

	if (hostdata->test_dest != 0xdeadbeef) {
	    printk ("scsi%d : driver test 1 read 0x%x instead of 0xdeadbeef indicating a\n"
		    "         probable cache invalidation problem.  Please configure caching\n"
		    "         as write-through or disabled\n",
		host->host_no, hostdata->test_dest);
	}

	if (failed) {
	    printk ("scsi%d : DSP = 0x%p (script at 0x%p, start at 0x%x)\n",
		host->host_no, bus_to_virt(NCR53c7x0_read32(DSP_REG)),
		hostdata->script, start);
	    printk ("scsi%d : DSPS = 0x%x\n", host->host_no,
		NCR53c7x0_read32(DSPS_REG));
	    local_irq_restore(flags);
	    return -1;
	}
	hostdata->test_running = 0;
    }

    /* Test 2: issue a real INQUIRY through a hand-built DSA to each of
       the first six target IDs to check selection and data transfer. */
    if ((hostdata->options & OPTION_DEBUG_TEST2) &&
	hostdata->state != STATE_DISABLED) {
	u32 dsa[48];
    	unsigned char identify = IDENTIFY(0, 0);
	unsigned char cmd[6];
	unsigned char data[36];
    	unsigned char status = 0xff;
    	unsigned char msg = 0xff;

    	cmd[0] = INQUIRY;
    	cmd[1] = cmd[2] = cmd[3] = cmd[5] = 0;
    	cmd[4] = sizeof(data);

	/* DSA layout: (count, bus address) pairs for the identify
	   message-out, command, data-in, status, and message-in phases. */
    	dsa[2] = 1;
    	dsa[3] = virt_to_bus(&identify);
    	dsa[4] = 6;
    	dsa[5] = virt_to_bus(&cmd);
    	dsa[6] = sizeof(data);
    	dsa[7] = virt_to_bus(&data);
    	dsa[8] = 1;
    	dsa[9] = virt_to_bus(&status);
    	dsa[10] = 1;
    	dsa[11] = virt_to_bus(&msg);

	for (i = 0; i < 6; ++i) {
#ifdef VALID_IDS
	    if (!hostdata->valid_ids[i])
		continue;
#endif
	    /* NOTE(review): interrupts are disabled afresh each pass, but
	       the restores below reuse `flags` saved at function entry —
	       appears intentional (flags were saved with IRQs on) but
	       worth confirming. */
	    local_irq_disable();
	    if (!hostdata->idle) {
		printk ("scsi%d : chip not idle, aborting tests\n", host->host_no);
		local_irq_restore(flags);
		return -1;
	    }

	    /* 710: bit mapped scsi ID, async   */
	    dsa[0] = (1 << i) << 16;
	    hostdata->idle = 0;
	    hostdata->test_running = 2;
	    hostdata->test_completed = -1;
	    start = virt_to_bus(hostdata->script) + hostdata->E_test_2;
	    hostdata->state = STATE_RUNNING;
	    NCR53c7x0_write32 (DSA_REG, virt_to_bus(dsa));
	    NCR53c7x0_write32 (DSP_REG, start);
	    if (hostdata->options & OPTION_DEBUG_TRACE)
	        NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl |
				DCNTL_SSM | DCNTL_STD);
	    local_irq_restore(flags);

	    timeout = jiffies + 5 * HZ;	/* arbitrary */
	    while ((hostdata->test_completed == -1) && time_before(jiffies, timeout))
	    	barrier();

	    NCR53c7x0_write32 (DSA_REG, 0);

	    /* test_completed: 2 = INQUIRY answered, 3 = selection timeout
	       (no device at this ID), -1 = script never finished. */
	    if (hostdata->test_completed == 2) {
		data[35] = 0;
		printk ("scsi%d : test 2 INQUIRY to target %d, lun 0 : %s\n",
		    host->host_no, i, data + 8);
		printk ("scsi%d : status ", host->host_no);
		scsi_print_status (status);
		printk ("\nscsi%d : message ", host->host_no);
		spi_print_msg(&msg);
		printk ("\n");
	    } else if (hostdata->test_completed == 3) {
		printk("scsi%d : test 2 no connection with target %d\n",
		    host->host_no, i);
		if (!hostdata->idle) {
		    printk("scsi%d : not idle\n", host->host_no);
		    local_irq_restore(flags);
		    return -1;
		}
	    } else if (hostdata->test_completed == -1) {
		printk ("scsi%d : test 2 timed out\n", host->host_no);
		local_irq_restore(flags);
		return -1;
	    }
	    hostdata->test_running = 0;
	}
    }

    local_irq_restore(flags);
    return 0;
}
1749
1750/*
1751 * Function : static void NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd)
1752 *
1753 * Purpose : copy the NCR53c8xx dsa structure into cmd's dsa buffer,
1754 * performing all necessary relocation.
1755 *
1756 * Inputs : cmd, a NCR53c7x0_cmd structure with a dsa area large
1757 * enough to hold the NCR53c8xx dsa.
1758 */
1759
static void
NCR53c7xx_dsa_fixup (struct NCR53c7x0_cmd *cmd) {
    Scsi_Cmnd *c = cmd->cmd;
    struct Scsi_Host *host = c->device->host;
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
    	host->hostdata[0];
    int i;

    /* Start from the pristine DSA code template embedded in the script,
       then patch in this command's per-target/per-LUN values below. */
    memcpy (cmd->dsa, hostdata->script + (hostdata->E_dsa_code_template / 4),
	hostdata->E_dsa_code_template_end - hostdata->E_dsa_code_template);

    /*
     * Note : within the NCR 'C' code, dsa points to the _start_
     * of the DSA structure, and _not_ the offset of dsa_zero within
     * that structure used to facilitate shorter signed offsets
     * for the 8 bit ALU.
     *
     * The implications of this are that
     *
     * - 32 bit A_dsa_* absolute values require an additional
     * 	 dsa_zero added to their value to be correct, since they are
     *   relative to dsa_zero which is in essentially a separate
     *   space from the code symbols.
     *
     * - All other symbols require no special treatment.
     */

    /* LUN for the IDENTIFY message sent at selection. */
    patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
    	dsa_temp_lun, c->device->lun);
    /* Linkage for the reconnect queue: where the next-DSA pointer is
       written, and the bus address of this DSA's dsa_next field. */
    patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
	dsa_temp_addr_next, virt_to_bus(&cmd->dsa_next_addr));
    patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
    	dsa_temp_next, virt_to_bus(cmd->dsa) + Ent_dsa_zero -
	Ent_dsa_code_template + A_dsa_next);
    /* Per-target synchronous transfer script and SSCF value. */
    patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
    	dsa_temp_sync, virt_to_bus((void *)hostdata->sync[c->device->id].script));
    patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
    	dsa_sscf_710, virt_to_bus((void *)&hostdata->sync[c->device->id].sscf_710));
    /* 710 uses a one-bit-per-ID target mask, hence 1 << id. */
    patch_abs_tci_data (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
	    dsa_temp_target, 1 << c->device->id);
    /* XXX - new pointer stuff */
    patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
    	dsa_temp_addr_saved_pointer, virt_to_bus(&cmd->saved_data_pointer));
    patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
    	dsa_temp_addr_saved_residual, virt_to_bus(&cmd->saved_residual));
    patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
    	dsa_temp_addr_residual, virt_to_bus(&cmd->residual));

    /* XXX - new start stuff */

    patch_abs_32 (cmd->dsa, Ent_dsa_code_template / sizeof(u32),
	dsa_temp_addr_dsa_value, virt_to_bus(&cmd->dsa_addr));
}
1813
1814/*
1815 * Function : run_process_issue_queue (void)
1816 *
 * Purpose : ensure that the coroutine is running and will process our
1818 * request. process_issue_queue_running is checked/set here (in an
1819 * inline function) rather than in process_issue_queue itself to reduce
1820 * the chances of stack overflow.
1821 *
1822 */
1823
1824static volatile int process_issue_queue_running = 0;
1825
1826static __inline__ void
1827run_process_issue_queue(void) {
1828 unsigned long flags;
1829 local_irq_save(flags);
1830 if (!process_issue_queue_running) {
1831 process_issue_queue_running = 1;
1832 process_issue_queue(flags);
1833 /*
1834 * process_issue_queue_running is cleared in process_issue_queue
1835 * once it can't do more work, and process_issue_queue exits with
1836 * interrupts disabled.
1837 */
1838 }
1839 local_irq_restore(flags);
1840}
1841
1842/*
1843 * Function : static void abnormal_finished (struct NCR53c7x0_cmd *cmd, int
1844 * result)
1845 *
1846 * Purpose : mark SCSI command as finished, OR'ing the host portion
1847 * of the result word into the result field of the corresponding
1848 * Scsi_Cmnd structure, and removing it from the internal queues.
1849 *
1850 * Inputs : cmd - command, result - entire result field
1851 *
1852 * Preconditions : the NCR chip should be in a halted state when
1853 * abnormal_finished is run, since it modifies structures which
1854 * the NCR expects to have exclusive access to.
1855 */
1856
1857static void
1858abnormal_finished (struct NCR53c7x0_cmd *cmd, int result) {
1859 Scsi_Cmnd *c = cmd->cmd;
1860 struct Scsi_Host *host = c->device->host;
1861 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
1862 host->hostdata[0];
1863 unsigned long flags;
1864 int left, found;
1865 volatile struct NCR53c7x0_cmd * linux_search;
1866 volatile struct NCR53c7x0_cmd * volatile *linux_prev;
1867 volatile u32 *ncr_prev, *ncrcurrent, ncr_search;
1868
1869#if 0
1870 printk ("scsi%d: abnormal finished\n", host->host_no);
1871#endif
1872
1873 local_irq_save(flags);
1874 found = 0;
1875 /*
1876 * Traverse the NCR issue array until we find a match or run out
1877 * of instructions. Instructions in the NCR issue array are
1878 * either JUMP or NOP instructions, which are 2 words in length.
1879 */
1880
1881
1882 for (found = 0, left = host->can_queue, ncrcurrent = hostdata->schedule;
1883 left > 0; --left, ncrcurrent += 2)
1884 {
1885 if (issue_to_cmd (host, hostdata, (u32 *) ncrcurrent) == cmd)
1886 {
1887 ncrcurrent[0] = hostdata->NOP_insn;
1888 ncrcurrent[1] = 0xdeadbeef;
1889 ++found;
1890 break;
1891 }
1892 }
1893
1894 /*
1895 * Traverse the NCR reconnect list of DSA structures until we find
1896 * a pointer to this dsa or have found too many command structures.
1897 * We let prev point at the next field of the previous element or
1898 * head of the list, so we don't do anything different for removing
1899 * the head element.
1900 */
1901
1902 for (left = host->can_queue,
1903 ncr_search = hostdata->reconnect_dsa_head,
1904 ncr_prev = &hostdata->reconnect_dsa_head;
1905 left >= 0 && ncr_search &&
1906 ((char*)bus_to_virt(ncr_search) + hostdata->dsa_start)
1907 != (char *) cmd->dsa;
1908 ncr_prev = (u32*) ((char*)bus_to_virt(ncr_search) +
1909 hostdata->dsa_next), ncr_search = *ncr_prev, --left);
1910
1911 if (left < 0)
1912 printk("scsi%d: loop detected in ncr reconncect list\n",
1913 host->host_no);
1914 else if (ncr_search) {
1915 if (found)
1916 printk("scsi%d: scsi %ld in ncr issue array and reconnect lists\n",
1917 host->host_no, c->pid);
1918 else {
1919 volatile u32 * next = (u32 *)
1920 ((char *)bus_to_virt(ncr_search) + hostdata->dsa_next);
1921 *ncr_prev = *next;
1922/* If we're at the tail end of the issue queue, update that pointer too. */
1923 found = 1;
1924 }
1925 }
1926
1927 /*
1928 * Traverse the host running list until we find this command or discover
1929 * we have too many elements, pointing linux_prev at the next field of the
1930 * linux_previous element or head of the list, search at this element.
1931 */
1932
1933 for (left = host->can_queue, linux_search = hostdata->running_list,
1934 linux_prev = &hostdata->running_list;
1935 left >= 0 && linux_search && linux_search != cmd;
1936 linux_prev = &(linux_search->next),
1937 linux_search = linux_search->next, --left);
1938
1939 if (left < 0)
1940 printk ("scsi%d: loop detected in host running list for scsi pid %ld\n",
1941 host->host_no, c->pid);
1942 else if (linux_search) {
1943 *linux_prev = linux_search->next;
1944 --hostdata->busy[c->device->id][c->device->lun];
1945 }
1946
1947 /* Return the NCR command structure to the free list */
1948 cmd->next = hostdata->free;
1949 hostdata->free = cmd;
1950 c->host_scribble = NULL;
1951
1952 /* And return */
1953 c->result = result;
1954 c->scsi_done(c);
1955
1956 local_irq_restore(flags);
1957 run_process_issue_queue();
1958}
1959
/*
 * Function : static void intr_break (struct Scsi_Host *host,
 *	struct NCR53c7x0_cmd *cmd)
 *
 * Purpose : Handler for breakpoint interrupts from a SCSI script
 *
 * Inputs : host - pointer to this host adapter's structure,
 *	cmd - pointer to the command (if any) dsa was pointing
 *	to.
 *
 */

static void
intr_break (struct Scsi_Host *host, struct
    NCR53c7x0_cmd *cmd) {
    NCR53c7x0_local_declare();
    struct NCR53c7x0_break *bp;
#if 0
    Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
#endif
    u32 *dsp;
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    unsigned long flags;
    NCR53c7x0_local_setup(host);

    /*
     * Find the break point corresponding to this address, and
     * dump the appropriate debugging information to standard
     * output.
     */
    local_irq_save(flags);
    /* DSP holds the bus address of the script location where the chip
     * stopped; translate it back to a kernel virtual address. */
    dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
    /* Linear scan of the registered breakpoint list for this address;
     * an unmatched breakpoint interrupt is treated as fatal. */
    for (bp = hostdata->breakpoints; bp && bp->address != dsp;
	bp = bp->next);
    if (!bp)
	panic("scsi%d : break point interrupt from %p with no breakpoint!",
	    host->host_no, dsp);

    /*
     * Configure the NCR chip for manual start mode, so that we can
     * point the DSP register at the instruction that follows the
     * INT int_debug_break instruction.
     */

    NCR53c7x0_write8 (hostdata->dmode,
	NCR53c7x0_read8(hostdata->dmode)|DMODE_MAN);

    /*
     * And update the DSP register, using the size of the old
     * instruction in bytes.
     *
     * NOTE(review): no code here actually rewrites DSP_REG; the resume
     * presumably happens in the caller after this handler returns --
     * confirm before relying on this comment.
     */

    local_irq_restore(flags);
}
/*
 * Function : static void print_synchronous (const char *prefix,
 *	const unsigned char *msg)
 *
 * Purpose : print a pretty, user and machine parsable representation
 *	of a SDTR message, including the "real" parameters, data
 *	clock so we can tell transfer rate at a glance.
 *
 * Inputs ; prefix - text to prepend, msg - SDTR message (5 bytes);
 *	msg[3] is the period in 4ns units, msg[4] the offset.
 */

static void
print_synchronous (const char *prefix, const unsigned char *msg) {
    int period_ns, hz, whole_mhz, hundredths;

    if (!msg[4]) {
	/* A zero offset means asynchronous transfers. */
	printk ("%sasynchronous SCSI\n", prefix);
	return;
    }

    period_ns = msg[3] * 4;
    hz = 1000000000 / period_ns;
    whole_mhz = hz / 1000000;
    hundredths = (hz - (whole_mhz * 1000000)) / 10000;
    /* Periods shorter than 200ns qualify as FAST SCSI-II. */
    printk ("%speriod %dns offset %d %d.%02dMHz %s SCSI%s\n",
	prefix, period_ns, (int) msg[4], whole_mhz, hundredths,
	((period_ns < 200) ? "FAST" : "synchronous"),
	((period_ns < 200) ? "-II" : ""));
}
2039
/*
 * Function : static void set_synchronous (struct Scsi_Host *host,
 *	int target, int sxfer, int scntl3, int now_connected)
 *
 * Purpose : reprogram transfers between the selected SCSI initiator and
 *	target with the given register values; in the indirect
 *	select operand, reselection script, and chip registers.
 *
 * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
 *	sxfer and scntl3 - NCR registers. now_connected - if non-zero,
 *	we should reprogram the registers now too.
 *
 * NOTE: For 53c710, scntl3 is actually used for SCF bits from
 *	SBCL, as we don't have a SCNTL3.
 */

static void
set_synchronous (struct Scsi_Host *host, int target, int sxfer, int scntl3,
    int now_connected) {
    NCR53c7x0_local_declare();
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    u32 *script;
    NCR53c7x0_local_setup(host);

    /* These are eight bit registers */
    sxfer &= 0xff;
    scntl3 &= 0xff;

    /* Remember what we programmed so int_debug_sync can later verify
     * the chip registers still match (sanity check values). */
    hostdata->sync[target].sxfer_sanity = sxfer;
    hostdata->sync[target].scntl3_sanity = scntl3;

/*
 * HARD CODED : synchronous script is EIGHT words long.  This
 * must agree with 53c7.8xx.h
 */

    if ((hostdata->chip != 700) && (hostdata->chip != 70066)) {
	/* Operand for the indirect select: target-ID bitmask in the
	 * high half, SXFER value in bits 8-15. */
	hostdata->sync[target].select_indirect = (1 << target) << 16 |
	    (sxfer << 8);
	hostdata->sync[target].sscf_710 = scntl3;

	/* Build the per-target reselection script in place: two
	 * register MOVEs (SBCL then SXFER) followed by a RETURN.
	 * Each SCRIPTS instruction is two 32-bit words. */
	script = (u32 *) hostdata->sync[target].script;

	/* XXX - add NCR53c7x0 code to reprogram SCF bits if we want to */
	script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
	    DCMD_RWRI_OP_MOVE) << 24) |
	    (SBCL_REG << 16) | (scntl3 << 8);
	script[1] = 0;
	script += 2;

	script[0] = ((DCMD_TYPE_RWRI | DCMD_RWRI_OPC_MODIFY |
	    DCMD_RWRI_OP_MOVE) << 24) |
	    (SXFER_REG << 16) | (sxfer << 8);
	script[1] = 0;
	script += 2;

#ifdef DEBUG_SYNC_INTR
	/* Optional debug INT so we can see when this script runs. */
	if (hostdata->options & OPTION_DEBUG_DISCONNECT) {
	    script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_INT) << 24) | DBC_TCI_TRUE;
	    script[1] = DEBUG_SYNC_INTR;
	    script += 2;
	}
#endif

	script[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_RETURN) << 24) | DBC_TCI_TRUE;
	script[1] = 0;
	script += 2;
    }

    if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS)
	printk ("scsi%d : target %d sync parameters are sxfer=0x%x, scntl3=0x%x\n",
	    host->host_no, target, sxfer, scntl3);

    /* If the target is on the bus right now, program the chip
     * registers immediately as well. */
    if (now_connected) {
	NCR53c7x0_write8(SBCL_REG, scntl3);
	NCR53c7x0_write8(SXFER_REG, sxfer);
    }
}
2119
2120
2121/*
2122 * Function : static int asynchronous (struct Scsi_Host *host, int target)
2123 *
2124 * Purpose : reprogram between the selected SCSI Host adapter and target
2125 * (assumed to be currently connected) for asynchronous transfers.
2126 *
2127 * Inputs : host - SCSI host structure, target - numeric target ID.
2128 *
2129 * Preconditions : the NCR chip should be in one of the halted states
2130 */
2131
2132static void
2133asynchronous (struct Scsi_Host *host, int target) {
2134 NCR53c7x0_local_declare();
2135 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
2136 host->hostdata[0];
2137 NCR53c7x0_local_setup(host);
2138 set_synchronous (host, target, /* no offset */ 0, hostdata->saved_scntl3,
2139 1);
2140 printk ("scsi%d : setting target %d to asynchronous SCSI\n",
2141 host->host_no, target);
2142}
2143
/*
 * XXX - do we want to go out of our way (ie, add extra code to selection
 * in the NCR53c710/NCR53c720 script) to reprogram the synchronous
 * conversion bits, or can we be content in just setting the
 * sxfer bits?  I chose to do so [richard@sleepie.demon.co.uk]
 */

/* Table for NCR53c8xx synchronous values */

/* This table is also correct for 710, allowing that scf=4 is equivalent
 * of SSCF=0 (ie use DCNTL, divide by 3) for a 50.01-66.00MHz clock.
 * For any other clock values, we cannot use entries with SCF values of
 * 4.  I guess that for a 66MHz clock, the slowest it will set is 2MHz,
 * and for a 50MHz clock, the slowest will be 2.27Mhz.  Should check
 * that a device doesn't try and negotiate sync below these limits!
 */

/* Ordered by ascending total clock divisor so synchronous() can
 * linear-search for the first entry >= the requested divisor. */
static const struct {
    int div;		/* Total clock divisor * 10 */
    unsigned char scf;	/* SCF (clock conversion factor) field value */
    unsigned char tp;	/* 4 + tp = xferp divisor */
} syncs[] = {
/*	div	scf	tp	div	scf	tp	div	scf	tp */
    {	40,	1,	0}, {	50,	1,	1}, {	60,	1,	2},
    {	70,	1,	3}, {	75,	2,	1}, {	80,	1,	4},
    {	90,	1,	5}, {	100,	1,	6}, {	105,	2,	3},
    {	110,	1,	7}, {	120,	2,	4}, {	135,	2,	5},
    {	140,	3,	3}, {	150,	2,	6}, {	160,	3,	4},
    {	165,	2,	7}, {	180,	3,	5}, {	200,	3,	6},
    {	210,	4,	3}, {	220,	3,	7}, {	240,	4,	4},
    {	270,	4,	5}, {	300,	4,	6}, {	330,	4,	7}
};
2176
/*
 * Function : static void synchronous (struct Scsi_Host *host, int target,
 *	char *msg)
 *
 * Purpose : reprogram transfers between the selected SCSI initiator and
 *	target for synchronous SCSI transfers such that the synchronous
 *	offset is less than that requested and period at least as long
 *	as that requested.  Also modify *msg such that it contains
 *	an appropriate response.
 *
 * Inputs : host - NCR53c7,8xx SCSI host, target - number SCSI target id,
 *	msg - synchronous transfer request (5-byte SDTR message;
 *	msg[3] = period / 4ns, msg[4] = offset).  Modified in place
 *	to hold the negotiated response.
 */


static void
synchronous (struct Scsi_Host *host, int target, char *msg) {
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    int desire, divisor, i, limit;
    unsigned char scntl3, sxfer;
/* The diagnostic message fits on one line, even with max. width integers */
    char buf[80];

/* Desired transfer clock in Hz */
/* NOTE(review): assumes msg[3] != 0; a zero period byte here would
 * divide by zero -- confirm callers only pass well-formed SDTRs. */
    desire = 1000000000L / (msg[3] * 4);
/* Scale the available SCSI clock by 10 so we get tenths */
    divisor = (hostdata->scsi_clock * 10) / desire;

/* NCR chips can handle at most an offset of 8 */
    if (msg[4] > 8)
	msg[4] = 8;

    if (hostdata->options & OPTION_DEBUG_SDTR)
	printk("scsi%d : optimal synchronous divisor of %d.%01d\n",
	    host->host_no, divisor / 10, divisor % 10);

    /* Pick the first syncs[] entry whose divisor is >= the request
     * (i.e. equal or slower transfer rate); fall off to the slowest
     * entry when the request exceeds the table. */
    limit = ARRAY_SIZE(syncs) - 1;
    for (i = 0; (i < limit) && (divisor > syncs[i].div); ++i);

    if (hostdata->options & OPTION_DEBUG_SDTR)
	printk("scsi%d : selected synchronous divisor of %d.%01d\n",
	    host->host_no, syncs[i].div / 10, syncs[i].div % 10);

    /* Convert the chosen divisor back into a period field (4ns units)
     * for the SDTR response we will send. */
    msg[3] = ((1000000000L / hostdata->scsi_clock) * syncs[i].div / 10 / 4);

    if (hostdata->options & OPTION_DEBUG_SDTR)
	printk("scsi%d : selected synchronous period of %dns\n", host->host_no,
	    msg[3] * 4);

    /* Build the chip register values: offset in SXFER's MO field,
     * transfer period factor in bits 4-6. */
    scntl3 = syncs[i].scf;
    sxfer = (msg[4] << SXFER_MO_SHIFT) | (syncs[i].tp << 4);
    if (hostdata->options & OPTION_DEBUG_SDTR)
	printk ("scsi%d : sxfer=0x%x scntl3=0x%x\n",
	    host->host_no, (int) sxfer, (int) scntl3);
    set_synchronous (host, target, sxfer, scntl3, 1);
    sprintf (buf, "scsi%d : setting target %d to ", host->host_no, target);
    print_synchronous (buf, msg);
}
2236
2237/*
2238 * Function : static int NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host,
2239 * struct NCR53c7x0_cmd *cmd)
2240 *
2241 * Purpose : Handler for INT generated instructions for the
2242 * NCR53c810/820 SCSI SCRIPT
2243 *
2244 * Inputs : host - pointer to this host adapter's structure,
2245 * cmd - pointer to the command (if any) dsa was pointing
2246 * to.
2247 *
2248 */
2249
2250static int
2251NCR53c7x0_dstat_sir_intr (struct Scsi_Host *host, struct
2252 NCR53c7x0_cmd *cmd) {
2253 NCR53c7x0_local_declare();
2254 int print;
2255 Scsi_Cmnd *c = cmd ? cmd->cmd : NULL;
2256 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
2257 host->hostdata[0];
2258 u32 dsps,*dsp; /* Argument of the INT instruction */
2259
2260 NCR53c7x0_local_setup(host);
2261 dsps = NCR53c7x0_read32(DSPS_REG);
2262 dsp = (u32 *) bus_to_virt(NCR53c7x0_read32(DSP_REG));
2263
2264 /* RGH 150597: Frig. Commands which fail with Check Condition are
2265 * Flagged as successful - hack dsps to indicate check condition */
2266#if 0
2267 /* RGH 200597: Need to disable for BVME6000, as it gets Check Conditions
2268 * and then dies. Seems to handle Check Condition at startup, but
2269 * not mid kernel build. */
2270 if (dsps == A_int_norm_emulateintfly && cmd && cmd->result == 2)
2271 dsps = A_int_err_check_condition;
2272#endif
2273
2274 if (hostdata->options & OPTION_DEBUG_INTR)
2275 printk ("scsi%d : DSPS = 0x%x\n", host->host_no, dsps);
2276
2277 switch (dsps) {
2278 case A_int_msg_1:
2279 print = 1;
2280 switch (hostdata->msg_buf[0]) {
2281 /*
2282 * Unless we've initiated synchronous negotiation, I don't
2283 * think that this should happen.
2284 */
2285 case MESSAGE_REJECT:
2286 hostdata->dsp = hostdata->script + hostdata->E_accept_message /
2287 sizeof(u32);
2288 hostdata->dsp_changed = 1;
2289 if (cmd && (cmd->flags & CMD_FLAG_SDTR)) {
2290 printk ("scsi%d : target %d rejected SDTR\n", host->host_no,
2291 c->device->id);
2292 cmd->flags &= ~CMD_FLAG_SDTR;
2293 asynchronous (host, c->device->id);
2294 print = 0;
2295 }
2296 break;
2297 case INITIATE_RECOVERY:
2298 printk ("scsi%d : extended contingent allegiance not supported yet, rejecting\n",
2299 host->host_no);
2300 /* Fall through to default */
2301 hostdata->dsp = hostdata->script + hostdata->E_reject_message /
2302 sizeof(u32);
2303 hostdata->dsp_changed = 1;
2304 break;
2305 default:
2306 printk ("scsi%d : unsupported message, rejecting\n",
2307 host->host_no);
2308 hostdata->dsp = hostdata->script + hostdata->E_reject_message /
2309 sizeof(u32);
2310 hostdata->dsp_changed = 1;
2311 }
2312 if (print) {
2313 printk ("scsi%d : received message", host->host_no);
2314 if (c)
2315 printk (" from target %d lun %d ", c->device->id, c->device->lun);
2316 spi_print_msg((unsigned char *) hostdata->msg_buf);
2317 printk("\n");
2318 }
2319
2320 return SPECIFIC_INT_NOTHING;
2321
2322
2323 case A_int_msg_sdtr:
2324/*
2325 * At this point, hostdata->msg_buf contains
2326 * 0 EXTENDED MESSAGE
2327 * 1 length
2328 * 2 SDTR
2329 * 3 period * 4ns
2330 * 4 offset
2331 */
2332
2333 if (cmd) {
2334 char buf[80];
2335 sprintf (buf, "scsi%d : target %d %s ", host->host_no, c->device->id,
2336 (cmd->flags & CMD_FLAG_SDTR) ? "accepting" : "requesting");
2337 print_synchronous (buf, (unsigned char *) hostdata->msg_buf);
2338
2339 /*
2340 * Initiator initiated, won't happen unless synchronous
2341 * transfers are enabled. If we get a SDTR message in
2342 * response to our SDTR, we should program our parameters
2343 * such that
2344 * offset <= requested offset
2345 * period >= requested period
2346 */
2347 if (cmd->flags & CMD_FLAG_SDTR) {
2348 cmd->flags &= ~CMD_FLAG_SDTR;
2349 if (hostdata->msg_buf[4])
2350 synchronous (host, c->device->id, (unsigned char *)
2351 hostdata->msg_buf);
2352 else
2353 asynchronous (host, c->device->id);
2354 hostdata->dsp = hostdata->script + hostdata->E_accept_message /
2355 sizeof(u32);
2356 hostdata->dsp_changed = 1;
2357 return SPECIFIC_INT_NOTHING;
2358 } else {
2359 if (hostdata->options & OPTION_SYNCHRONOUS) {
2360 cmd->flags |= CMD_FLAG_DID_SDTR;
2361 synchronous (host, c->device->id, (unsigned char *)
2362 hostdata->msg_buf);
2363 } else {
2364 hostdata->msg_buf[4] = 0; /* 0 offset = async */
2365 asynchronous (host, c->device->id);
2366 }
2367 patch_dsa_32 (cmd->dsa, dsa_msgout_other, 0, 5);
2368 patch_dsa_32 (cmd->dsa, dsa_msgout_other, 1, (u32)
2369 virt_to_bus ((void *)&hostdata->msg_buf));
2370 hostdata->dsp = hostdata->script +
2371 hostdata->E_respond_message / sizeof(u32);
2372 hostdata->dsp_changed = 1;
2373 }
2374 return SPECIFIC_INT_NOTHING;
2375 }
2376 /* Fall through to abort if we couldn't find a cmd, and
2377 therefore a dsa structure to twiddle */
2378 case A_int_msg_wdtr:
2379 hostdata->dsp = hostdata->script + hostdata->E_reject_message /
2380 sizeof(u32);
2381 hostdata->dsp_changed = 1;
2382 return SPECIFIC_INT_NOTHING;
2383 case A_int_err_unexpected_phase:
2384 if (hostdata->options & OPTION_DEBUG_INTR)
2385 printk ("scsi%d : unexpected phase\n", host->host_no);
2386 return SPECIFIC_INT_ABORT;
2387 case A_int_err_selected:
2388 if ((hostdata->chip / 100) == 8)
2389 printk ("scsi%d : selected by target %d\n", host->host_no,
2390 (int) NCR53c7x0_read8(SDID_REG_800) &7);
2391 else
2392 printk ("scsi%d : selected by target LCRC=0x%02x\n", host->host_no,
2393 (int) NCR53c7x0_read8(LCRC_REG_10));
2394 hostdata->dsp = hostdata->script + hostdata->E_target_abort /
2395 sizeof(u32);
2396 hostdata->dsp_changed = 1;
2397 return SPECIFIC_INT_NOTHING;
2398 case A_int_err_unexpected_reselect:
2399 if ((hostdata->chip / 100) == 8)
2400 printk ("scsi%d : unexpected reselect by target %d lun %d\n",
2401 host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & 7,
2402 hostdata->reselected_identify & 7);
2403 else
2404 printk ("scsi%d : unexpected reselect LCRC=0x%02x\n", host->host_no,
2405 (int) NCR53c7x0_read8(LCRC_REG_10));
2406 hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
2407 sizeof(u32);
2408 hostdata->dsp_changed = 1;
2409 return SPECIFIC_INT_NOTHING;
2410/*
2411 * Since contingent allegiance conditions are cleared by the next
2412 * command issued to a target, we must issue a REQUEST SENSE
2413 * command after receiving a CHECK CONDITION status, before
2414 * another command is issued.
2415 *
2416 * Since this NCR53c7x0_cmd will be freed after use, we don't
2417 * care if we step on the various fields, so modify a few things.
2418 */
2419 case A_int_err_check_condition:
2420#if 0
2421 if (hostdata->options & OPTION_DEBUG_INTR)
2422#endif
2423 printk ("scsi%d : CHECK CONDITION\n", host->host_no);
2424 if (!c) {
2425 printk("scsi%d : CHECK CONDITION with no SCSI command\n",
2426 host->host_no);
2427 return SPECIFIC_INT_PANIC;
2428 }
2429
2430 /*
2431 * FIXME : this uses the normal one-byte selection message.
2432 * We may want to renegotiate for synchronous & WIDE transfers
2433 * since these could be the crux of our problem.
2434 *
2435 hostdata->NOP_insn* FIXME : once SCSI-II tagged queuing is implemented, we'll
2436 * have to set this up so that the rest of the DSA
2437 * agrees with this being an untagged queue'd command.
2438 */
2439
2440 patch_dsa_32 (cmd->dsa, dsa_msgout, 0, 1);
2441
2442 /*
2443 * Modify the table indirect for COMMAND OUT phase, since
2444 * Request Sense is a six byte command.
2445 */
2446
2447 patch_dsa_32 (cmd->dsa, dsa_cmdout, 0, 6);
2448
2449 /*
2450 * The CDB is now mirrored in our local non-cached
2451 * structure, but keep the old structure up to date as well,
2452 * just in case anyone looks at it.
2453 */
2454
2455 /*
2456 * XXX Need to worry about data buffer alignment/cache state
2457 * XXX here, but currently never get A_int_err_check_condition,
2458 * XXX so ignore problem for now.
2459 */
2460 cmd->cmnd[0] = c->cmnd[0] = REQUEST_SENSE;
2461 cmd->cmnd[0] = c->cmnd[1] &= 0xe0; /* Zero all but LUN */
2462 cmd->cmnd[0] = c->cmnd[2] = 0;
2463 cmd->cmnd[0] = c->cmnd[3] = 0;
2464 cmd->cmnd[0] = c->cmnd[4] = sizeof(c->sense_buffer);
2465 cmd->cmnd[0] = c->cmnd[5] = 0;
2466
2467 /*
2468 * Disable dataout phase, and program datain to transfer to the
2469 * sense buffer, and add a jump to other_transfer after the
2470 * command so overflow/underrun conditions are detected.
2471 */
2472
2473 patch_dsa_32 (cmd->dsa, dsa_dataout, 0,
2474 virt_to_bus(hostdata->script) + hostdata->E_other_transfer);
2475 patch_dsa_32 (cmd->dsa, dsa_datain, 0,
2476 virt_to_bus(cmd->data_transfer_start));
2477 cmd->data_transfer_start[0] = (((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I |
2478 DCMD_BMI_IO)) << 24) | sizeof(c->sense_buffer);
2479 cmd->data_transfer_start[1] = (u32) virt_to_bus(c->sense_buffer);
2480
2481 cmd->data_transfer_start[2] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP)
2482 << 24) | DBC_TCI_TRUE;
2483 cmd->data_transfer_start[3] = (u32) virt_to_bus(hostdata->script) +
2484 hostdata->E_other_transfer;
2485
2486 /*
2487 * Currently, this command is flagged as completed, ie
2488 * it has valid status and message data. Reflag it as
2489 * incomplete. Q - need to do something so that original
2490 * status, etc are used.
2491 */
2492
2493 cmd->result = cmd->cmd->result = 0xffff;
2494
2495 /*
2496 * Restart command as a REQUEST SENSE.
2497 */
2498 hostdata->dsp = (u32 *) hostdata->script + hostdata->E_select /
2499 sizeof(u32);
2500 hostdata->dsp_changed = 1;
2501 return SPECIFIC_INT_NOTHING;
2502 case A_int_debug_break:
2503 return SPECIFIC_INT_BREAK;
2504 case A_int_norm_aborted:
2505 hostdata->dsp = (u32 *) hostdata->schedule;
2506 hostdata->dsp_changed = 1;
2507 if (cmd)
2508 abnormal_finished (cmd, DID_ERROR << 16);
2509 return SPECIFIC_INT_NOTHING;
2510 case A_int_norm_emulateintfly:
2511 NCR53c7x0_intfly(host);
2512 return SPECIFIC_INT_NOTHING;
2513 case A_int_test_1:
2514 case A_int_test_2:
2515 hostdata->idle = 1;
2516 hostdata->test_completed = (dsps - A_int_test_1) / 0x00010000 + 1;
2517 if (hostdata->options & OPTION_DEBUG_INTR)
2518 printk("scsi%d : test%d complete\n", host->host_no,
2519 hostdata->test_completed);
2520 return SPECIFIC_INT_NOTHING;
2521#ifdef A_int_debug_reselected_ok
2522 case A_int_debug_reselected_ok:
2523 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2524 OPTION_DEBUG_DISCONNECT)) {
2525 /*
2526 * Note - this dsa is not based on location relative to
2527 * the command structure, but to location relative to the
2528 * DSA register
2529 */
2530 u32 *dsa;
2531 dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
2532
2533 printk("scsi%d : reselected_ok (DSA = 0x%x (virt 0x%p)\n",
2534 host->host_no, NCR53c7x0_read32(DSA_REG), dsa);
2535 printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
2536 host->host_no, cmd->saved_data_pointer,
2537 bus_to_virt(cmd->saved_data_pointer));
2538 print_insn (host, hostdata->script + Ent_reselected_ok /
2539 sizeof(u32), "", 1);
2540 if ((hostdata->chip / 100) == 8)
2541 printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
2542 host->host_no, NCR53c7x0_read8(SXFER_REG),
2543 NCR53c7x0_read8(SCNTL3_REG_800));
2544 else
2545 printk ("scsi%d : sxfer=0x%x, cannot read SBCL\n",
2546 host->host_no, NCR53c7x0_read8(SXFER_REG));
2547 if (c) {
2548 print_insn (host, (u32 *)
2549 hostdata->sync[c->device->id].script, "", 1);
2550 print_insn (host, (u32 *)
2551 hostdata->sync[c->device->id].script + 2, "", 1);
2552 }
2553 }
2554 return SPECIFIC_INT_RESTART;
2555#endif
2556#ifdef A_int_debug_reselect_check
2557 case A_int_debug_reselect_check:
2558 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2559 u32 *dsa;
2560#if 0
2561 u32 *code;
2562#endif
2563 /*
2564 * Note - this dsa is not based on location relative to
2565 * the command structure, but to location relative to the
2566 * DSA register
2567 */
2568 dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
2569 printk("scsi%d : reselected_check_next (DSA = 0x%lx (virt 0x%p))\n",
2570 host->host_no, virt_to_bus(dsa), dsa);
2571 if (dsa) {
2572 printk("scsi%d : resume address is 0x%x (virt 0x%p)\n",
2573 host->host_no, cmd->saved_data_pointer,
2574 bus_to_virt (cmd->saved_data_pointer));
2575#if 0
2576 printk("scsi%d : template code :\n", host->host_no);
2577 for (code = dsa + (Ent_dsa_code_check_reselect - Ent_dsa_zero)
2578 / sizeof(u32); code < (dsa + Ent_dsa_zero / sizeof(u32));
2579 code += print_insn (host, code, "", 1));
2580#endif
2581 }
2582 print_insn (host, hostdata->script + Ent_reselected_ok /
2583 sizeof(u32), "", 1);
2584 }
2585 return SPECIFIC_INT_RESTART;
2586#endif
2587#ifdef A_int_debug_dsa_schedule
2588 case A_int_debug_dsa_schedule:
2589 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2590 u32 *dsa;
2591 /*
2592 * Note - this dsa is not based on location relative to
2593 * the command structure, but to location relative to the
2594 * DSA register
2595 */
2596 dsa = (u32 *) bus_to_virt (NCR53c7x0_read32(DSA_REG));
2597 printk("scsi%d : dsa_schedule (old DSA = 0x%lx (virt 0x%p))\n",
2598 host->host_no, virt_to_bus(dsa), dsa);
2599 if (dsa)
2600 printk("scsi%d : resume address is 0x%x (virt 0x%p)\n"
2601 " (temp was 0x%x (virt 0x%p))\n",
2602 host->host_no, cmd->saved_data_pointer,
2603 bus_to_virt (cmd->saved_data_pointer),
2604 NCR53c7x0_read32 (TEMP_REG),
2605 bus_to_virt (NCR53c7x0_read32(TEMP_REG)));
2606 }
2607 return SPECIFIC_INT_RESTART;
2608#endif
2609#ifdef A_int_debug_scheduled
2610 case A_int_debug_scheduled:
2611 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2612 printk("scsi%d : new I/O 0x%x (virt 0x%p) scheduled\n",
2613 host->host_no, NCR53c7x0_read32(DSA_REG),
2614 bus_to_virt(NCR53c7x0_read32(DSA_REG)));
2615 }
2616 return SPECIFIC_INT_RESTART;
2617#endif
2618#ifdef A_int_debug_idle
2619 case A_int_debug_idle:
2620 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2621 printk("scsi%d : idle\n", host->host_no);
2622 }
2623 return SPECIFIC_INT_RESTART;
2624#endif
2625#ifdef A_int_debug_cmd
2626 case A_int_debug_cmd:
2627 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2628 printk("scsi%d : command sent\n");
2629 }
2630 return SPECIFIC_INT_RESTART;
2631#endif
2632#ifdef A_int_debug_dsa_loaded
2633 case A_int_debug_dsa_loaded:
2634 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2635 printk("scsi%d : DSA loaded with 0x%x (virt 0x%p)\n", host->host_no,
2636 NCR53c7x0_read32(DSA_REG),
2637 bus_to_virt(NCR53c7x0_read32(DSA_REG)));
2638 }
2639 return SPECIFIC_INT_RESTART;
2640#endif
2641#ifdef A_int_debug_reselected
2642 case A_int_debug_reselected:
2643 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2644 OPTION_DEBUG_DISCONNECT)) {
2645 if ((hostdata->chip / 100) == 8)
2646 printk("scsi%d : reselected by target %d lun %d\n",
2647 host->host_no, (int) NCR53c7x0_read8(SDID_REG_800) & ~0x80,
2648 (int) hostdata->reselected_identify & 7);
2649 else
2650 printk("scsi%d : reselected by LCRC=0x%02x lun %d\n",
2651 host->host_no, (int) NCR53c7x0_read8(LCRC_REG_10),
2652 (int) hostdata->reselected_identify & 7);
2653 print_queues(host);
2654 }
2655 return SPECIFIC_INT_RESTART;
2656#endif
2657#ifdef A_int_debug_disconnect_msg
2658 case A_int_debug_disconnect_msg:
2659 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR)) {
2660 if (c)
2661 printk("scsi%d : target %d lun %d disconnecting\n",
2662 host->host_no, c->device->id, c->device->lun);
2663 else
2664 printk("scsi%d : unknown target disconnecting\n",
2665 host->host_no);
2666 }
2667 return SPECIFIC_INT_RESTART;
2668#endif
2669#ifdef A_int_debug_disconnected
2670 case A_int_debug_disconnected:
2671 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2672 OPTION_DEBUG_DISCONNECT)) {
2673 printk ("scsi%d : disconnected, new queues are\n",
2674 host->host_no);
2675 print_queues(host);
2676#if 0
2677 /* Not valid on ncr53c710! */
2678 printk ("scsi%d : sxfer=0x%x, scntl3=0x%x\n",
2679 host->host_no, NCR53c7x0_read8(SXFER_REG),
2680 NCR53c7x0_read8(SCNTL3_REG_800));
2681#endif
2682 if (c) {
2683 print_insn (host, (u32 *)
2684 hostdata->sync[c->device->id].script, "", 1);
2685 print_insn (host, (u32 *)
2686 hostdata->sync[c->device->id].script + 2, "", 1);
2687 }
2688 }
2689 return SPECIFIC_INT_RESTART;
2690#endif
2691#ifdef A_int_debug_panic
2692 case A_int_debug_panic:
2693 printk("scsi%d : int_debug_panic received\n", host->host_no);
2694 print_lots (host);
2695 return SPECIFIC_INT_PANIC;
2696#endif
2697#ifdef A_int_debug_saved
2698 case A_int_debug_saved:
2699 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2700 OPTION_DEBUG_DISCONNECT)) {
2701 printk ("scsi%d : saved data pointer 0x%x (virt 0x%p)\n",
2702 host->host_no, cmd->saved_data_pointer,
2703 bus_to_virt (cmd->saved_data_pointer));
2704 print_progress (c);
2705 }
2706 return SPECIFIC_INT_RESTART;
2707#endif
2708#ifdef A_int_debug_restored
2709 case A_int_debug_restored:
2710 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2711 OPTION_DEBUG_DISCONNECT)) {
2712 if (cmd) {
2713 int size;
2714 printk ("scsi%d : restored data pointer 0x%x (virt 0x%p)\n",
2715 host->host_no, cmd->saved_data_pointer, bus_to_virt (
2716 cmd->saved_data_pointer));
2717 size = print_insn (host, (u32 *)
2718 bus_to_virt(cmd->saved_data_pointer), "", 1);
2719 size = print_insn (host, (u32 *)
2720 bus_to_virt(cmd->saved_data_pointer) + size, "", 1);
2721 print_progress (c);
2722 }
2723#if 0
2724 printk ("scsi%d : datapath residual %d\n",
2725 host->host_no, datapath_residual (host)) ;
2726#endif
2727 }
2728 return SPECIFIC_INT_RESTART;
2729#endif
2730#ifdef A_int_debug_sync
2731 case A_int_debug_sync:
2732 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2733 OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
2734 unsigned char sxfer = NCR53c7x0_read8 (SXFER_REG), scntl3;
2735 if ((hostdata->chip / 100) == 8) {
2736 scntl3 = NCR53c7x0_read8 (SCNTL3_REG_800);
2737 if (c) {
2738 if (sxfer != hostdata->sync[c->device->id].sxfer_sanity ||
2739 scntl3 != hostdata->sync[c->device->id].scntl3_sanity) {
2740 printk ("scsi%d : sync sanity check failed sxfer=0x%x, scntl3=0x%x",
2741 host->host_no, sxfer, scntl3);
2742 NCR53c7x0_write8 (SXFER_REG, sxfer);
2743 NCR53c7x0_write8 (SCNTL3_REG_800, scntl3);
2744 }
2745 } else
2746 printk ("scsi%d : unknown command sxfer=0x%x, scntl3=0x%x\n",
2747 host->host_no, (int) sxfer, (int) scntl3);
2748 } else {
2749 if (c) {
2750 if (sxfer != hostdata->sync[c->device->id].sxfer_sanity) {
2751 printk ("scsi%d : sync sanity check failed sxfer=0x%x",
2752 host->host_no, sxfer);
2753 NCR53c7x0_write8 (SXFER_REG, sxfer);
2754 NCR53c7x0_write8 (SBCL_REG,
2755 hostdata->sync[c->device->id].sscf_710);
2756 }
2757 } else
2758 printk ("scsi%d : unknown command sxfer=0x%x\n",
2759 host->host_no, (int) sxfer);
2760 }
2761 }
2762 return SPECIFIC_INT_RESTART;
2763#endif
2764#ifdef A_int_debug_datain
2765 case A_int_debug_datain:
2766 if (hostdata->options & (OPTION_DEBUG_SCRIPT|OPTION_DEBUG_INTR|
2767 OPTION_DEBUG_DISCONNECT|OPTION_DEBUG_SDTR)) {
2768 int size;
2769 if ((hostdata->chip / 100) == 8)
2770 printk ("scsi%d : In do_datain (%s) sxfer=0x%x, scntl3=0x%x\n"
2771 " datapath residual=%d\n",
2772 host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
2773 (int) NCR53c7x0_read8(SXFER_REG),
2774 (int) NCR53c7x0_read8(SCNTL3_REG_800),
2775 datapath_residual (host)) ;
2776 else
2777 printk ("scsi%d : In do_datain (%s) sxfer=0x%x\n"
2778 " datapath residual=%d\n",
2779 host->host_no, sbcl_to_phase (NCR53c7x0_read8 (SBCL_REG)),
2780 (int) NCR53c7x0_read8(SXFER_REG),
2781 datapath_residual (host)) ;
2782 print_insn (host, dsp, "", 1);
2783 size = print_insn (host, (u32 *) bus_to_virt(dsp[1]), "", 1);
2784 print_insn (host, (u32 *) bus_to_virt(dsp[1]) + size, "", 1);
2785 }
2786 return SPECIFIC_INT_RESTART;
2787#endif
2788#ifdef A_int_debug_check_dsa
2789 case A_int_debug_check_dsa:
2790 if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
2791 int sdid;
2792 int tmp;
2793 char *where;
2794 if (hostdata->chip / 100 == 8)
2795 sdid = NCR53c7x0_read8 (SDID_REG_800) & 15;
2796 else {
2797 tmp = NCR53c7x0_read8 (SDID_REG_700);
2798 if (!tmp)
2799 panic ("SDID_REG_700 = 0");
2800 tmp >>= 1;
2801 sdid = 0;
2802 while (tmp) {
2803 tmp >>= 1;
2804 sdid++;
2805 }
2806 }
2807 where = dsp - NCR53c7x0_insn_size(NCR53c7x0_read8
2808 (DCMD_REG)) == hostdata->script +
2809 Ent_select_check_dsa / sizeof(u32) ?
2810 "selection" : "reselection";
2811 if (c && sdid != c->device->id) {
2812 printk ("scsi%d : SDID target %d != DSA target %d at %s\n",
2813 host->host_no, sdid, c->device->id, where);
2814 print_lots(host);
2815 dump_events (host, 20);
2816 return SPECIFIC_INT_PANIC;
2817 }
2818 }
2819 return SPECIFIC_INT_RESTART;
2820#endif
2821 default:
2822 if ((dsps & 0xff000000) == 0x03000000) {
2823 printk ("scsi%d : misc debug interrupt 0x%x\n",
2824 host->host_no, dsps);
2825 return SPECIFIC_INT_RESTART;
2826 } else if ((dsps & 0xff000000) == 0x05000000) {
2827 if (hostdata->events) {
2828 struct NCR53c7x0_event *event;
2829 ++hostdata->event_index;
2830 if (hostdata->event_index >= hostdata->event_size)
2831 hostdata->event_index = 0;
2832 event = (struct NCR53c7x0_event *) hostdata->events +
2833 hostdata->event_index;
2834 event->event = (enum ncr_event) dsps;
2835 event->dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
2836 if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
2837 if (hostdata->chip / 100 == 8)
2838 event->target = NCR53c7x0_read8(SSID_REG_800);
2839 else {
2840 unsigned char tmp, sdid;
2841 tmp = NCR53c7x0_read8 (SDID_REG_700);
2842 if (!tmp)
2843 panic ("SDID_REG_700 = 0");
2844 tmp >>= 1;
2845 sdid = 0;
2846 while (tmp) {
2847 tmp >>= 1;
2848 sdid++;
2849 }
2850 event->target = sdid;
2851 }
2852 }
2853 else
2854 event->target = 255;
2855
2856 if (event->event == EVENT_RESELECT)
2857 event->lun = hostdata->reselected_identify & 0xf;
2858 else if (c)
2859 event->lun = c->device->lun;
2860 else
2861 event->lun = 255;
2862 do_gettimeofday(&(event->time));
2863 if (c) {
2864 event->pid = c->pid;
2865 memcpy ((void *) event->cmnd, (void *) c->cmnd,
2866 sizeof (event->cmnd));
2867 } else {
2868 event->pid = -1;
2869 }
2870 }
2871 return SPECIFIC_INT_RESTART;
2872 }
2873
2874 printk ("scsi%d : unknown user interrupt 0x%x\n",
2875 host->host_no, (unsigned) dsps);
2876 return SPECIFIC_INT_PANIC;
2877 }
2878}
2879
2880/*
2881 * XXX - the stock NCR assembler won't output the scriptu.h file,
2882 * which undefine's all #define'd CPP symbols from the script.h
2883 * file, which will create problems if you use multiple scripts
2884 * with the same symbol names.
2885 *
2886 * If you insist on using NCR's assembler, you could generate
2887 * scriptu.h from script.h using something like
2888 *
 * grep '#define' script.h | \
 * sed 's/#define[ ][ ]*\([_a-zA-Z][_a-zA-Z0-9]*\).*$/#undef \1/' \
 * > scriptu.h
2892 */
2893
2894#include "53c7xx_u.h"
2895
2896/* XXX - add alternate script handling code here */
2897
2898
2899/*
 * Function : static void NCR53c7x0_soft_reset (struct Scsi_Host *host)
2901 *
2902 * Purpose : perform a soft reset of the NCR53c7xx chip
2903 *
2904 * Inputs : host - pointer to this host adapter's structure
2905 *
2906 * Preconditions : NCR53c7x0_init must have been called for this
2907 * host.
2908 *
2909 */
2910
static void
NCR53c7x0_soft_reset (struct Scsi_Host *host) {
    NCR53c7x0_local_declare();
    unsigned long flags;
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    NCR53c7x0_local_setup(host);

    /* The whole reset sequence runs with local interrupts masked */
    local_irq_save(flags);

    /* Disable scsi chip and s/w level 7 ints */

#ifdef CONFIG_MVME16x
    if (MACH_IS_MVME16x)
    {
        volatile unsigned long v;

        /* Board-specific interrupt-control registers at fixed physical
         * addresses on the MVME16x; clear the SCSI enable bits before
         * resetting the chip so no spurious interrupt is delivered. */
        v = *(volatile unsigned long *)0xfff4006c;
        v &= ~0x8000;
        *(volatile unsigned long *)0xfff4006c = v;
        v = *(volatile unsigned long *)0xfff4202c;
        v &= ~0x10;
        *(volatile unsigned long *)0xfff4202c = v;
    }
#endif
    /* Anything specific for your hardware? */

    /*
     * Do a soft reset of the chip so that everything is
     * reinitialized to the power-on state.
     *
     * Basically follow the procedure outlined in the NCR53c700
     * data manual under Chapter Six, How to Use, Steps Necessary to
     * Start SCRIPTS, with the exception of actually starting the
     * script and setting up the synchronous transfer gunk.
     */

    /* Should we reset the scsi bus here??????????????????? */

    /* Pulse the software-reset bit in ISTAT: set, then clear */
    NCR53c7x0_write8(ISTAT_REG_700, ISTAT_10_SRST);
    NCR53c7x0_write8(ISTAT_REG_700, 0);

    /*
     * saved_dcntl is set up in NCR53c7x0_init() before it is overwritten
     * here. We should have some better way of working out the CF bit
     * setting..
     */

    /* Select the SCLK clock-conversion factor from the board's clock rate */
    hostdata->saved_dcntl = DCNTL_10_EA|DCNTL_10_COM;
    if (hostdata->scsi_clock > 50000000)
	hostdata->saved_dcntl |= DCNTL_700_CF_3;
    else
    if (hostdata->scsi_clock > 37500000)
        hostdata->saved_dcntl |= DCNTL_700_CF_2;
#if 0
    else
	/* Any clocks less than 37.5MHz? */
#endif

    /* DCNTL_SSM puts the chip in single-step mode for instruction tracing */
    if (hostdata->options & OPTION_DEBUG_TRACE)
    	NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl | DCNTL_SSM);
    else
    	NCR53c7x0_write8(DCNTL_REG, hostdata->saved_dcntl);
    /* Following disables snooping - snooping is not required, as non-
     * cached pages are used for shared data, and appropriate use is
     * made of cache_push/cache_clear.  Indeed, for 68060
     * enabling snooping causes disk corruption of ext2fs free block
     * bitmaps and the like.  If you have a 68060 with snooping hardwared
     * on, then you need to enable CONFIG_060_WRITETHROUGH.
     */
    NCR53c7x0_write8(CTEST7_REG, CTEST7_10_TT1|CTEST7_STD);
    /* Actually burst of eight, according to my 53c710 databook */
    NCR53c7x0_write8(hostdata->dmode, DMODE_10_BL_8 | DMODE_10_FC2);
    /* Our own SCSI ID, as a one-hot bit mask */
    NCR53c7x0_write8(SCID_REG, 1 << host->this_id);
    NCR53c7x0_write8(SBCL_REG, 0);
    NCR53c7x0_write8(SCNTL1_REG, SCNTL1_ESR_700);
    /* Parity checking (EPC) only when the user asked for OPTION_PARITY */
    NCR53c7x0_write8(SCNTL0_REG, ((hostdata->options & OPTION_PARITY) ?
            SCNTL0_EPC : 0) | SCNTL0_EPG_700 | SCNTL0_ARB1 | SCNTL0_ARB2);

    /*
     * Enable all interrupts, except parity which we only want when
     * the user requests it.
     */

    NCR53c7x0_write8(DIEN_REG, DIEN_700_BF |
		DIEN_ABRT | DIEN_SSI | DIEN_SIR | DIEN_700_OPC);

    NCR53c7x0_write8(SIEN_REG_700, ((hostdata->options & OPTION_PARITY) ?
	    SIEN_PAR : 0) | SIEN_700_STO | SIEN_RST | SIEN_UDC |
		SIEN_SGE | SIEN_MA);

#ifdef CONFIG_MVME16x
    if (MACH_IS_MVME16x)
    {
        volatile unsigned long v;

        /* Enable scsi chip and s/w level 7 ints */
        v = *(volatile unsigned long *)0xfff40080;
        v = (v & ~(0xf << 28)) | (4 << 28);
        *(volatile unsigned long *)0xfff40080 = v;
        v = *(volatile unsigned long *)0xfff4006c;
        v |= 0x8000;
        *(volatile unsigned long *)0xfff4006c = v;
        v = *(volatile unsigned long *)0xfff4202c;
        v = (v & ~0xff) | 0x10 | 4;
        *(volatile unsigned long *)0xfff4202c = v;
    }
#endif
    /* Anything needed for your hardware? */
    local_irq_restore(flags);
}
3022
3023
3024/*
3025 * Function static struct NCR53c7x0_cmd *allocate_cmd (Scsi_Cmnd *cmd)
3026 *
3027 * Purpose : Return the first free NCR53c7x0_cmd structure (which are
3028 * reused in a LIFO manner to minimize cache thrashing).
3029 *
3030 * Side effects : If we haven't yet scheduled allocation of NCR53c7x0_cmd
3031 * structures for this device, do so. Attempt to complete all scheduled
3032 * allocations using get_zeroed_page(), putting NCR53c7x0_cmd structures on
3033 * the free list. Teach programmers not to drink and hack.
3034 *
3035 * Inputs : cmd - SCSI command
3036 *
3037 * Returns : NCR53c7x0_cmd structure allocated on behalf of cmd;
3038 * NULL on failure.
3039 */
3040
3041static void
3042my_free_page (void *addr, int dummy)
3043{
3044 /* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
3045 * XXX may be invalid (CONFIG_060_WRITETHROUGH)
3046 */
3047 kernel_set_cachemode((void *)addr, 4096, IOMAP_FULL_CACHING);
3048 free_page ((u32)addr);
3049}
3050
static struct NCR53c7x0_cmd *
allocate_cmd (Scsi_Cmnd *cmd) {
    struct Scsi_Host *host = cmd->device->host;
    struct NCR53c7x0_hostdata *hostdata =
	(struct NCR53c7x0_hostdata *) host->hostdata[0];
    u32 real;			/* Real address */
    int size;			/* Size of *tmp */
    struct NCR53c7x0_cmd *tmp;
    unsigned long flags;

    if (hostdata->options & OPTION_DEBUG_ALLOCATION)
	printk ("scsi%d : num_cmds = %d, can_queue = %d\n"
		" target = %d, lun = %d, %s\n",
		host->host_no, hostdata->num_cmds, host->can_queue,
		cmd->device->id, cmd->device->lun, (hostdata->cmd_allocated[cmd->device->id] &
		    (1 << cmd->device->lun)) ? "already allocated" : "not allocated");

/*
 * If we have not yet reserved commands for this I_T_L nexus, and
 * the device exists (as indicated by permanent Scsi_Cmnd structures
 * being allocated under 1.3.x, or being outside of scan_scsis in
 * 1.2.x), do so now.
 */
    /* cmd_allocated is a per-target bitmask of LUNs we have reserved
     * command slots for; extra_allocate is the number of structures
     * the loop below still has to create. */
    if (!(hostdata->cmd_allocated[cmd->device->id] & (1 << cmd->device->lun)) &&
				cmd->device && cmd->device->has_cmdblocks) {
      if ((hostdata->extra_allocate + hostdata->num_cmds) < host->can_queue)
          hostdata->extra_allocate += host->cmd_per_lun;
      hostdata->cmd_allocated[cmd->device->id] |= (1 << cmd->device->lun);
    }

    /* Replenish the free list: one whole page per NCR53c7x0_cmd, mapped
     * uncached so the chip and CPU agree on its contents. */
    for (; hostdata->extra_allocate > 0 ; --hostdata->extra_allocate,
    	++hostdata->num_cmds) {
    /* historically, kmalloc has returned unaligned addresses; pad so we
       have enough room to ROUNDUP */
	size = hostdata->max_cmd_size + sizeof (void *);
#ifdef FORCE_DSA_ALIGNMENT
	/*
	 * 53c710 rev.0 doesn't have an add-with-carry instruction.
	 * Ensure we allocate enough memory to force alignment.
	 */
	size += 256;
#endif
/* FIXME: for ISA bus '7xx chips, we need to or GFP_DMA in here */

        if (size > 4096) {
            printk (KERN_ERR "53c7xx: allocate_cmd size > 4K\n");
	    return NULL;
	}
        real = get_zeroed_page(GFP_ATOMIC);
        if (real == 0)
        	return NULL;
        /* Flush any cached lines for this page before remapping it
         * uncached/serialized for DMA sharing with the chip. */
        cache_push(virt_to_phys((void *)real), 4096);
        cache_clear(virt_to_phys((void *)real), 4096);
        kernel_set_cachemode((void *)real, 4096, IOMAP_NOCACHE_SER);
	tmp = ROUNDUP(real, void *);
#ifdef FORCE_DSA_ALIGNMENT
	{
	    /* Align the command structure so the DSA falls at offset
	     * CmdPageStart within a 256-byte boundary. */
	    if (((u32)tmp & 0xff) > CmdPageStart)
		tmp = (struct NCR53c7x0_cmd *)((u32)tmp + 255);
	    tmp = (struct NCR53c7x0_cmd *)(((u32)tmp & ~0xff) + CmdPageStart);
#if 0
	    printk ("scsi: size = %d, real = 0x%08x, tmp set to 0x%08x\n",
			size, real, (u32)tmp);
#endif
	}
#endif
	/* Remember the raw page and its destructor so the structure can be
	 * released (and re-cached) later via tmp->free. */
	tmp->real = (void *)real;
	tmp->size = size;			
	tmp->free = ((void (*)(void *, int)) my_free_page);
	local_irq_save(flags);
	tmp->next = hostdata->free;
	hostdata->free = tmp;
	local_irq_restore(flags);
    }
    /* Pop the head of the free list (LIFO, to minimize cache thrashing) */
    local_irq_save(flags);
    tmp = (struct NCR53c7x0_cmd *) hostdata->free;
    if (tmp) {
	hostdata->free = tmp->next;
    }
    local_irq_restore(flags);
    if (!tmp)
	printk ("scsi%d : can't allocate command for target %d lun %d\n",
	    host->host_no, cmd->device->id, cmd->device->lun);
    return tmp;
}
3136
3137/*
3138 * Function static struct NCR53c7x0_cmd *create_cmd (Scsi_Cmnd *cmd)
3139 *
3140 *
3141 * Purpose : allocate a NCR53c7x0_cmd structure, initialize it based on the
3142 * Scsi_Cmnd structure passed in cmd, including dsa and Linux field
3143 * initialization, and dsa code relocation.
3144 *
3145 * Inputs : cmd - SCSI command
3146 *
3147 * Returns : NCR53c7x0_cmd structure corresponding to cmd,
3148 * NULL on failure.
3149 */
static struct NCR53c7x0_cmd *
create_cmd (Scsi_Cmnd *cmd) {
    NCR53c7x0_local_declare();
    struct Scsi_Host *host = cmd->device->host;
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];	
    struct NCR53c7x0_cmd *tmp; 	/* NCR53c7x0_cmd structure for this command */
    int datain,  		/* Number of instructions per phase */
	dataout;
    int data_transfer_instructions, /* Count of dynamic instructions */
	i;			/* Counter */
    u32 *cmd_datain,		/* Address of datain/dataout code */
	*cmd_dataout;		/* Incremented as we assemble */
#ifdef notyet
    unsigned char *msgptr;	/* Current byte in select message */
    int msglen;			/* Length of whole select message */
#endif
    unsigned long flags;
    u32 exp_select_indirect;	/* Used in sanity check */
    NCR53c7x0_local_setup(cmd->device->host);

    if (!(tmp = allocate_cmd (cmd)))
	return NULL;

    /*
     * Copy CDB and initialised result fields from Scsi_Cmnd to NCR53c7x0_cmd.
     * We do this because NCR53c7x0_cmd may have a special cache mode
     * selected to cope with lack of bus snooping, etc.
     */

    memcpy(tmp->cmnd, cmd->cmnd, 12);
    tmp->result = cmd->result;

    /*
     * Decide whether we need to generate commands for DATA IN,
     * DATA OUT, neither, or both based on the SCSI command 
     */

    switch (cmd->cmnd[0]) {
    /* These commands do DATA IN */
    case INQUIRY:
    case MODE_SENSE:
    case READ_6:
    case READ_10:
    case READ_CAPACITY:
    case REQUEST_SENSE:
    case READ_BLOCK_LIMITS:
    case READ_TOC:
	/* 2 instructions per scatter/gather segment (CALL + MOVE),
	 * plus 3 spare slots -- presumably for the trailing JUMP and
	 * request-sense rewriting; TODO confirm against the SCRIPTS. */
	datain = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
    	dataout = 0;
	break;
    /* These commands do DATA OUT */
    case MODE_SELECT: 
    case WRITE_6:
    case WRITE_10:
#if 0
	printk("scsi%d : command is ", host->host_no);
	__scsi_print_command(cmd->cmnd);
#endif
#if 0
	printk ("scsi%d : %d scatter/gather segments\n", host->host_no,
	    cmd->use_sg);
#endif
    	datain = 0;
	dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
#if 0
	hostdata->options |= OPTION_DEBUG_INTR;
#endif
	break;
    /* 
     * These commands do no data transfer, we should force an
     * interrupt if a data phase is attempted on them.
     */
    case TEST_UNIT_READY:
    case ALLOW_MEDIUM_REMOVAL:
    case START_STOP:
    	datain = dataout = 0;
	break;
    /*
     * We don't know about these commands, so generate code to handle
     * both DATA IN and DATA OUT phases.  More efficient to identify them
     * and add them to the above cases.
     */
    default:
	printk("scsi%d : datain+dataout for command ", host->host_no);
	__scsi_print_command(cmd->cmnd);
	datain = dataout = 2 * (cmd->use_sg ? cmd->use_sg : 1) + 3;
    }

    /*
     * New code : so that active pointers work correctly regardless
     * 	of where the saved data pointer is at, we want to immediately
     * 	enter the dynamic code after selection, and on a non-data
     * 	phase perform a CALL to the non-data phase handler, with
     * 	returns back to this address.
     *
     * 	If a phase mismatch is encountered in the middle of a 
     * 	Block MOVE instruction, we want to _leave_ that instruction
     *	unchanged as the current case is, modify a temporary buffer,
     *	and point the active pointer (TEMP) at that.
     *
     * 	Furthermore, we want to implement a saved data pointer, 
     * 	set by the SAVE_DATA_POINTERs message.
     *
     * 	So, the data transfer segments will change to 
     *		CALL data_transfer, WHEN NOT data phase
     *		MOVE x, x, WHEN data phase
     *		( repeat )
     *		JUMP other_transfer
     */

    data_transfer_instructions = datain + dataout;

    /*
     * When we perform a request sense, we overwrite various things,
     * including the data transfer code.  Make sure we have enough
     * space to do that.
     */

    if (data_transfer_instructions < 2)
    	data_transfer_instructions = 2;


    /*
     * The saved data pointer is set up so that a RESTORE POINTERS message 
     * will start the data transfer over at the beginning.
     */

    tmp->saved_data_pointer = virt_to_bus (hostdata->script) +
	hostdata->E_data_transfer;

    /*
     * Initialize Linux specific fields.
     */

    tmp->cmd = cmd;
    tmp->next = NULL;
    tmp->flags = 0;
    /* Bus addresses of this command's DSA, for the chip's benefit */
    tmp->dsa_next_addr = virt_to_bus(tmp->dsa) + hostdata->dsa_next - 
	hostdata->dsa_start;
    tmp->dsa_addr = virt_to_bus(tmp->dsa) - hostdata->dsa_start;

    /* 
     * Calculate addresses of dynamic code to fill in DSA
     */

    tmp->data_transfer_start = tmp->dsa + (hostdata->dsa_end - 
    	hostdata->dsa_start) / sizeof(u32);
    tmp->data_transfer_end = tmp->data_transfer_start + 
    	2 * data_transfer_instructions;

    /* datain code is assembled first; dataout code follows it */
    cmd_datain = datain ? tmp->data_transfer_start : NULL;
    cmd_dataout = dataout ? (datain ? cmd_datain + 2 * datain : tmp->
    	data_transfer_start) : NULL;

    /*
     * Fill in the NCR53c7x0_cmd structure as follows
     * dsa, with fixed up DSA code
     * datain code
     * dataout code
     */

    /* Copy template code into dsa and perform all necessary fixups */
    if (hostdata->dsa_fixup)
    	hostdata->dsa_fixup(tmp);

    patch_dsa_32(tmp->dsa, dsa_next, 0, 0);
    /*
     * XXX is this giving 53c710 access to the Scsi_Cmnd in some way?
     * Do we need to change it for caching reasons?
     */
    patch_dsa_32(tmp->dsa, dsa_cmnd, 0, virt_to_bus(cmd));

    if (hostdata->options & OPTION_DEBUG_SYNCHRONOUS) {

	/* Cross-check the cached select_indirect word against what we
	 * would reconstruct from the per-target sync bookkeeping. */
	exp_select_indirect = ((1 << cmd->device->id) << 16) |
			(hostdata->sync[cmd->device->id].sxfer_sanity << 8);

	if (hostdata->sync[cmd->device->id].select_indirect !=
				exp_select_indirect) {
	    printk ("scsi%d :  sanity check failed select_indirect=0x%x\n",
			host->host_no, hostdata->sync[cmd->device->id].select_indirect);
	    FATAL(host);

	}
    }

    patch_dsa_32(tmp->dsa, dsa_select, 0,
		hostdata->sync[cmd->device->id].select_indirect);

    /*
     * Right now, we'll do the WIDE and SYNCHRONOUS negotiations on
     * different commands; although it should be trivial to do them
     * both at the same time.
     */
    if (hostdata->initiate_wdtr & (1 << cmd->device->id)) {
	memcpy ((void *) (tmp->select + 1), (void *) wdtr_message,
	    sizeof(wdtr_message));
    	patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(wdtr_message));
	local_irq_save(flags);
	hostdata->initiate_wdtr &= ~(1 << cmd->device->id);
	local_irq_restore(flags);
    } else if (hostdata->initiate_sdtr & (1 << cmd->device->id)) {
	memcpy ((void *) (tmp->select + 1), (void *) sdtr_message, 
	    sizeof(sdtr_message));
    	patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(sdtr_message));
	tmp->flags |= CMD_FLAG_SDTR;
	local_irq_save(flags);
	hostdata->initiate_sdtr &= ~(1 << cmd->device->id);
	local_irq_restore(flags);
    
    }
#if 1
    else if (!(hostdata->talked_to & (1 << cmd->device->id)) && 
		!(hostdata->options & OPTION_NO_ASYNC)) {

	/* First contact with this target: negotiate asynchronous transfers */
	memcpy ((void *) (tmp->select + 1), (void *) async_message, 
	    sizeof(async_message));
    	patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1 + sizeof(async_message));
	tmp->flags |= CMD_FLAG_SDTR;
    } 
#endif
    else 
    	patch_dsa_32(tmp->dsa, dsa_msgout, 0, 1);

    hostdata->talked_to |= (1 << cmd->device->id);
    /* Leading byte of the msgout buffer is always the IDENTIFY message */
    tmp->select[0] = (hostdata->options & OPTION_DISCONNECT) ? 
	IDENTIFY (1, cmd->device->lun) : IDENTIFY (0, cmd->device->lun);
    patch_dsa_32(tmp->dsa, dsa_msgout, 1, virt_to_bus(tmp->select));
    patch_dsa_32(tmp->dsa, dsa_cmdout, 0, cmd->cmd_len);
    patch_dsa_32(tmp->dsa, dsa_cmdout, 1, virt_to_bus(tmp->cmnd));
    patch_dsa_32(tmp->dsa, dsa_dataout, 0, cmd_dataout ? 
    	    virt_to_bus (cmd_dataout)
	: virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
    patch_dsa_32(tmp->dsa, dsa_datain, 0, cmd_datain ? 
    	    virt_to_bus (cmd_datain) 
	: virt_to_bus (hostdata->script) + hostdata->E_other_transfer);
    /* 
     * XXX - need to make endian aware, should use separate variables
     * for both status and message bytes.
     */
    patch_dsa_32(tmp->dsa, dsa_msgin, 0, 1);
/* 
 * FIXME : these only works for little endian.  We probably want to 
 * 	provide message and status fields in the NCR53c7x0_cmd 
 *	structure, and assign them to cmd->result when we're done.
 */
#ifdef BIG_ENDIAN
    patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&tmp->result) + 2);
    patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
    patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&tmp->result) + 3);
#else
    patch_dsa_32(tmp->dsa, dsa_msgin, 1, virt_to_bus(&tmp->result) + 1);
    patch_dsa_32(tmp->dsa, dsa_status, 0, 1);
    patch_dsa_32(tmp->dsa, dsa_status, 1, virt_to_bus(&tmp->result));
#endif
    patch_dsa_32(tmp->dsa, dsa_msgout_other, 0, 1);
    patch_dsa_32(tmp->dsa, dsa_msgout_other, 1, 
	virt_to_bus(&(hostdata->NCR53c7xx_msg_nop)));
    
    /*
     * Generate code for zero or more of the DATA IN, DATA OUT phases 
     * in the format 
     *
     * CALL data_transfer, WHEN NOT phase
     * MOVE first buffer length, first buffer address, WHEN phase
     * ...
     * MOVE last buffer length, last buffer address, WHEN phase
     * JUMP other_transfer
     */

/* 
 * See if we're getting to data transfer by generating an unconditional 
 * interrupt.
 */
#if 0
    if (datain) {
	cmd_datain[0] = 0x98080000;
	cmd_datain[1] = 0x03ffd00d;
	cmd_datain += 2;
    }
#endif

/* 
 * XXX - I'm undecided whether all of this nonsense is faster
 * in the long run, or whether I should just go and implement a loop
 * on the NCR chip using table indirect mode?
 *
 * In any case, this is how it _must_ be done for 53c700/700-66 chips,
 * so this stays even when we come up with something better.
 *
 * When we're limited to 1 simultaneous command, no overlapping processing,
 * we're seeing 630K/sec, with 7% CPU usage on a slow Syquest 45M
 * drive.
 *
 * Not bad, not good. We'll see.
 */

    tmp->bounce.len = 0;	/* Assume aligned buffer */

    /* One iteration per scatter/gather segment (or exactly one for a
     * linear request_buffer); each emits a CALL + MOVE instruction pair
     * (4 words) into the datain and/or dataout streams. */
    for (i = 0; cmd->use_sg ? (i < cmd->use_sg) : !i; cmd_datain += 4, 
	cmd_dataout += 4, ++i) {
	u32 vbuf = cmd->use_sg
	    ? (u32)page_address(((struct scatterlist *)cmd->request_buffer)[i].page)+
	      ((struct scatterlist *)cmd->request_buffer)[i].offset
	    : (u32)(cmd->request_buffer);
	u32 bbuf = virt_to_bus((void *)vbuf);
	u32 count = cmd->use_sg ?
	    ((struct scatterlist *)cmd->request_buffer)[i].length :
	    cmd->request_bufflen;

	/*
	 * If we have buffers which are not aligned with 16 byte cache
	 * lines, then we just hope nothing accesses the other parts of
	 * those cache lines while the transfer is in progress.  That would
	 * fill the cache, and subsequent reads of the dma data would pick
	 * up the wrong thing.
	 * XXX  We need a bounce buffer to handle that correctly.
	 */

	if (((bbuf & 15) || (count & 15)) && (datain || dataout))
	{
	    /* Bounce buffer needed */
	    if (cmd->use_sg)
		printk ("53c7xx: Non-aligned buffer with use_sg\n");
	    else if (datain && dataout)
                printk ("53c7xx: Non-aligned buffer with datain && dataout\n");
            else if (count > 256)
		printk ("53c7xx: Non-aligned transfer > 256 bytes\n");
	    else
	    {
		    if (datain)
		    {
			/* Seed the bounce buffer with a recognizable
			 * pattern, presumably to make an incomplete
			 * transfer detectable -- TODO confirm. */
			tmp->bounce.len = count;
			tmp->bounce.addr = vbuf;
			bbuf = virt_to_bus(tmp->bounce.buf);
			tmp->bounce.buf[0] = 0xff;
			tmp->bounce.buf[1] = 0xfe;
			tmp->bounce.buf[2] = 0xfd;
			tmp->bounce.buf[3] = 0xfc;
		    }
		    if (dataout)
		    {
			memcpy ((void *)tmp->bounce.buf, (void *)vbuf, count);
			bbuf = virt_to_bus(tmp->bounce.buf);
		    }
	    }
	}

	if (datain) {
	    cache_clear(virt_to_phys((void *)vbuf), count);
	    /* CALL other_in, WHEN NOT DATA_IN */  
	    cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL | 
		DCMD_TCI_IO) << 24) | 
		DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
	    cmd_datain[1] = virt_to_bus (hostdata->script) + 
		hostdata->E_other_in;
	    /* MOVE count, buf, WHEN DATA_IN */
	    cmd_datain[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I | DCMD_BMI_IO) 
    		<< 24) | count;
	    cmd_datain[3] = bbuf;
#if 0
	    print_insn (host, cmd_datain, "dynamic ", 1);
	    print_insn (host, cmd_datain + 2, "dynamic ", 1);
#endif
	}
	if (dataout) {
	    cache_push(virt_to_phys((void *)vbuf), count);
	    /* CALL other_out, WHEN NOT DATA_OUT */
	    cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL) << 24) | 
		DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
	    cmd_dataout[1] = virt_to_bus(hostdata->script) + 
    		hostdata->E_other_out;
	    /* MOVE count, buf, WHEN DATA+OUT */
	    cmd_dataout[2] = ((DCMD_TYPE_BMI | DCMD_BMI_OP_MOVE_I) << 24) 
		| count;
	    cmd_dataout[3] = bbuf;
#if 0
	    print_insn (host, cmd_dataout, "dynamic ", 1);
	    print_insn (host, cmd_dataout + 2, "dynamic ", 1);
#endif
	}
    }

    /*
     * Install JUMP instructions after the data transfer routines to return
     * control to the do_other_transfer routines.
     */
  
    
    if (datain) {
	cmd_datain[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
    	    DBC_TCI_TRUE;
	cmd_datain[1] = virt_to_bus(hostdata->script) + 
    	    hostdata->E_other_transfer;
#if 0
	print_insn (host, cmd_datain, "dynamic jump ", 1);
#endif
	cmd_datain += 2; 
    }
#if 0
    if (datain) {
	cmd_datain[0] = 0x98080000;
	cmd_datain[1] = 0x03ffdeed;
	cmd_datain += 2;
    }
#endif
    if (dataout) {
	cmd_dataout[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_JUMP) << 24) |
    	    DBC_TCI_TRUE;
	cmd_dataout[1] = virt_to_bus(hostdata->script) + 
    	    hostdata->E_other_transfer;
#if 0
	print_insn (host, cmd_dataout, "dynamic jump ", 1);
#endif
	cmd_dataout += 2;
    }

    return tmp;
}
3570
3571/*
3572 * Function : int NCR53c7xx_queue_command (Scsi_Cmnd *cmd,
3573 * void (*done)(Scsi_Cmnd *))
3574 *
3575 * Purpose : enqueues a SCSI command
3576 *
3577 * Inputs : cmd - SCSI command, done - function called on completion, with
3578 * a pointer to the command descriptor.
3579 *
3580 * Returns : 0
3581 *
3582 * Side effects :
3583 * cmd is added to the per instance driver issue_queue, with major
3584 * twiddling done to the host specific fields of cmd. If the
3585 * process_issue_queue coroutine isn't running, it is restarted.
3586 *
3587 * NOTE : we use the host_scribble field of the Scsi_Cmnd structure to
3588 * hold our own data, and pervert the ptr field of the SCp field
3589 * to create a linked list.
3590 */
3591
3592int
3593NCR53c7xx_queue_command (Scsi_Cmnd *cmd, void (* done)(Scsi_Cmnd *)) {
3594 struct Scsi_Host *host = cmd->device->host;
3595 struct NCR53c7x0_hostdata *hostdata =
3596 (struct NCR53c7x0_hostdata *) host->hostdata[0];
3597 unsigned long flags;
3598 Scsi_Cmnd *tmp;
3599
3600 cmd->scsi_done = done;
3601 cmd->host_scribble = NULL;
3602 cmd->SCp.ptr = NULL;
3603 cmd->SCp.buffer = NULL;
3604
3605#ifdef VALID_IDS
3606 /* Ignore commands on invalid IDs */
3607 if (!hostdata->valid_ids[cmd->device->id]) {
3608 printk("scsi%d : ignoring target %d lun %d\n", host->host_no,
3609 cmd->device->id, cmd->device->lun);
3610 cmd->result = (DID_BAD_TARGET << 16);
3611 done(cmd);
3612 return 0;
3613 }
3614#endif
3615
3616 local_irq_save(flags);
3617 if ((hostdata->options & (OPTION_DEBUG_INIT_ONLY|OPTION_DEBUG_PROBE_ONLY))
3618 || ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
3619 !(hostdata->debug_lun_limit[cmd->device->id] & (1 << cmd->device->lun)))
3620#ifdef LINUX_1_2
3621 || cmd->device->id > 7
3622#else
3623 || cmd->device->id >= host->max_id
3624#endif
3625 || cmd->device->id == host->this_id
3626 || hostdata->state == STATE_DISABLED) {
3627 printk("scsi%d : disabled or bad target %d lun %d\n", host->host_no,
3628 cmd->device->id, cmd->device->lun);
3629 cmd->result = (DID_BAD_TARGET << 16);
3630 done(cmd);
3631 local_irq_restore(flags);
3632 return 0;
3633 }
3634
3635 if ((hostdata->options & OPTION_DEBUG_NCOMMANDS_LIMIT) &&
3636 (hostdata->debug_count_limit == 0)) {
3637 printk("scsi%d : maximum commands exceeded\n", host->host_no);
3638 cmd->result = (DID_BAD_TARGET << 16);
3639 done(cmd);
3640 local_irq_restore(flags);
3641 return 0;
3642 }
3643
3644 if (hostdata->options & OPTION_DEBUG_READ_ONLY) {
3645 switch (cmd->cmnd[0]) {
3646 case WRITE_6:
3647 case WRITE_10:
3648 printk("scsi%d : WRITE attempted with NO_WRITE debugging flag set\n",
3649 host->host_no);
3650 cmd->result = (DID_BAD_TARGET << 16);
3651 done(cmd);
3652 local_irq_restore(flags);
3653 return 0;
3654 }
3655 }
3656
3657 if ((hostdata->options & OPTION_DEBUG_TARGET_LIMIT) &&
3658 hostdata->debug_count_limit != -1)
3659 --hostdata->debug_count_limit;
3660
3661 cmd->result = 0xffff; /* The NCR will overwrite message
3662 and status with valid data */
3663 cmd->host_scribble = (unsigned char *) tmp = create_cmd (cmd);
3664
3665 /*
3666 * REQUEST SENSE commands are inserted at the head of the queue
3667 * so that we do not clear the contingent allegiance condition
3668 * they may be looking at.
3669 */
3670
3671 if (!(hostdata->issue_queue) || (cmd->cmnd[0] == REQUEST_SENSE)) {
3672 cmd->SCp.ptr = (unsigned char *) hostdata->issue_queue;
3673 hostdata->issue_queue = cmd;
3674 } else {
3675 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp->SCp.ptr;
3676 tmp = (Scsi_Cmnd *) tmp->SCp.ptr);
3677 tmp->SCp.ptr = (unsigned char *) cmd;
3678 }
3679 local_irq_restore(flags);
3680 run_process_issue_queue();
3681 return 0;
3682}
3683
3684/*
3685 * Function : void to_schedule_list (struct Scsi_Host *host,
3686 * struct NCR53c7x0_hostdata * hostdata, Scsi_Cmnd *cmd)
3687 *
3688 * Purpose : takes a SCSI command which was just removed from the
3689 * issue queue, and deals with it by inserting it in the first
3690 * free slot in the schedule list or by terminating it immediately.
3691 *
3692 * Inputs :
3693 * host - SCSI host adapter; hostdata - hostdata structure for
3694 * this adapter; cmd - a pointer to the command; should have
3695 * the host_scribble field initialized to point to a valid
3696 *
3697 * Side effects :
3698 * cmd is added to the per instance schedule list, with minor
3699 * twiddling done to the host specific fields of cmd.
3700 *
3701 */
3702
static __inline__ void
to_schedule_list (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
    struct NCR53c7x0_cmd *cmd) {
    NCR53c7x0_local_declare();
    Scsi_Cmnd *tmp = cmd->cmd;		/* mid-layer command backing cmd */
    unsigned long flags;
    /* dsa start is negative, so subtraction is used */
    volatile u32 *ncrcurrent;		/* scan pointer into the schedule list */

    int i;
    NCR53c7x0_local_setup(host);
#if 0
    printk("scsi%d : new dsa is 0x%lx (virt 0x%p)\n", host->host_no,
	virt_to_bus(hostdata->dsa), hostdata->dsa);
#endif

    local_irq_save(flags);

    /*
     * Work around race condition : if an interrupt fired and we
     * got disabled forget about this command.
     */

    if (hostdata->state == STATE_DISABLED) {
	printk("scsi%d : driver disabled\n", host->host_no);
	tmp->result = (DID_BAD_TARGET << 16);
	/* Return the NCR command structure to the free list before
	 * completing the mid-layer command. */
	cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
	hostdata->free = cmd;
	tmp->scsi_done(tmp);
	local_irq_restore(flags);
	return;
    }

    /*
     * Find the first free slot in the schedule list.  Each slot is a
     * two-word SCRIPT instruction; a free slot still contains the NOP
     * instruction, a used one has been patched into a JUMP.
     */
    for (i = host->can_queue, ncrcurrent = hostdata->schedule;
	i > 0 && ncrcurrent[0] != hostdata->NOP_insn;
	--i, ncrcurrent += 2 /* JUMP instructions are two words */);

    if (i > 0) {
	++hostdata->busy[tmp->device->id][tmp->device->lun];
	cmd->next = hostdata->running_list;
	hostdata->running_list = cmd;

	/* Restore this instruction to a NOP once the command starts */
	cmd->dsa [(hostdata->dsa_jump_dest - hostdata->dsa_start) /
	    sizeof(u32)] = (u32) virt_to_bus ((void *)ncrcurrent);
	/* Replace the current jump operand. */
	ncrcurrent[1] =
	    virt_to_bus ((void *) cmd->dsa) + hostdata->E_dsa_code_begin -
	    hostdata->E_dsa_code_template;
	/* Replace the NOP instruction with a JUMP */
	ncrcurrent[0] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP) << 24) |
	    DBC_TCI_TRUE;
    } else {
	/* No free slot; should not happen if can_queue is honoured.
	 * Disable the host and fail the command. */
	printk ("scsi%d: no free slot\n", host->host_no);
	disable(host);
	tmp->result = (DID_ERROR << 16);
	cmd->next = (struct NCR53c7x0_cmd *) hostdata->free;
	hostdata->free = cmd;
	tmp->scsi_done(tmp);
	local_irq_restore(flags);
	return;
    }

    /*
     * If the NCR chip is in an idle state, start it running the scheduler
     * immediately.  Otherwise, signal the chip to jump to schedule as
     * soon as it is idle.
     */

    if (hostdata->idle) {
	hostdata->idle = 0;
	hostdata->state = STATE_RUNNING;
	NCR53c7x0_write32 (DSP_REG, virt_to_bus ((void *)hostdata->schedule));
	if (hostdata->options & OPTION_DEBUG_TRACE)
	    NCR53c7x0_write8 (DCNTL_REG, hostdata->saved_dcntl |
				DCNTL_SSM | DCNTL_STD);
    } else {
	/* Chip is busy: raise SIGP so it jumps to the scheduler when idle. */
	NCR53c7x0_write8(hostdata->istat, ISTAT_10_SIGP);
    }

    local_irq_restore(flags);
}
3785
3786/*
3787 * Function : busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata
3788 * *hostdata, Scsi_Cmnd *cmd)
3789 *
3790 * Purpose : decide if we can pass the given SCSI command on to the
3791 * device in question or not.
3792 *
3793 * Returns : non-zero when we're busy, 0 when we aren't.
3794 */
3795
3796static __inline__ int
3797busyp (struct Scsi_Host *host, struct NCR53c7x0_hostdata *hostdata,
3798 Scsi_Cmnd *cmd) {
3799 /* FIXME : in the future, this needs to accommodate SCSI-II tagged
3800 queuing, and we may be able to play with fairness here a bit.
3801 */
3802 return hostdata->busy[cmd->device->id][cmd->device->lun];
3803}
3804
3805/*
3806 * Function : process_issue_queue (void)
3807 *
3808 * Purpose : transfer commands from the issue queue to NCR start queue
3809 * of each NCR53c7/8xx in the system, avoiding kernel stack
3810 * overflows when the scsi_done() function is invoked recursively.
3811 *
3812 * NOTE : process_issue_queue exits with interrupts *disabled*, so the
3813 * caller must reenable them if it desires.
3814 *
3815 * NOTE : process_issue_queue should be called from both
3816 * NCR53c7x0_queue_command() and from the interrupt handler
3817 * after command completion in case NCR53c7x0_queue_command()
3818 * isn't invoked again but we've freed up resources that are
3819 * needed.
3820 */
3821
static void
process_issue_queue (unsigned long flags) {
    Scsi_Cmnd *tmp, *prev;
    struct Scsi_Host *host;
    struct NCR53c7x0_hostdata *hostdata;
    int done;

    /*
     * We run (with interrupts disabled) until we're sure that none of
     * the host adapters have anything that can be done, at which point
     * we set process_issue_queue_running to 0 and exit.
     *
     * Interrupts are enabled before doing various other internal
     * instructions, after we've decided that we need to run through
     * the loop again.
     *
     */

    do {
	local_irq_disable(); /* Freeze request queues */
	done = 1;
	for (host = first_host; host && host->hostt == the_template;
	    host = host->next) {
	    hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
	    local_irq_disable();
	    if (hostdata->issue_queue) {
	    	if (hostdata->state == STATE_DISABLED) {
		    /* Host disabled: fail the command at the head of the
		     * queue, recycling its NCR command structure if one
		     * was attached via host_scribble. */
		    tmp = (Scsi_Cmnd *) hostdata->issue_queue;
		    hostdata->issue_queue = (Scsi_Cmnd *) tmp->SCp.ptr;
		    tmp->result = (DID_BAD_TARGET << 16);
		    if (tmp->host_scribble) {
			((struct NCR53c7x0_cmd *)tmp->host_scribble)->next =
			    hostdata->free;
			hostdata->free =
			    (struct NCR53c7x0_cmd *)tmp->host_scribble;
			tmp->host_scribble = NULL;
		    }
		    tmp->scsi_done (tmp);
		    done = 0;	/* queue changed, rescan all hosts */
		} else
		    /* Walk the singly-linked issue queue (linked through
		     * SCp.ptr) looking for a command whose target/lun is
		     * not busy, or one with no NCR command attached. */
		    for (tmp = (Scsi_Cmnd *) hostdata->issue_queue,
			prev = NULL; tmp; prev = tmp, tmp = (Scsi_Cmnd *)
			tmp->SCp.ptr)
			if (!tmp->host_scribble ||
			    !busyp (host, hostdata, tmp)) {
			    /* Unlink tmp from the issue queue. */
			    if (prev)
				prev->SCp.ptr = tmp->SCp.ptr;
			    else
				hostdata->issue_queue = (Scsi_Cmnd *)
				    tmp->SCp.ptr;
			    tmp->SCp.ptr = NULL;
			    if (tmp->host_scribble) {
				if (hostdata->options & OPTION_DEBUG_QUEUES)
				    printk ("scsi%d : moving command for target %d lun %d to start list\n",
					host->host_no, tmp->device->id, tmp->device->lun);


				to_schedule_list (host, hostdata,
				    (struct NCR53c7x0_cmd *)
				    tmp->host_scribble);
			    } else {
				/* No NCR command attached: only complete it
				 * if the status/message bytes are valid
				 * (not the 0xff sentinel). */
				if (((tmp->result & 0xff) == 0xff) ||
				    ((tmp->result & 0xff00) == 0xff00)) {
				    printk ("scsi%d : danger Will Robinson!\n",
					host->host_no);
				    tmp->result = DID_ERROR << 16;
				    disable (host);
				}
				tmp->scsi_done(tmp);
			    }
			    done = 0;	/* queue changed, rescan all hosts */
			} /* if target/lun is not busy */
	    } /* if hostdata->issue_queue */
	    if (!done)
		/* Briefly drop back to the caller's interrupt state so
		 * pending interrupts can be serviced before rescanning. */
		local_irq_restore(flags);
    	} /* for host */
    } while (!done);
    process_issue_queue_running = 0;
}
3901
3902/*
3903 * Function : static void intr_scsi (struct Scsi_Host *host,
3904 * struct NCR53c7x0_cmd *cmd)
3905 *
3906 * Purpose : handle all SCSI interrupts, indicated by the setting
3907 * of the SIP bit in the ISTAT register.
3908 *
3909 * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
3910 * may be NULL.
3911 */
3912
static void
intr_scsi (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
    NCR53c7x0_local_declare();
    struct NCR53c7x0_hostdata *hostdata =
    	(struct NCR53c7x0_hostdata *) host->hostdata[0];
    unsigned char sstat0_sist0, sist1, 		/* Registers */
	fatal; 					/* Did a fatal interrupt
						   occur ? */

    NCR53c7x0_local_setup(host);

    fatal = 0;

    /* On this chip only SSTAT0 is read; sist1 is kept at 0 and is only
     * printed for debugging symmetry. */
    sstat0_sist0 = NCR53c7x0_read8(SSTAT0_REG);
    sist1 = 0;

    if (hostdata->options & OPTION_DEBUG_INTR)
	printk ("scsi%d : SIST0 0x%0x, SIST1 0x%0x\n", host->host_no,
	    sstat0_sist0, sist1);

    /* 250ms selection timeout */
    if (sstat0_sist0 & SSTAT0_700_STO) {
	fatal = 1;
	if (hostdata->options & OPTION_DEBUG_INTR) {
	    printk ("scsi%d : Selection Timeout\n", host->host_no);
    	    if (cmd) {
    	    	printk("scsi%d : target %d, lun %d, command ",
		    host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
    	    	__scsi_print_command (cmd->cmd->cmnd);
		printk("scsi%d : dsp = 0x%x (virt 0x%p)\n", host->host_no,
		    NCR53c7x0_read32(DSP_REG),
		    bus_to_virt(NCR53c7x0_read32(DSP_REG)));
    	    } else {
    	    	printk("scsi%d : no command\n", host->host_no);
    	    }
	}
/*
 * XXX - question : how do we want to handle the Illegal Instruction
 * 	interrupt, which may occur before or after the Selection Timeout
 * 	interrupt?
 */

	if (1) {
	    hostdata->idle = 1;
	    hostdata->expecting_sto = 0;

	    if (hostdata->test_running) {
		/* Selection timeout during the self-test: record success
		 * code 3 rather than failing a real command. */
		hostdata->test_running = 0;
		hostdata->test_completed = 3;
	    } else if (cmd) {
		abnormal_finished(cmd, DID_BAD_TARGET << 16);
	    }
#if 0
	    hostdata->intrs = 0;
#endif
	}
    }

/*
 * FIXME : in theory, we can also get a UDC when a STO occurs.
 */
    if (sstat0_sist0 & SSTAT0_UDC) {
	fatal = 1;
	if (cmd) {
	    printk("scsi%d : target %d lun %d unexpected disconnect\n",
		host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
	    print_lots (host);
	    abnormal_finished(cmd, DID_ERROR << 16);
	} else
	     printk("scsi%d : unexpected disconnect (no command)\n",
		host->host_no);

	/* Restart the chip at the scheduler entry point. */
	hostdata->dsp = (u32 *) hostdata->schedule;
	hostdata->dsp_changed = 1;
    }

    /* SCSI PARITY error */
    if (sstat0_sist0 & SSTAT0_PAR) {
	fatal = 1;
	if (cmd && cmd->cmd) {
	    printk("scsi%d : target %d lun %d parity error.\n",
		host->host_no, cmd->cmd->device->id, cmd->cmd->device->lun);
	    abnormal_finished (cmd, DID_PARITY << 16);
	} else
	    printk("scsi%d : parity error\n", host->host_no);
	/* Should send message out, parity error */

	/* XXX - Reduce synchronous transfer rate! */
	hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
    	    sizeof(u32);
	hostdata->dsp_changed = 1;
    /* SCSI GROSS error */
    }

    if (sstat0_sist0 & SSTAT0_SGE) {
	fatal = 1;
	printk("scsi%d : gross error, saved2_dsa = 0x%x\n", host->host_no,
					(unsigned int)hostdata->saved2_dsa);
	print_lots (host);

	/*
	 * A SCSI gross error may occur when we have
	 *
	 * - A synchronous offset which causes the SCSI FIFO to be overwritten.
	 *
	 * - A REQ which causes the maximum synchronous offset programmed in
	 * 	the SXFER register to be exceeded.
	 *
	 * - A phase change with an outstanding synchronous offset.
	 *
	 * - Residual data in the synchronous data FIFO, with a transfer
	 *	other than a synchronous receive is started.
	 */


	/* XXX Should deduce synchronous transfer rate! */
	hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
    	    sizeof(u32);
	hostdata->dsp_changed = 1;
    /* Phase mismatch */
    }

    if (sstat0_sist0 & SSTAT0_MA) {
	fatal = 1;
	if (hostdata->options & OPTION_DEBUG_INTR)
	    printk ("scsi%d : SSTAT0_MA\n", host->host_no);
	intr_phase_mismatch (host, cmd);
    }

#if 0
    if (sstat0_sist0 & SIST0_800_RSL)
	printk ("scsi%d : Oh no Mr. Bill!\n", host->host_no);
#endif

/*
 * If a fatal SCSI interrupt occurs, we must insure that the DMA and
 * SCSI FIFOs were flushed.
 */

    if (fatal) {
	if (!hostdata->dstat_valid) {
	    hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
	    hostdata->dstat_valid = 1;
	}

	if (!(hostdata->dstat & DSTAT_DFE)) {
	    printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
	    /*
	     * Really need to check this code for 710  RGH.
	     * Haven't seen any problems, but maybe we should FLUSH before
	     * clearing sometimes.
	     */
	    /* Clear the DMA FIFO and busy-wait until the chip reports done. */
	    NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
	    while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF)
		;
	    hostdata->dstat |= DSTAT_DFE;
	}
    }
}
4072
4073#ifdef CYCLIC_TRACE
4074
4075/*
4076 * The following implements a cyclic log of instructions executed, if you turn
4077 * TRACE on. It will also print the log for you. Very useful when debugging
4078 * 53c710 support, possibly not really needed any more.
4079 */
4080
u32 insn_log[4096];	/* cyclic buffer of logged SCRIPT addresses/words */
u32 insn_log_index = 0;	/* next free slot in insn_log (wraps at 4096) */
4083
4084void log1 (u32 i)
4085{
4086 insn_log[insn_log_index++] = i;
4087 if (insn_log_index == 4096)
4088 insn_log_index = 0;
4089}
4090
4091void log_insn (u32 *ip)
4092{
4093 log1 ((u32)ip);
4094 log1 (*ip);
4095 log1 (*(ip+1));
4096 if (((*ip >> 24) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI)
4097 log1 (*(ip+2));
4098}
4099
4100void dump_log(void)
4101{
4102 int cnt = 0;
4103 int i = insn_log_index;
4104 int size;
4105 struct Scsi_Host *host = first_host;
4106
4107 while (cnt < 4096) {
4108 printk ("%08x (+%6x): ", insn_log[i], (insn_log[i] - (u32)&(((struct NCR53c7x0_hostdata *)host->hostdata[0])->script))/4);
4109 if (++i == 4096)
4110 i = 0;
4111 cnt++;
4112 if (((insn_log[i] >> 24) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI)
4113 size = 3;
4114 else
4115 size = 2;
4116 while (size--) {
4117 printk ("%08x ", insn_log[i]);
4118 if (++i == 4096)
4119 i = 0;
4120 cnt++;
4121 }
4122 printk ("\n");
4123 }
4124}
4125#endif
4126
4127
4128/*
4129 * Function : static void NCR53c7x0_intfly (struct Scsi_Host *host)
4130 *
4131 * Purpose : Scan command queue for specified host, looking for completed
4132 * commands.
4133 *
4134 * Inputs : Scsi_Host pointer.
4135 *
4136 * This is called from the interrupt handler, when a simulated INTFLY
4137 * interrupt occurs.
4138 */
4139
static void
NCR53c7x0_intfly (struct Scsi_Host *host)
{
    NCR53c7x0_local_declare();
    struct NCR53c7x0_hostdata *hostdata;	/* host->hostdata[0] */
    struct NCR53c7x0_cmd *cmd,			/* command which halted */
	**cmd_prev_ptr;
    unsigned long flags;
    char search_found = 0;			/* Got at least one ? */

    hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
    NCR53c7x0_local_setup(host);

    if (hostdata->options & OPTION_DEBUG_INTR)
	printk ("scsi%d : INTFLY\n", host->host_no);

    /*
     * Traverse our list of running commands, and look
     * for those with valid (non-0xff ff) status and message
     * bytes encoded in the result which signify command
     * completion.
     */

    local_irq_save(flags);
restart:
    for (cmd_prev_ptr = (struct NCR53c7x0_cmd **)&(hostdata->running_list),
	 cmd = (struct NCR53c7x0_cmd *) hostdata->running_list; cmd ;
	 cmd_prev_ptr = (struct NCR53c7x0_cmd **) &(cmd->next),
	 cmd = (struct NCR53c7x0_cmd *) cmd->next)
    {
	Scsi_Cmnd *tmp;

	if (!cmd) {
	    printk("scsi%d : very weird.\n", host->host_no);
	    break;
	}

	if (!(tmp = cmd->cmd)) {
	    printk("scsi%d : weird.  NCR53c7x0_cmd has no Scsi_Cmnd\n",
		    host->host_no);
	    continue;
	}
	/* Copy the result over now; may not be complete,
	 * but subsequent tests may as well be done on
	 * cached memory.
	 */
	tmp->result = cmd->result;

	/* 0xff in either byte is the sentinel meaning the NCR has not
	 * yet written valid status/message bytes - skip this command. */
	if (((tmp->result & 0xff) == 0xff) ||
	    ((tmp->result & 0xff00) == 0xff00))
	    continue;

	search_found = 1;

	/* Copy bounce-buffered data back to the caller's buffer. */
	if (cmd->bounce.len)
	    memcpy ((void *)cmd->bounce.addr,
				(void *)cmd->bounce.buf, cmd->bounce.len);

	/* Important - remove from list _before_ done is called */
	if (cmd_prev_ptr)
	    *cmd_prev_ptr = (struct NCR53c7x0_cmd *) cmd->next;

	--hostdata->busy[tmp->device->id][tmp->device->lun];
	cmd->next = hostdata->free;
	hostdata->free = cmd;

	tmp->host_scribble = NULL;

	if (hostdata->options & OPTION_DEBUG_INTR) {
	    printk ("scsi%d : command complete : pid %lu, id %d,lun %d result 0x%x ",
		  host->host_no, tmp->pid, tmp->device->id, tmp->device->lun, tmp->result);
	    __scsi_print_command (tmp->cmnd);
	}

	tmp->scsi_done(tmp);
	/* scsi_done may have changed the running list; restart the scan
	 * from the head rather than continuing with stale pointers. */
	goto restart;
    }
    local_irq_restore(flags);

    if (!search_found)  {
	printk ("scsi%d : WARNING : INTFLY with no completed commands.\n",
			    host->host_no);
    } else {
	/* Completions freed resources; try to start queued commands. */
	run_process_issue_queue();
    }
    return;
}
4227
4228/*
4229 * Function : static irqreturn_t NCR53c7x0_intr (int irq, void *dev_id)
4230 *
4231 * Purpose : handle NCR53c7x0 interrupts for all NCR devices sharing
4232 * the same IRQ line.
4233 *
4234 * Inputs : Since we're using the IRQF_DISABLED interrupt handler
4235 * semantics, irq indicates the interrupt which invoked
4236 * this handler.
4237 *
 * On the 710 we simulate an INTFLY with a script interrupt, and the
4239 * script interrupt handler will call back to this function.
4240 */
4241
static irqreturn_t
NCR53c7x0_intr (int irq, void *dev_id)
{
    NCR53c7x0_local_declare();
    struct Scsi_Host *host;			/* Host we are looking at */
    unsigned char istat; 			/* Values of interrupt regs */
    struct NCR53c7x0_hostdata *hostdata;	/* host->hostdata[0] */
    struct NCR53c7x0_cmd *cmd;			/* command which halted */
    u32 *dsa;					/* DSA */
    int handled = 0;

#ifdef NCR_DEBUG
    char buf[80];				/* Debugging sprintf buffer */
    size_t buflen;				/* Length of same */
#endif

    host = (struct Scsi_Host *)dev_id;
    hostdata = (struct NCR53c7x0_hostdata *) host->hostdata[0];
    NCR53c7x0_local_setup(host);

    /*
     * Only read istat once per loop, since reading it again will unstack
     * interrupts
     */

    while ((istat = NCR53c7x0_read8(hostdata->istat)) & (ISTAT_SIP|ISTAT_DIP)) {
	handled = 1;
	hostdata->dsp_changed = 0;
	hostdata->dstat_valid = 0;
	hostdata->state = STATE_HALTED;

	if (NCR53c7x0_read8 (SSTAT2_REG) & SSTAT2_FF_MASK)
	    printk ("scsi%d : SCSI FIFO not empty\n", host->host_no);

	/*
	 * NCR53c700 and NCR53c700-66 change the current SCSI
	 * process, hostdata->ncrcurrent, in the Linux driver so
	 * cmd = hostdata->ncrcurrent.
	 *
	 * With other chips, we must look through the commands
	 * executing and find the command structure which
	 * corresponds to the DSA register.
	 */

	if (hostdata->options & OPTION_700) {
	    cmd = (struct NCR53c7x0_cmd *) hostdata->ncrcurrent;
	} else {
	    /* Match the chip's DSA register against each running
	     * command's dsa array (offset by dsa_start). */
	    dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
	    for (cmd = (struct NCR53c7x0_cmd *) hostdata->running_list;
		cmd && (dsa + (hostdata->dsa_start / sizeof(u32))) != cmd->dsa;
		    cmd = (struct NCR53c7x0_cmd *)(cmd->next))
		;
	}
	if (hostdata->options & OPTION_DEBUG_INTR) {
	    if (cmd) {
		printk("scsi%d : interrupt for pid %lu, id %d, lun %d ",
		    host->host_no, cmd->cmd->pid, (int) cmd->cmd->device->id,
		    (int) cmd->cmd->device->lun);
		__scsi_print_command (cmd->cmd->cmnd);
	    } else {
		printk("scsi%d : no active command\n", host->host_no);
	    }
	}

	if (istat & ISTAT_SIP) {
	    if (hostdata->options & OPTION_DEBUG_INTR)
		printk ("scsi%d : ISTAT_SIP\n", host->host_no);
	    intr_scsi (host, cmd);
	}

	if (istat & ISTAT_DIP) {
	    if (hostdata->options & OPTION_DEBUG_INTR)
		printk ("scsi%d : ISTAT_DIP\n", host->host_no);
	    intr_dma (host, cmd);
	}

	/* The interrupt handlers above may already have fetched DSTAT;
	 * read it here only if they did not. */
	if (!hostdata->dstat_valid) {
	    hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
	    hostdata->dstat_valid = 1;
	}

	if (!(hostdata->dstat & DSTAT_DFE)) {
	    printk ("scsi%d : DMA FIFO not empty\n", host->host_no);
	    /* Really need to check this out for 710 RGH */
	    NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
	    while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF)
		;
	    hostdata->dstat |= DSTAT_DFE;
	}

	/* Restart the SCRIPTS processor if the interrupt handlers left
	 * it halted, either at a new DSP or where it stopped. */
	if (!hostdata->idle && hostdata->state == STATE_HALTED) {
	    if (!hostdata->dsp_changed)
		hostdata->dsp = (u32 *)bus_to_virt(NCR53c7x0_read32(DSP_REG));
#if 0
	    printk("scsi%d : new dsp is 0x%lx (virt 0x%p)\n",
		host->host_no,  virt_to_bus(hostdata->dsp), hostdata->dsp);
#endif

	    hostdata->state = STATE_RUNNING;
	    NCR53c7x0_write32 (DSP_REG, virt_to_bus(hostdata->dsp));
	    if (hostdata->options & OPTION_DEBUG_TRACE) {
#ifdef CYCLIC_TRACE
		log_insn (hostdata->dsp);
#else
	    	print_insn (host, hostdata->dsp, "t ", 1);
#endif
		NCR53c7x0_write8 (DCNTL_REG,
			hostdata->saved_dcntl | DCNTL_SSM | DCNTL_STD);
	    }
	}
    }
    return IRQ_HANDLED;
}
4355
4356
4357/*
4358 * Function : static int abort_connected (struct Scsi_Host *host)
4359 *
4360 * Purpose : Assuming that the NCR SCSI processor is currently
4361 * halted, break the currently established nexus. Clean
4362 * up of the NCR53c7x0_cmd and Scsi_Cmnd structures should
4363 * be done on receipt of the abort interrupt.
4364 *
4365 * Inputs : host - SCSI host
4366 *
4367 */
4368
static int
abort_connected (struct Scsi_Host *host) {
#ifdef NEW_ABORT
    NCR53c7x0_local_declare();
#endif
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
/* FIXME : this probably should change for production kernels; at the
   least, counter should move to a per-host structure. */
    static int counter = 5;
#ifdef NEW_ABORT
    int sstat, phase, offset;
    u32 *script;
    NCR53c7x0_local_setup(host);
#endif

    /* Give up entirely after a fixed number of aborts (shared across
     * all hosts - see FIXME above). */
    if (--counter <= 0) {
	disable(host);
	return 0;
    }

    printk ("scsi%d : DANGER : abort_connected() called \n",
	host->host_no);

#ifdef NEW_ABORT

/*
 * New strategy : Rather than using a generic abort routine,
 * we'll specifically try to source or sink the appropriate
 * amount of data for the phase we're currently in (taking into
 * account the current synchronous offset)
 */

    sstat = NCR53c8x0_read8 (SSTAT2_REG);
    offset = OFFSET (sstat & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
    phase = sstat & SSTAT2_PHASE_MASK;

/*
 * SET ATN
 * MOVE source_or_sink, WHEN CURRENT PHASE
 * < repeat for each outstanding byte >
 * JUMP send_abort_message
 */

    script = hostdata->abort_script = kmalloc (
	8  /* instruction size */ * (
	    1 /* set ATN */ +
	    (!offset ? 1 : offset) /* One transfer per outstanding byte */ +
	    1 /* send abort message */),
	GFP_ATOMIC);
    /* NOTE(review): kmalloc result is not checked, and the allocated
     * script is never filled in or started in this visible code -
     * the NEW_ABORT path looks unfinished; confirm before enabling. */


#else /* def NEW_ABORT */
    /* Point the SCRIPTS processor at the generic initiator-abort code. */
    hostdata->dsp = hostdata->script + hostdata->E_initiator_abort /
	    sizeof(u32);
#endif /* def NEW_ABORT */
    hostdata->dsp_changed = 1;

/* XXX - need to flag the command as aborted after the abort_connected
   code runs
 */
    return 0;
}
4432
4433/*
4434 * Function : static int datapath_residual (Scsi_Host *host)
4435 *
4436 * Purpose : return residual data count of what's in the chip.
4437 *
4438 * Inputs : host - SCSI host
4439 */
4440
static int
datapath_residual (struct Scsi_Host *host) {
    NCR53c7x0_local_declare();
    int count, synchronous, sstat;
    unsigned int ddir;

    NCR53c7x0_local_setup(host);
    /* COMPAT : the 700 and 700-66 need to use DFIFO_00_BO_MASK */
    /* Bytes left in the DMA FIFO = difference between the FIFO byte
     * counter (DFIFO) and the transfer counter (DBC), modulo the
     * byte-offset mask. */
    count = ((NCR53c7x0_read8 (DFIFO_REG) & DFIFO_10_BO_MASK) -
	(NCR53c7x0_read32 (DBC_REG) & DFIFO_10_BO_MASK)) & DFIFO_10_BO_MASK;
    /* Non-zero max offset in SXFER means the transfer is synchronous. */
    synchronous = NCR53c7x0_read8 (SXFER_REG) & SXFER_MO_MASK;
    /* COMPAT : DDIR is elsewhere on non-'8xx chips. */
    ddir = NCR53c7x0_read8 (CTEST0_REG_700) & CTEST0_700_DDIR;

    if (ddir) {
    /* Receive */
	if (synchronous)
	    /* Add the bytes latched in the synchronous SCSI FIFO. */
	    count += (NCR53c7x0_read8 (SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT;
	else
	    /* One extra byte if the input latch is full. */
	    if (NCR53c7x0_read8 (SSTAT1_REG) & SSTAT1_ILF)
		++count;
    } else {
    /* Send */
	sstat = NCR53c7x0_read8 (SSTAT1_REG);
	/* One extra byte for a full output latch, and one more for a
	 * full output register on synchronous transfers. */
	if (sstat & SSTAT1_OLF)
	    ++count;
	if (synchronous && (sstat & SSTAT1_ORF))
	    ++count;
    }
    return count;
}
4472
4473/*
 * Function : static const char * sbcl_to_phase (int sbcl)
4475 *
4476 * Purpose : Convert SBCL register to user-parsable phase representation
4477 *
4478 * Inputs : sbcl - value of sbcl register
4479 */
4480
4481
4482static const char *
4483sbcl_to_phase (int sbcl) {
4484 switch (sbcl & SBCL_PHASE_MASK) {
4485 case SBCL_PHASE_DATAIN:
4486 return "DATAIN";
4487 case SBCL_PHASE_DATAOUT:
4488 return "DATAOUT";
4489 case SBCL_PHASE_MSGIN:
4490 return "MSGIN";
4491 case SBCL_PHASE_MSGOUT:
4492 return "MSGOUT";
4493 case SBCL_PHASE_CMDOUT:
4494 return "CMDOUT";
4495 case SBCL_PHASE_STATIN:
4496 return "STATUSIN";
4497 default:
4498 return "unknown";
4499 }
4500}
4501
4502/*
 * Function : static const char * sstat2_to_phase (int sstat)
4504 *
4505 * Purpose : Convert SSTAT2 register to user-parsable phase representation
4506 *
4507 * Inputs : sstat - value of sstat register
4508 */
4509
4510
4511static const char *
4512sstat2_to_phase (int sstat) {
4513 switch (sstat & SSTAT2_PHASE_MASK) {
4514 case SSTAT2_PHASE_DATAIN:
4515 return "DATAIN";
4516 case SSTAT2_PHASE_DATAOUT:
4517 return "DATAOUT";
4518 case SSTAT2_PHASE_MSGIN:
4519 return "MSGIN";
4520 case SSTAT2_PHASE_MSGOUT:
4521 return "MSGOUT";
4522 case SSTAT2_PHASE_CMDOUT:
4523 return "CMDOUT";
4524 case SSTAT2_PHASE_STATIN:
4525 return "STATUSIN";
4526 default:
4527 return "unknown";
4528 }
4529}
4530
4531/*
4532 * Function : static void intr_phase_mismatch (struct Scsi_Host *host,
4533 * struct NCR53c7x0_cmd *cmd)
4534 *
4535 * Purpose : Handle phase mismatch interrupts
4536 *
4537 * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
4538 * may be NULL.
4539 *
4540 * Side effects : The abort_connected() routine is called or the NCR chip
4541 * is restarted, jumping to the command_complete entry point, or
4542 * patching the address and transfer count of the current instruction
4543 * and calling the msg_in entry point as appropriate.
4544 */
4545
static void
intr_phase_mismatch (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
    NCR53c7x0_local_declare();
    u32 dbc_dcmd, *dsp, *dsp_next;
    unsigned char dcmd, sbcl;
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
    	host->hostdata[0];
    int residual;
    enum {ACTION_ABORT, ACTION_ABORT_PRINT, ACTION_CONTINUE} action =
    	ACTION_ABORT_PRINT;
    const char *where = NULL;

    NCR53c7x0_local_setup(host);

    /*
     * Corrective action is based on where in the SCSI SCRIPT(tm) the error
     * occurred, as well as which SCSI phase we are currently in.
     */
    dsp_next = bus_to_virt(NCR53c7x0_read32(DSP_REG));

    /*
     * Fetch the current instruction, and remove the operands for easier
     * interpretation.
     */
    dbc_dcmd = NCR53c7x0_read32(DBC_REG);
    dcmd = (dbc_dcmd & 0xff000000) >> 24;
    /*
     * Like other processors, the NCR adjusts the instruction pointer before
     * instruction decode.  Set the DSP address back to what it should
     * be for this instruction based on its size (2 or 3 32 bit words).
     */
    dsp = dsp_next - NCR53c7x0_insn_size(dcmd);


    /*
     * Read new SCSI phase from the SBCL lines.  Since all of our code uses
     * a WHEN conditional instead of an IF conditional, we don't need to
     * wait for a new REQ.
     */
    sbcl = NCR53c7x0_read8(SBCL_REG) & SBCL_PHASE_MASK;

    if (!cmd) {
	action = ACTION_ABORT_PRINT;
	where = "no current command";
    /*
     * The way my SCSI SCRIPTS(tm) are architected, recoverable phase
     * mismatches should only occur where we're doing a multi-byte
     * BMI instruction.  Specifically, this means
     *
     *  - select messages (a SCSI-I target may ignore additional messages
     * 		after the IDENTIFY; any target may reject a SDTR or WDTR)
     *
     *  - command out (targets may send a message to signal an error
     * 		condition, or go into STATUSIN after they've decided
     *		they don't like the command.
     *
     *	- reply_message (targets may reject a multi-byte message in the
     *		middle)
     *
     * 	- data transfer routines (command completion with buffer space
     *		left, disconnect message, or error message)
     */
    } else if (((dsp >= cmd->data_transfer_start &&
	dsp < cmd->data_transfer_end)) || dsp == (cmd->residual + 2)) {
	/* Phase mismatch inside the dynamically generated data-transfer
	 * code (or while executing a previously built residual buffer):
	 * recoverable if the halted instruction is a block-move. */
	if ((dcmd & (DCMD_TYPE_MASK|DCMD_BMI_OP_MASK|DCMD_BMI_INDIRECT|
	    DCMD_BMI_MSG|DCMD_BMI_CD)) == (DCMD_TYPE_BMI|
	    DCMD_BMI_OP_MOVE_I)) {
	    residual = datapath_residual (host);
	    if (hostdata->options & OPTION_DEBUG_DISCONNECT)
	    	printk ("scsi%d : handling residual transfer (+ %d bytes from DMA FIFO)\n",
		    host->host_no, residual);

	    /*
	     * The first instruction is a CALL to the alternate handler for
	     * this data transfer phase, so we can do calls to
	     * munge_msg_restart as we would if control were passed
	     * from normal dynamic code.
	     */
	    if (dsp != cmd->residual + 2) {
		cmd->residual[0] = ((DCMD_TYPE_TCI | DCMD_TCI_OP_CALL |
			((dcmd & DCMD_BMI_IO) ? DCMD_TCI_IO : 0)) << 24) |
		    DBC_TCI_WAIT_FOR_VALID | DBC_TCI_COMPARE_PHASE;
		cmd->residual[1] = virt_to_bus(hostdata->script)
		    + ((dcmd & DCMD_BMI_IO)
		       ? hostdata->E_other_in : hostdata->E_other_out);
	    }

	    /*
	     * The second instruction is the a data transfer block
	     * move instruction, reflecting the pointer and count at the
	     * time of the phase mismatch.
	     */
	    cmd->residual[2] = dbc_dcmd + residual;
	    cmd->residual[3] = NCR53c7x0_read32(DNAD_REG) - residual;

	    /*
	     * The third and final instruction is a jump to the instruction
	     * which follows the instruction which had to be 'split'
	     */
	    if (dsp != cmd->residual + 2) {
		cmd->residual[4] = ((DCMD_TYPE_TCI|DCMD_TCI_OP_JUMP)
		    << 24) | DBC_TCI_TRUE;
		cmd->residual[5] = virt_to_bus(dsp_next);
	    }

	    /*
	     * For the sake of simplicity, transfer control to the
	     * conditional CALL at the start of the residual buffer.
	     */
	    hostdata->dsp = cmd->residual;
	    hostdata->dsp_changed = 1;
	    action = ACTION_CONTINUE;
	} else {
	    where = "non-BMI dynamic DSA code";
	    action = ACTION_ABORT_PRINT;
	}
    } else if (dsp == (hostdata->script + hostdata->E_select_msgout / 4 + 2)) {
	/* RGH 290697:  Added +2 above, to compensate for the script
	 * instruction which disables the selection timer. */
	/* Release ATN */
	NCR53c7x0_write8 (SOCL_REG, 0);
	switch (sbcl) {
    /*
     * Some devices (SQ555 come to mind) grab the IDENTIFY message
     * sent on selection, and decide to go into COMMAND OUT phase
     * rather than accepting the rest of the messages or rejecting
     * them.  Handle these devices gracefully.
     */
	case SBCL_PHASE_CMDOUT:
	    hostdata->dsp = dsp + 2 /* two _words_ */;
	    hostdata->dsp_changed = 1;
	    printk ("scsi%d : target %d ignored SDTR and went into COMMAND OUT\n",
		host->host_no, cmd->cmd->device->id);
	    cmd->flags &= ~CMD_FLAG_SDTR;
	    action = ACTION_CONTINUE;
	    break;
	case SBCL_PHASE_MSGIN:
	    /* Target wants to send a message (e.g. a reject): jump to
	     * the generic message-in handler. */
	    hostdata->dsp = hostdata->script + hostdata->E_msg_in /
		sizeof(u32);
	    hostdata->dsp_changed = 1;
	    action = ACTION_CONTINUE;
	    break;
	default:
	    where="select message out";
	    action = ACTION_ABORT_PRINT;
	}
    /*
     * Some SCSI devices will interpret a command as they read the bytes
     * off the SCSI bus, and may decide that the command is Bogus before
     * they've read the entire command off the bus.
     */
    } else if (dsp == hostdata->script + hostdata->E_cmdout_cmdout / sizeof
	(u32)) {
	/* Target left COMMAND OUT early: continue at the data-transfer
	 * dispatch code, which handles whatever phase comes next. */
	hostdata->dsp = hostdata->script + hostdata->E_data_transfer /
	    sizeof (u32);
	hostdata->dsp_changed = 1;
	action = ACTION_CONTINUE;
    /* FIXME : we need to handle message reject, etc. within msg_respond. */
#ifdef notyet
    } else if (dsp == hostdata->script + hostdata->E_reply_message) {
	switch (sbcl) {
    /* Any other phase mismatches abort the currently executing command.  */
#endif
    } else {
	where = "unknown location";
	action = ACTION_ABORT_PRINT;
    }

    /* Flush DMA FIFO */
    if (!hostdata->dstat_valid) {
	hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
	hostdata->dstat_valid = 1;
    }
    if (!(hostdata->dstat & DSTAT_DFE)) {
	/* Really need to check this out for 710 RGH */
	NCR53c7x0_write8 (CTEST8_REG, CTEST8_10_CLF);
	while (NCR53c7x0_read8 (CTEST8_REG) & CTEST8_10_CLF);
	hostdata->dstat |= DSTAT_DFE;
    }

    switch (action) {
    case ACTION_ABORT_PRINT:
	printk("scsi%d : %s : unexpected phase %s.\n",
	     host->host_no, where ? where : "unknown location",
	     sbcl_to_phase(sbcl));
	print_lots (host);
    /* Fall through to ACTION_ABORT */
    case ACTION_ABORT:
	abort_connected (host);
	break;
    case ACTION_CONTINUE:
	break;
    }

#if 0
    if (hostdata->dsp_changed) {
	printk("scsi%d: new dsp 0x%p\n", host->host_no, hostdata->dsp);
	print_insn (host, hostdata->dsp, "", 1);
    }
#endif
}
4747
4748/*
4749 * Function : static void intr_bf (struct Scsi_Host *host,
4750 * struct NCR53c7x0_cmd *cmd)
4751 *
4752 * Purpose : handle BUS FAULT interrupts
4753 *
4754 * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
4755 * may be NULL.
4756 */
4757
4758static void
4759intr_bf (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
4760 NCR53c7x0_local_declare();
4761 u32 *dsp,
4762 *next_dsp, /* Current dsp */
4763 *dsa,
4764 dbc_dcmd; /* DCMD (high eight bits) + DBC */
4765 char *reason = NULL;
4766 /* Default behavior is for a silent error, with a retry until we've
4767 exhausted retries. */
4768 enum {MAYBE, ALWAYS, NEVER} retry = MAYBE;
4769 int report = 0;
4770 NCR53c7x0_local_setup(host);
4771
4772 dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
4773 next_dsp = bus_to_virt (NCR53c7x0_read32(DSP_REG));
4774 dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
4775/* FIXME - check chip type */
4776 dsa = bus_to_virt (NCR53c7x0_read32(DSA_REG));
4777
4778 /*
4779 * Bus faults can be caused by either a Bad Address or
4780 * Target Abort. We should check the Received Target Abort
4781 * bit of the PCI status register and Master Abort Bit.
4782 *
4783 * - Master Abort bit indicates that no device claimed
4784 * the address with DEVSEL within five clocks
4785 *
4786 * - Target Abort bit indicates that a target claimed it,
4787 * but changed its mind once it saw the byte enables.
4788 *
4789 */
4790
4791 /* 53c710, not PCI system */
4792 report = 1;
4793 reason = "Unknown";
4794
4795#ifndef notyet
4796 report = 1;
4797#endif
4798 if (report && reason)
4799 {
4800 printk(KERN_ALERT "scsi%d : BUS FAULT reason = %s\n",
4801 host->host_no, reason ? reason : "unknown");
4802 print_lots (host);
4803 }
4804
4805#ifndef notyet
4806 retry = NEVER;
4807#endif
4808
4809 /*
4810 * TODO : we should attempt to recover from any spurious bus
4811 * faults. After X retries, we should figure that things are
4812 * sufficiently wedged, and call NCR53c7xx_reset.
4813 *
4814 * This code should only get executed once we've decided that we
4815 * cannot retry.
4816 */
4817
4818 if (retry == NEVER) {
4819 printk(KERN_ALERT " mail richard@sleepie.demon.co.uk\n");
4820 FATAL (host);
4821 }
4822}
4823
4824/*
4825 * Function : static void intr_dma (struct Scsi_Host *host,
4826 * struct NCR53c7x0_cmd *cmd)
4827 *
4828 * Purpose : handle all DMA interrupts, indicated by the setting
4829 * of the DIP bit in the ISTAT register.
4830 *
4831 * Inputs : host, cmd - host and NCR command causing the interrupt, cmd
4832 * may be NULL.
4833 */
4834
static void
intr_dma (struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd) {
    NCR53c7x0_local_declare();
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    unsigned char dstat;	/* DSTAT */
    u32 *dsp,
	*next_dsp,		/* Current dsp */
	*dsa,
	dbc_dcmd;		/* DCMD (high eight bits) + DBC */
    int tmp;
    unsigned long flags;
    NCR53c7x0_local_setup(host);

    /* DSTAT is cached in hostdata (with a valid flag) so every handler
       invoked for this interrupt pass sees the same snapshot. */
    if (!hostdata->dstat_valid) {
	hostdata->dstat = NCR53c7x0_read8(DSTAT_REG);
	hostdata->dstat_valid = 1;
    }

    dstat = hostdata->dstat;

    if (hostdata->options & OPTION_DEBUG_INTR)
	printk("scsi%d : DSTAT=0x%x\n", host->host_no, (int) dstat);

    /* DSP points at the *next* instruction; back up by the size of the
       instruction latched in DCMD/DBC to find the one that interrupted. */
    dbc_dcmd = NCR53c7x0_read32 (DBC_REG);
    next_dsp = bus_to_virt(NCR53c7x0_read32(DSP_REG));
    dsp = next_dsp - NCR53c7x0_insn_size ((dbc_dcmd >> 24) & 0xff);
/* XXX - check chip type */
    dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));

    /*
     * DSTAT_ABRT is the aborted interrupt.  This is set whenever the
     * SCSI chip is aborted.
     *
     * With NCR53c700 and NCR53c700-66 style chips, we should only
     * get this when the chip is currently running the accept
     * reselect/select code and we have set the abort bit in the
     * ISTAT register.
     *
     */

    if (dstat & DSTAT_ABRT) {
#if 0
	/* XXX - add code here to deal with normal abort */
	if ((hostdata->options & OPTION_700) && (hostdata->state ==
	    STATE_ABORTING)) {
	} else
#endif
	{
	    /* Abort we did not initiate (or cannot account for) is fatal. */
	    printk(KERN_ALERT "scsi%d : unexpected abort interrupt at\n"
		" ", host->host_no);
	    print_insn (host, dsp, KERN_ALERT "s ", 1);
	    FATAL (host);
	}
    }

    /*
     * DSTAT_SSI is the single step interrupt.  Should be generated
     * whenever we have single stepped or are tracing.
     */

    if (dstat & DSTAT_SSI) {
	if (hostdata->options & OPTION_DEBUG_TRACE) {
	    /* Don't print instr. until we write DSP at end of intr function */
	} else if (hostdata->options & OPTION_DEBUG_SINGLE) {
	    print_insn (host, dsp, "s ", 0);
	    local_irq_save(flags);
/* XXX - should we do this, or can we get away with writing dsp? */

	    /* Leave single-step mode and restart the SCRIPTS processor. */
	    NCR53c7x0_write8 (DCNTL_REG, (NCR53c7x0_read8(DCNTL_REG) &
		~DCNTL_SSM) | DCNTL_STD);
	    local_irq_restore(flags);
	} else {
	    printk(KERN_ALERT "scsi%d : unexpected single step interrupt at\n"
		" ", host->host_no);
	    print_insn (host, dsp, KERN_ALERT "", 1);
	    printk(KERN_ALERT " mail drew@PoohSticks.ORG\n");
	    FATAL (host);
	}
    }

    /*
     * DSTAT_IID / DSTAT_OPC (same bit, same meaning, only the name
     * is different) is generated whenever an illegal instruction is
     * encountered.
     *
     * XXX - we may want to emulate INTFLY here, so we can use
     *    the same SCSI SCRIPT (tm) for NCR53c710 through NCR53c810
     *    chips.
     */

    if (dstat & DSTAT_OPC) {
    /*
     * Ascertain if this IID interrupts occurred before or after a STO
     * interrupt.  Since the interrupt handling code now leaves
     * DSP unmodified until _after_ all stacked interrupts have been
     * processed, reading the DSP returns the original DSP register.
     * This means that if dsp lies between the select code, and
     * message out following the selection code (where the IID interrupt
     * would have to have occurred by due to the implicit wait for REQ),
     * we have an IID interrupt resulting from a STO condition and
     * can ignore it.
     */

	if (((dsp >= (hostdata->script + hostdata->E_select / sizeof(u32))) &&
	    (dsp <= (hostdata->script + hostdata->E_select_msgout /
	    sizeof(u32) + 8))) || (hostdata->test_running == 2)) {
	    if (hostdata->options & OPTION_DEBUG_INTR)
		printk ("scsi%d : ignoring DSTAT_IID for SSTAT_STO\n",
		    host->host_no);
	    if (hostdata->expecting_iid) {
		/* The matching STO was already seen; complete the idle /
		   test-done / bad-target bookkeeping now. */
		hostdata->expecting_iid = 0;
		hostdata->idle = 1;
		if (hostdata->test_running == 2) {
		    hostdata->test_running = 0;
		    hostdata->test_completed = 3;
		} else if (cmd)
		    abnormal_finished (cmd, DID_BAD_TARGET << 16);
	    } else {
		/* IID arrived first; remember to swallow the STO later. */
		hostdata->expecting_sto = 1;
	    }
    /*
     * We can't guarantee we'll be able to execute the WAIT DISCONNECT
     * instruction within the 3.4us of bus free and arbitration delay
     * that a target can RESELECT in and assert REQ after we've dropped
     * ACK.  If this happens, we'll get an illegal instruction interrupt.
     * Doing away with the WAIT DISCONNECT instructions broke everything,
     * so instead I'll settle for moving one WAIT DISCONNECT a few
     * instructions closer to the CLEAR ACK before it to minimize the
     * chances of this happening, and handle it if it occurs anyway.
     *
     * Simply continue with what we were doing, and control should
     * be transferred to the schedule routine which will ultimately
     * pass control onto the reselection or selection (not yet)
     * code.
     */
	} else if (dbc_dcmd == 0x48000000 && (NCR53c7x0_read8 (SBCL_REG) &
	    SBCL_REQ)) {
	    /* 0x48000000 is the WAIT DISCONNECT opcode word - this is the
	       known REQ-before-WAIT-DISCONNECT race described above. */
	    if (!(hostdata->options & OPTION_NO_PRINT_RACE))
	    {
		printk("scsi%d: REQ before WAIT DISCONNECT IID\n",
		    host->host_no);
		hostdata->options |= OPTION_NO_PRINT_RACE;
	    }
	} else {
	    printk(KERN_ALERT "scsi%d : invalid instruction\n", host->host_no);
	    print_lots (host);
	    printk(KERN_ALERT " mail Richard@sleepie.demon.co.uk with ALL\n"
		" boot messages and diagnostic output\n");
	    FATAL (host);
	}
    }

    /*
     * DSTAT_BF are bus fault errors.  DSTAT_800_BF is valid for 710 also.
     */

    if (dstat & DSTAT_800_BF) {
	intr_bf (host, cmd);
    }


    /*
     * DSTAT_SIR interrupts are generated by the execution of
     * the INT instruction.  Since the exact values available
     * are determined entirely by the SCSI script running,
     * and are local to a particular script, a unique handler
     * is called for each script.
     */

    if (dstat & DSTAT_SIR) {
	if (hostdata->options & OPTION_DEBUG_INTR)
	    printk ("scsi%d : DSTAT_SIR\n", host->host_no);
	/* Dispatch to the script-specific handler installed in hostdata. */
	switch ((tmp = hostdata->dstat_sir_intr (host, cmd))) {
	case SPECIFIC_INT_NOTHING:
	case SPECIFIC_INT_RESTART:
	    break;
	case SPECIFIC_INT_ABORT:
	    abort_connected(host);
	    break;
	case SPECIFIC_INT_PANIC:
	    printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
	    print_insn (host, dsp, KERN_ALERT "", 1);
	    printk(KERN_ALERT " dstat_sir_intr() returned SPECIFIC_INT_PANIC\n");
	    FATAL (host);
	    break;
	case SPECIFIC_INT_BREAK:
	    intr_break (host, cmd);
	    break;
	default:
	    printk(KERN_ALERT "scsi%d : failure at ", host->host_no);
	    print_insn (host, dsp, KERN_ALERT "", 1);
	    printk(KERN_ALERT" dstat_sir_intr() returned unknown value %d\n",
		tmp);
	    FATAL (host);
	}
    }
}
5033
5034/*
5035 * Function : static int print_insn (struct Scsi_Host *host,
5036 * u32 *insn, int kernel)
5037 *
5038 * Purpose : print numeric representation of the instruction pointed
5039 * to by insn to the debugging or kernel message buffer
5040 * as appropriate.
5041 *
5042 * If desired, a user level program can interpret this
5043 * information.
5044 *
5045 * Inputs : host, insn - host, pointer to instruction, prefix -
5046 * string to prepend, kernel - use printk instead of debugging buffer.
5047 *
5048 * Returns : size, in u32s, of instruction printed.
5049 */
5050
5051/*
5052 * FIXME: should change kernel parameter so that it takes an ENUM
5053 * specifying severity - either KERN_ALERT or KERN_PANIC so
5054 * all panic messages are output with the same severity.
5055 */
5056
5057static int
5058print_insn (struct Scsi_Host *host, const u32 *insn,
5059 const char *prefix, int kernel) {
5060 char buf[160], /* Temporary buffer and pointer. ICKY
5061 arbitrary length. */
5062
5063
5064 *tmp;
5065 unsigned char dcmd; /* dcmd register for *insn */
5066 int size;
5067
5068 /*
5069 * Check to see if the instruction pointer is not bogus before
5070 * indirecting through it; avoiding red-zone at start of
5071 * memory.
5072 *
5073 * FIXME: icky magic needs to happen here on non-intel boxes which
5074 * don't have kernel memory mapped in like this. Might be reasonable
5075 * to use vverify()?
5076 */
5077
5078 if (virt_to_phys((void *)insn) < PAGE_SIZE ||
5079 virt_to_phys((void *)(insn + 8)) > virt_to_phys(high_memory) ||
5080 ((((dcmd = (insn[0] >> 24) & 0xff) & DCMD_TYPE_MMI) == DCMD_TYPE_MMI) &&
5081 virt_to_phys((void *)(insn + 12)) > virt_to_phys(high_memory))) {
5082 size = 0;
5083 sprintf (buf, "%s%p: address out of range\n",
5084 prefix, insn);
5085 } else {
5086/*
5087 * FIXME : (void *) cast in virt_to_bus should be unnecessary, because
5088 * it should take const void * as argument.
5089 */
5090#if !defined(CONFIG_MVME16x) && !defined(CONFIG_BVME6000)
5091 sprintf(buf, "%s0x%lx (virt 0x%p) : 0x%08x 0x%08x (virt 0x%p)",
5092 (prefix ? prefix : ""), virt_to_bus((void *) insn), insn,
5093 insn[0], insn[1], bus_to_virt (insn[1]));
5094#else
5095 /* Remove virtual addresses to reduce output, as they are the same */
5096 sprintf(buf, "%s0x%x (+%x) : 0x%08x 0x%08x",
5097 (prefix ? prefix : ""), (u32)insn, ((u32)insn -
5098 (u32)&(((struct NCR53c7x0_hostdata *)host->hostdata[0])->script))/4,
5099 insn[0], insn[1]);
5100#endif
5101 tmp = buf + strlen(buf);
5102 if ((dcmd & DCMD_TYPE_MASK) == DCMD_TYPE_MMI) {
5103#if !defined(CONFIG_MVME16x) && !defined(CONFIG_BVME6000)
5104 sprintf (tmp, " 0x%08x (virt 0x%p)\n", insn[2],
5105 bus_to_virt(insn[2]));
5106#else
5107 /* Remove virtual addr to reduce output, as it is the same */
5108 sprintf (tmp, " 0x%08x\n", insn[2]);
5109#endif
5110 size = 3;
5111 } else {
5112 sprintf (tmp, "\n");
5113 size = 2;
5114 }
5115 }
5116
5117 if (kernel)
5118 printk ("%s", buf);
5119#ifdef NCR_DEBUG
5120 else {
5121 size_t len = strlen(buf);
5122 debugger_kernel_write(host, buf, len);
5123 }
5124#endif
5125 return size;
5126}
5127
5128/*
5129 * Function : int NCR53c7xx_abort (Scsi_Cmnd *cmd)
5130 *
5131 * Purpose : Abort an errant SCSI command, doing all necessary
5132 * cleanup of the issue_queue, running_list, shared Linux/NCR
5133 * dsa issue and reconnect queues.
5134 *
5135 * Inputs : cmd - command to abort, code - entire result field
5136 *
5137 * Returns : 0 on success, -1 on failure.
5138 */
5139
int
NCR53c7xx_abort (Scsi_Cmnd *cmd) {
    NCR53c7x0_local_declare();
    struct Scsi_Host *host = cmd->device->host;
    struct NCR53c7x0_hostdata *hostdata = host ? (struct NCR53c7x0_hostdata *)
	host->hostdata[0] : NULL;
    unsigned long flags;
    struct NCR53c7x0_cmd *curr, **prev;
    Scsi_Cmnd *me, **last;
#if 0
    static long cache_pid = -1;
#endif


    /* Sanity-check the host and hostdata before touching any queue. */
    if (!host) {
	printk ("Bogus SCSI command pid %ld; no host structure\n",
	    cmd->pid);
	return SCSI_ABORT_ERROR;
    } else if (!hostdata) {
	printk ("Bogus SCSI host %d; no hostdata\n", host->host_no);
	return SCSI_ABORT_ERROR;
    }
    NCR53c7x0_local_setup(host);

/*
 * CHECK : I don't think that reading ISTAT will unstack any interrupts,
 *	since we need to write the INTF bit to clear it, and SCSI/DMA
 *	interrupts don't clear until we read SSTAT/SIST and DSTAT registers.
 *
 *	See that this is the case.  Appears to be correct on the 710, at least.
 *
 * I suspect that several of our failures may be coming from a new fatal
 * interrupt (possibly due to a phase mismatch) happening after we've left
 * the interrupt handler, but before the PIC has had the interrupt condition
 * cleared.
 */

    /* A pending, unserviced interrupt may explain the "stuck" command:
       run the interrupt handler by hand and ask the caller to retry. */
    if (NCR53c7x0_read8(hostdata->istat) & (ISTAT_DIP|ISTAT_SIP)) {
	printk ("scsi%d : dropped interrupt for command %ld\n", host->host_no,
	    cmd->pid);
	NCR53c7x0_intr (host->irq, NULL, NULL);
	return SCSI_ABORT_BUSY;
    }

    local_irq_save(flags);
#if 0
    if (cache_pid == cmd->pid)
	panic ("scsi%d : bloody fetus %d\n", host->host_no, cmd->pid);
    else
	cache_pid = cmd->pid;
#endif


/*
 * The command could be hiding in the issue_queue.  This would be very
 * nice, as commands can't be moved from the high level driver's issue queue
 * into the shared queue until an interrupt routine is serviced, and this
 * moving is atomic.
 *
 * If this is the case, we don't have to worry about anything - we simply
 * pull the command out of the old queue, and call it aborted.
 */

    /* Walk issue_queue (linked via SCp.ptr), keeping a pointer to the
       previous link so the entry can be unspliced in place. */
    for (me = (Scsi_Cmnd *) hostdata->issue_queue,
         last = (Scsi_Cmnd **) &(hostdata->issue_queue);
	 me && me != cmd;  last = (Scsi_Cmnd **)&(me->SCp.ptr),
	 me = (Scsi_Cmnd *)me->SCp.ptr);

    if (me) {
	/* Unsplice, return the NCR53c7x0_cmd to the free list, and
	   complete the command as aborted. */
	*last = (Scsi_Cmnd *) me->SCp.ptr;
	if (me->host_scribble) {
	    ((struct NCR53c7x0_cmd *)me->host_scribble)->next = hostdata->free;
	    hostdata->free = (struct NCR53c7x0_cmd *) me->host_scribble;
	    me->host_scribble = NULL;
	}
	cmd->result = DID_ABORT << 16;
	cmd->scsi_done(cmd);
	printk ("scsi%d : found command %ld in Linux issue queue\n",
	    host->host_no, me->pid);
	local_irq_restore(flags);
	run_process_issue_queue();
	return SCSI_ABORT_SUCCESS;
    }

/*
 * That failing, the command could be in our list of already executing
 * commands.  If this is the case, drastic measures are called for.
 */

    for (curr = (struct NCR53c7x0_cmd *) hostdata->running_list,
    	 prev = (struct NCR53c7x0_cmd **) &(hostdata->running_list);
	 curr && curr->cmd != cmd; prev = (struct NCR53c7x0_cmd **)
         &(curr->next), curr = (struct NCR53c7x0_cmd *) curr->next);

    if (curr) {
	/* 0xff / 0xff00 in result are the "not yet completed" sentinels;
	   anything else means the command actually finished and only the
	   completion notification was lost. */
	if ((curr->result & 0xff) != 0xff && (curr->result & 0xff00) != 0xff00) {
	    cmd->result = curr->result;
	    if (prev)
		*prev = (struct NCR53c7x0_cmd *) curr->next;
	    curr->next = (struct NCR53c7x0_cmd *) hostdata->free;
	    cmd->host_scribble = NULL;
	    hostdata->free = curr;
	    cmd->scsi_done(cmd);
	printk ("scsi%d : found finished command %ld in running list\n",
	    host->host_no, cmd->pid);
	    local_irq_restore(flags);
	    return SCSI_ABORT_NOT_RUNNING;
	} else {
	    /* Truly in flight on the chip - we cannot safely abort it. */
	    printk ("scsi%d : DANGER : command running, can not abort.\n",
		cmd->device->host->host_no);
	    local_irq_restore(flags);
	    return SCSI_ABORT_BUSY;
	}
    }

/*
 * And if we couldn't find it in any of our queues, it must have been
 * a dropped interrupt.
 */

    curr = (struct NCR53c7x0_cmd *) cmd->host_scribble;
    if (curr) {
	curr->next = hostdata->free;
	hostdata->free = curr;
	cmd->host_scribble = NULL;
    }

    /* Decide between "never ran" and "completed but we lost INTFLY",
       again using the 0xff / 0xff00 in-progress sentinels. */
    if (curr == NULL || ((curr->result & 0xff00) == 0xff00) ||
	((curr->result & 0xff) == 0xff)) {
	printk ("scsi%d : did this command ever run?\n", host->host_no);
	cmd->result = DID_ABORT << 16;
    } else {
	printk ("scsi%d : probably lost INTFLY, normal completion\n",
	    host->host_no);
        cmd->result = curr->result;
/*
 * FIXME : We need to add an additional flag which indicates if a
 * command was ever counted as BUSY, so if we end up here we can
 * decrement the busy count if and only if it is necessary.
 */
        --hostdata->busy[cmd->device->id][cmd->device->lun];
    }
    local_irq_restore(flags);
    cmd->scsi_done(cmd);

/*
 * We need to run process_issue_queue since termination of this command
 * may allow another queued command to execute first?
 */
    return SCSI_ABORT_NOT_RUNNING;
}
5291
5292/*
5293 * Function : int NCR53c7xx_reset (Scsi_Cmnd *cmd)
5294 *
5295 * Purpose : perform a hard reset of the SCSI bus and NCR
5296 * chip.
5297 *
5298 * Inputs : cmd - command which caused the SCSI RESET
5299 *
5300 * Returns : 0 on success.
5301 */
5302
5303int
5304NCR53c7xx_reset (Scsi_Cmnd *cmd, unsigned int reset_flags) {
5305 NCR53c7x0_local_declare();
5306 unsigned long flags;
5307 int found = 0;
5308 struct NCR53c7x0_cmd * c;
5309 Scsi_Cmnd *tmp;
5310 /*
5311 * When we call scsi_done(), it's going to wake up anything sleeping on the
5312 * resources which were in use by the aborted commands, and we'll start to
5313 * get new commands.
5314 *
5315 * We can't let this happen until after we've re-initialized the driver
5316 * structures, and can't reinitialize those structures until after we've
5317 * dealt with their contents.
5318 *
5319 * So, we need to find all of the commands which were running, stick
5320 * them on a linked list of completed commands (we'll use the host_scribble
5321 * pointer), do our reinitialization, and then call the done function for
5322 * each command.
5323 */
5324 Scsi_Cmnd *nuke_list = NULL;
5325 struct Scsi_Host *host = cmd->device->host;
5326 struct NCR53c7x0_hostdata *hostdata =
5327 (struct NCR53c7x0_hostdata *) host->hostdata[0];
5328
5329 NCR53c7x0_local_setup(host);
5330 local_irq_save(flags);
5331 ncr_halt (host);
5332 print_lots (host);
5333 dump_events (host, 30);
5334 ncr_scsi_reset (host);
5335 for (tmp = nuke_list = return_outstanding_commands (host, 1 /* free */,
5336 0 /* issue */ ); tmp; tmp = (Scsi_Cmnd *) tmp->SCp.buffer)
5337 if (tmp == cmd) {
5338 found = 1;
5339 break;
5340 }
5341
5342 /*
5343 * If we didn't find the command which caused this reset in our running
5344 * list, then we've lost it. See that it terminates normally anyway.
5345 */
5346 if (!found) {
5347 c = (struct NCR53c7x0_cmd *) cmd->host_scribble;
5348 if (c) {
5349 cmd->host_scribble = NULL;
5350 c->next = hostdata->free;
5351 hostdata->free = c;
5352 } else
5353 printk ("scsi%d: lost command %ld\n", host->host_no, cmd->pid);
5354 cmd->SCp.buffer = (struct scatterlist *) nuke_list;
5355 nuke_list = cmd;
5356 }
5357
5358 NCR53c7x0_driver_init (host);
5359 hostdata->soft_reset (host);
5360 if (hostdata->resets == 0)
5361 disable(host);
5362 else if (hostdata->resets != -1)
5363 --hostdata->resets;
5364 local_irq_restore(flags);
5365 for (; nuke_list; nuke_list = tmp) {
5366 tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
5367 nuke_list->result = DID_RESET << 16;
5368 nuke_list->scsi_done (nuke_list);
5369 }
5370 local_irq_restore(flags);
5371 return SCSI_RESET_SUCCESS;
5372}
5373
5374/*
5375 * The NCR SDMS bios follows Annex A of the SCSI-CAM draft, and
5376 * therefore shares the scsicam_bios_param function.
5377 */
5378
5379/*
5380 * Function : int insn_to_offset (Scsi_Cmnd *cmd, u32 *insn)
5381 *
5382 * Purpose : convert instructions stored at NCR pointer into data
5383 * pointer offset.
5384 *
5385 * Inputs : cmd - SCSI command; insn - pointer to instruction. Either current
5386 * DSP, or saved data pointer.
5387 *
5388 * Returns : offset on success, -1 on failure.
5389 */
5390
5391
static int
insn_to_offset (Scsi_Cmnd *cmd, u32 *insn) {
    struct NCR53c7x0_hostdata *hostdata =
	(struct NCR53c7x0_hostdata *) cmd->device->host->hostdata[0];
    struct NCR53c7x0_cmd *ncmd =
	(struct NCR53c7x0_cmd *) cmd->host_scribble;
    int offset = 0, buffers;
    struct scatterlist *segment;
    char *ptr;
    int found = 0;

/*
 * With the current code implementation, if the insn is inside dynamically
 * generated code, the data pointer will be the instruction preceding
 * the next transfer segment.
 */

    /* Case 1: insn lies inside this command's dynamically generated
       transfer code (data_transfer_start..end) or its residual buffer.
       NOTE(review): "ncmd->residual + sizeof(ncmd->residual)" scales the
       byte count by sizeof(u32) in pointer arithmetic, so the upper bound
       is larger than the residual array itself - confirm intent. */
    if (!check_address ((unsigned long) ncmd, sizeof (struct NCR53c7x0_cmd)) &&
	((insn >= ncmd->data_transfer_start &&
	insn < ncmd->data_transfer_end) ||
	(insn >= ncmd->residual &&
	insn < (ncmd->residual +
	sizeof(ncmd->residual))))) {
	/* insn[3] holds the bus address of the data this instruction
	   transfers; convert it back to a virtual pointer. */
	ptr = bus_to_virt(insn[3]);

	if ((buffers = cmd->use_sg)) {
	    /* Scatter/gather: accumulate the lengths of the segments that
	       precede the one containing ptr, then add the offset of ptr
	       within that segment.  The loop body is intentionally empty. */
	    for (offset = 0,
		segment = (struct scatterlist *) cmd->request_buffer;
		buffers && !((found = ((ptr >= (char *)page_address(segment->page)+segment->offset) &&
		(ptr < ((char *)page_address(segment->page)+segment->offset+segment->length)))));
		--buffers, offset += segment->length, ++segment)
#if 0
		printk("scsi%d: comparing 0x%p to 0x%p\n",
		    cmd->device->host->host_no, saved, page_address(segment->page+segment->offset));
#else
		;
#endif
	    offset += ptr - ((char *)page_address(segment->page)+segment->offset);
	} else {
	    /* Single linear buffer: offset is a plain pointer difference. */
	    found = 1;
	    offset = ptr - (char *) (cmd->request_buffer);
	}
    } else if ((insn >= hostdata->script +
	hostdata->E_data_transfer / sizeof(u32)) &&
	(insn <= hostdata->script +
	hostdata->E_end_data_transfer / sizeof(u32))) {
	/* Case 2: insn is in the static script's data transfer section;
	   no data has moved yet, so the offset is zero. */
	found = 1;
	offset = 0;
    }
    return found ? offset : -1;
}
5443
5444
5445
5446/*
5447 * Function : void print_progress (Scsi_Cmnd *cmd)
5448 *
5449 * Purpose : print the current location of the saved data pointer
5450 *
5451 * Inputs : cmd - command we are interested in
5452 *
5453 */
5454
5455static void
5456print_progress (Scsi_Cmnd *cmd) {
5457 NCR53c7x0_local_declare();
5458 struct NCR53c7x0_cmd *ncmd =
5459 (struct NCR53c7x0_cmd *) cmd->host_scribble;
5460 int offset, i;
5461 char *where;
5462 u32 *ptr;
5463 NCR53c7x0_local_setup (cmd->device->host);
5464
5465 if (check_address ((unsigned long) ncmd,sizeof (struct NCR53c7x0_cmd)) == 0)
5466 {
5467 printk("\nNCR53c7x0_cmd fields:\n");
5468 printk(" bounce.len=0x%x, addr=0x%0x, buf[]=0x%02x %02x %02x %02x\n",
5469 ncmd->bounce.len, ncmd->bounce.addr, ncmd->bounce.buf[0],
5470 ncmd->bounce.buf[1], ncmd->bounce.buf[2], ncmd->bounce.buf[3]);
5471 printk(" result=%04x, cdb[0]=0x%02x\n", ncmd->result, ncmd->cmnd[0]);
5472 }
5473
5474 for (i = 0; i < 2; ++i) {
5475 if (check_address ((unsigned long) ncmd,
5476 sizeof (struct NCR53c7x0_cmd)) == -1)
5477 continue;
5478 if (!i) {
5479 where = "saved";
5480 ptr = bus_to_virt(ncmd->saved_data_pointer);
5481 } else {
5482 where = "active";
5483 ptr = bus_to_virt (NCR53c7x0_read32 (DSP_REG) -
5484 NCR53c7x0_insn_size (NCR53c7x0_read8 (DCMD_REG)) *
5485 sizeof(u32));
5486 }
5487 offset = insn_to_offset (cmd, ptr);
5488
5489 if (offset != -1)
5490 printk ("scsi%d : %s data pointer at offset %d\n",
5491 cmd->device->host->host_no, where, offset);
5492 else {
5493 int size;
5494 printk ("scsi%d : can't determine %s data pointer offset\n",
5495 cmd->device->host->host_no, where);
5496 if (ncmd) {
5497 size = print_insn (cmd->device->host,
5498 bus_to_virt(ncmd->saved_data_pointer), "", 1);
5499 print_insn (cmd->device->host,
5500 bus_to_virt(ncmd->saved_data_pointer) + size * sizeof(u32),
5501 "", 1);
5502 }
5503 }
5504 }
5505}
5506
5507
/*
 * Dump a DSA (per-command chip data structure): the outgoing message,
 * select data, associated Scsi_Cmnd, next pointer, and sync parameters.
 * All fields are located via the hostdata->dsa_* byte offsets.
 */
static void
print_dsa (struct Scsi_Host *host, u32 *dsa, const char *prefix) {
    struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
	host->hostdata[0];
    int i, len;
    char *ptr;
    Scsi_Cmnd *cmd;

    /* Refuse to dereference a DSA pointer that isn't a valid address. */
    if (check_address ((unsigned long) dsa, hostdata->dsa_end -
	hostdata->dsa_start) == -1) {
	printk("scsi%d : bad dsa virt 0x%p\n", host->host_no, dsa);
	return;
    }
    printk("%sscsi%d : dsa at phys 0x%lx (virt 0x%p)\n"
	    " + %d : dsa_msgout length = %u, data = 0x%x (virt 0x%p)\n" ,
    	    prefix ? prefix : "",
    	    host->host_no,  virt_to_bus (dsa), dsa, hostdata->dsa_msgout,
    	    dsa[hostdata->dsa_msgout / sizeof(u32)],
	    dsa[hostdata->dsa_msgout / sizeof(u32) + 1],
	    bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]));

    /*
     * Only print messages if they're sane in length so we don't
     * blow the kernel printk buffer on something which won't buy us
     * anything.
     */

    /* Decode the outgoing message bytes one SPI message at a time; len
       is set by spi_print_msg before the loop increment uses it. */
    if (dsa[hostdata->dsa_msgout / sizeof(u32)] <
	    sizeof (hostdata->free->select))
	for (i = dsa[hostdata->dsa_msgout / sizeof(u32)],
	    ptr = bus_to_virt (dsa[hostdata->dsa_msgout / sizeof(u32) + 1]);
	    i > 0 && !check_address ((unsigned long) ptr, 1);
	    ptr += len, i -= len) {
	    printk(" ");
	    len = spi_print_msg(ptr);
	    printk("\n");
	    if (!len)
		break;
	}

    printk(" + %d : select_indirect = 0x%x\n",
	hostdata->dsa_select, dsa[hostdata->dsa_select / sizeof(u32)]);
    cmd = (Scsi_Cmnd *) bus_to_virt(dsa[hostdata->dsa_cmnd / sizeof(u32)]);
    printk(" + %d : dsa_cmnd = 0x%x ", hostdata->dsa_cmnd,
	   (u32) virt_to_bus(cmd));
    /* XXX Maybe we should access cmd->host_scribble->result here. RGH */
    if (cmd) {
	printk("               result = 0x%x, target = %d, lun = %d, cmd = ",
	    cmd->result, cmd->device->id, cmd->device->lun);
	__scsi_print_command(cmd->cmnd);
    } else
	printk("\n");
    printk(" + %d : dsa_next = 0x%x\n", hostdata->dsa_next,
	dsa[hostdata->dsa_next / sizeof(u32)]);
    if (cmd) {
	printk("scsi%d target %d : sxfer_sanity = 0x%x, scntl3_sanity = 0x%x\n"
	       "                   script : ",
	    host->host_no, cmd->device->id,
	    hostdata->sync[cmd->device->id].sxfer_sanity,
	    hostdata->sync[cmd->device->id].scntl3_sanity);
	/* Dump the per-target sync negotiation script words. */
	for (i = 0; i < (sizeof(hostdata->sync[cmd->device->id].script) / 4); ++i)
	    printk ("0x%x ", hostdata->sync[cmd->device->id].script[i]);
	printk ("\n");
    	print_progress (cmd);
    }
}
5574/*
5575 * Function : void print_queues (Scsi_Host *host)
5576 *
5577 * Purpose : print the contents of the NCR issue and reconnect queues
5578 *
5579 * Inputs : host - SCSI host we are interested in
5580 *
5581 */
5582
5583static void
5584print_queues (struct Scsi_Host *host) {
5585 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5586 host->hostdata[0];
5587 u32 *dsa, *next_dsa;
5588 volatile u32 *ncrcurrent;
5589 int left;
5590 Scsi_Cmnd *cmd, *next_cmd;
5591 unsigned long flags;
5592
5593 printk ("scsi%d : issue queue\n", host->host_no);
5594
5595 for (left = host->can_queue, cmd = (Scsi_Cmnd *) hostdata->issue_queue;
5596 left >= 0 && cmd;
5597 cmd = next_cmd) {
5598 next_cmd = (Scsi_Cmnd *) cmd->SCp.ptr;
5599 local_irq_save(flags);
5600 if (cmd->host_scribble) {
5601 if (check_address ((unsigned long) (cmd->host_scribble),
5602 sizeof (cmd->host_scribble)) == -1)
5603 printk ("scsi%d: scsi pid %ld bad pointer to NCR53c7x0_cmd\n",
5604 host->host_no, cmd->pid);
5605 /* print_dsa does sanity check on address, no need to check */
5606 else
5607 print_dsa (host, ((struct NCR53c7x0_cmd *) cmd->host_scribble)
5608 -> dsa, "");
5609 } else
5610 printk ("scsi%d : scsi pid %ld for target %d lun %d has no NCR53c7x0_cmd\n",
5611 host->host_no, cmd->pid, cmd->device->id, cmd->device->lun);
5612 local_irq_restore(flags);
5613 }
5614
5615 if (left <= 0) {
5616 printk ("scsi%d : loop detected in issue queue\n",
5617 host->host_no);
5618 }
5619
5620 /*
5621 * Traverse the NCR reconnect and start DSA structures, printing out
5622 * each element until we hit the end or detect a loop. Currently,
5623 * the reconnect structure is a linked list; and the start structure
5624 * is an array. Eventually, the reconnect structure will become a
5625 * list as well, since this simplifies the code.
5626 */
5627
5628 printk ("scsi%d : schedule dsa array :\n", host->host_no);
5629 for (left = host->can_queue, ncrcurrent = hostdata->schedule;
5630 left > 0; ncrcurrent += 2, --left)
5631 if (ncrcurrent[0] != hostdata->NOP_insn)
5632/* FIXME : convert pointer to dsa_begin to pointer to dsa. */
5633 print_dsa (host, bus_to_virt (ncrcurrent[1] -
5634 (hostdata->E_dsa_code_begin -
5635 hostdata->E_dsa_code_template)), "");
5636 printk ("scsi%d : end schedule dsa array\n", host->host_no);
5637
5638 printk ("scsi%d : reconnect_dsa_head :\n", host->host_no);
5639
5640 for (left = host->can_queue,
5641 dsa = bus_to_virt (hostdata->reconnect_dsa_head);
5642 left >= 0 && dsa;
5643 dsa = next_dsa) {
5644 local_irq_save(flags);
5645 if (check_address ((unsigned long) dsa, sizeof(dsa)) == -1) {
5646 printk ("scsi%d: bad DSA pointer 0x%p", host->host_no,
5647 dsa);
5648 next_dsa = NULL;
5649 }
5650 else
5651 {
5652 next_dsa = bus_to_virt(dsa[hostdata->dsa_next / sizeof(u32)]);
5653 print_dsa (host, dsa, "");
5654 }
5655 local_irq_restore(flags);
5656 }
5657 printk ("scsi%d : end reconnect_dsa_head\n", host->host_no);
5658 if (left < 0)
5659 printk("scsi%d: possible loop in ncr reconnect list\n",
5660 host->host_no);
5661}
5662
5663static void
5664print_lots (struct Scsi_Host *host) {
5665 NCR53c7x0_local_declare();
5666 struct NCR53c7x0_hostdata *hostdata =
5667 (struct NCR53c7x0_hostdata *) host->hostdata[0];
5668 u32 *dsp_next, *dsp, *dsa, dbc_dcmd;
5669 unsigned char dcmd, sbcl;
5670 int i, size;
5671 NCR53c7x0_local_setup(host);
5672
5673 if ((dsp_next = bus_to_virt(NCR53c7x0_read32 (DSP_REG)))) {
5674 dbc_dcmd = NCR53c7x0_read32(DBC_REG);
5675 dcmd = (dbc_dcmd & 0xff000000) >> 24;
5676 dsp = dsp_next - NCR53c7x0_insn_size(dcmd);
5677 dsa = bus_to_virt(NCR53c7x0_read32(DSA_REG));
5678 sbcl = NCR53c7x0_read8 (SBCL_REG);
5679
5680 /*
5681 * For the 53c710, the following will report value 0 for SCNTL3
5682 * and STEST0 - we don't have these registers.
5683 */
5684 printk ("scsi%d : DCMD|DBC=0x%x, DNAD=0x%x (virt 0x%p)\n"
5685 " DSA=0x%lx (virt 0x%p)\n"
5686 " DSPS=0x%x, TEMP=0x%x (virt 0x%p), DMODE=0x%x\n"
5687 " SXFER=0x%x, SCNTL3=0x%x\n"
5688 " %s%s%sphase=%s, %d bytes in SCSI FIFO\n"
5689 " SCRATCH=0x%x, saved2_dsa=0x%0lx\n",
5690 host->host_no, dbc_dcmd, NCR53c7x0_read32(DNAD_REG),
5691 bus_to_virt(NCR53c7x0_read32(DNAD_REG)),
5692 virt_to_bus(dsa), dsa,
5693 NCR53c7x0_read32(DSPS_REG), NCR53c7x0_read32(TEMP_REG),
5694 bus_to_virt (NCR53c7x0_read32(TEMP_REG)),
5695 (int) NCR53c7x0_read8(hostdata->dmode),
5696 (int) NCR53c7x0_read8(SXFER_REG),
5697 ((hostdata->chip / 100) == 8) ?
5698 (int) NCR53c7x0_read8(SCNTL3_REG_800) : 0,
5699 (sbcl & SBCL_BSY) ? "BSY " : "",
5700 (sbcl & SBCL_SEL) ? "SEL " : "",
5701 (sbcl & SBCL_REQ) ? "REQ " : "",
5702 sstat2_to_phase(NCR53c7x0_read8 (((hostdata->chip / 100) == 8) ?
5703 SSTAT1_REG : SSTAT2_REG)),
5704 (NCR53c7x0_read8 ((hostdata->chip / 100) == 8 ?
5705 SSTAT1_REG : SSTAT2_REG) & SSTAT2_FF_MASK) >> SSTAT2_FF_SHIFT,
5706 ((hostdata->chip / 100) == 8) ? NCR53c7x0_read8 (STEST0_REG_800) :
5707 NCR53c7x0_read32(SCRATCHA_REG_800),
5708 hostdata->saved2_dsa);
5709 printk ("scsi%d : DSP 0x%lx (virt 0x%p) ->\n", host->host_no,
5710 virt_to_bus(dsp), dsp);
5711 for (i = 6; i > 0; --i, dsp += size)
5712 size = print_insn (host, dsp, "", 1);
5713 if (NCR53c7x0_read8 (SCNTL1_REG) & SCNTL1_CON) {
5714 if ((hostdata->chip / 100) == 8)
5715 printk ("scsi%d : connected (SDID=0x%x, SSID=0x%x)\n",
5716 host->host_no, NCR53c7x0_read8 (SDID_REG_800),
5717 NCR53c7x0_read8 (SSID_REG_800));
5718 else
5719 printk ("scsi%d : connected (SDID=0x%x)\n",
5720 host->host_no, NCR53c7x0_read8 (SDID_REG_700));
5721 print_dsa (host, dsa, "");
5722 }
5723
5724#if 1
5725 print_queues (host);
5726#endif
5727 }
5728}
5729
5730/*
5731 * Function : static int shutdown (struct Scsi_Host *host)
5732 *
5733 * Purpose : does a clean (we hope) shutdown of the NCR SCSI
5734 * chip. Use prior to dumping core, unloading the NCR driver,
5735 *
5736 * Returns : 0 on success
5737 */
5738static int
5739shutdown (struct Scsi_Host *host) {
5740 NCR53c7x0_local_declare();
5741 unsigned long flags;
5742 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5743 host->hostdata[0];
5744 NCR53c7x0_local_setup(host);
5745 local_irq_save(flags);
5746/* Get in a state where we can reset the SCSI bus */
5747 ncr_halt (host);
5748 ncr_scsi_reset (host);
5749 hostdata->soft_reset(host);
5750
5751 disable (host);
5752 local_irq_restore(flags);
5753 return 0;
5754}
5755
5756/*
5757 * Function : void ncr_scsi_reset (struct Scsi_Host *host)
5758 *
5759 * Purpose : reset the SCSI bus.
5760 */
5761
5762static void
5763ncr_scsi_reset (struct Scsi_Host *host) {
5764 NCR53c7x0_local_declare();
5765 unsigned long flags;
5766 NCR53c7x0_local_setup(host);
5767 local_irq_save(flags);
5768 NCR53c7x0_write8(SCNTL1_REG, SCNTL1_RST);
5769 udelay(25); /* Minimum amount of time to assert RST */
5770 NCR53c7x0_write8(SCNTL1_REG, 0);
5771 local_irq_restore(flags);
5772}
5773
5774/*
5775 * Function : void hard_reset (struct Scsi_Host *host)
5776 *
5777 */
5778
5779static void
5780hard_reset (struct Scsi_Host *host) {
5781 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5782 host->hostdata[0];
5783 unsigned long flags;
5784 local_irq_save(flags);
5785 ncr_scsi_reset(host);
5786 NCR53c7x0_driver_init (host);
5787 if (hostdata->soft_reset)
5788 hostdata->soft_reset (host);
5789 local_irq_restore(flags);
5790}
5791
5792
5793/*
5794 * Function : Scsi_Cmnd *return_outstanding_commands (struct Scsi_Host *host,
5795 * int free, int issue)
5796 *
5797 * Purpose : return a linked list (using the SCp.buffer field as next,
5798 * so we don't perturb hostdata. We don't use a field of the
5799 * NCR53c7x0_cmd structure since we may not have allocated one
5800 * for the command causing the reset.) of Scsi_Cmnd structures that
5801 * had propagated below the Linux issue queue level. If free is set,
5802 * free the NCR53c7x0_cmd structures which are associated with
5803 * the Scsi_Cmnd structures, and clean up any internal
5804 * NCR lists that the commands were on. If issue is set,
5805 * also return commands in the issue queue.
5806 *
5807 * Returns : linked list of commands
5808 *
5809 * NOTE : the caller should insure that the NCR chip is halted
5810 * if the free flag is set.
5811 */
5812
5813static Scsi_Cmnd *
5814return_outstanding_commands (struct Scsi_Host *host, int free, int issue) {
5815 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5816 host->hostdata[0];
5817 struct NCR53c7x0_cmd *c;
5818 int i;
5819 u32 *ncrcurrent;
5820 Scsi_Cmnd *list = NULL, *tmp;
5821 for (c = (struct NCR53c7x0_cmd *) hostdata->running_list; c;
5822 c = (struct NCR53c7x0_cmd *) c->next) {
5823 if (c->cmd->SCp.buffer) {
5824 printk ("scsi%d : loop detected in running list!\n", host->host_no);
5825 break;
5826 } else {
5827 printk ("Duh? Bad things happening in the NCR driver\n");
5828 break;
5829 }
5830
5831 c->cmd->SCp.buffer = (struct scatterlist *) list;
5832 list = c->cmd;
5833 if (free) {
5834 c->next = hostdata->free;
5835 hostdata->free = c;
5836 }
5837 }
5838
5839 if (free) {
5840 for (i = 0, ncrcurrent = (u32 *) hostdata->schedule;
5841 i < host->can_queue; ++i, ncrcurrent += 2) {
5842 ncrcurrent[0] = hostdata->NOP_insn;
5843 ncrcurrent[1] = 0xdeadbeef;
5844 }
5845 hostdata->ncrcurrent = NULL;
5846 }
5847
5848 if (issue) {
5849 for (tmp = (Scsi_Cmnd *) hostdata->issue_queue; tmp; tmp = tmp->next) {
5850 if (tmp->SCp.buffer) {
5851 printk ("scsi%d : loop detected in issue queue!\n",
5852 host->host_no);
5853 break;
5854 }
5855 tmp->SCp.buffer = (struct scatterlist *) list;
5856 list = tmp;
5857 }
5858 if (free)
5859 hostdata->issue_queue = NULL;
5860
5861 }
5862 return list;
5863}
5864
5865/*
5866 * Function : static int disable (struct Scsi_Host *host)
5867 *
5868 * Purpose : disables the given NCR host, causing all commands
5869 * to return a driver error. Call this so we can unload the
5870 * module during development and try again. Eventually,
5871 * we should be able to find clean workarounds for these
5872 * problems.
5873 *
5874 * Inputs : host - hostadapter to twiddle
5875 *
5876 * Returns : 0 on success.
5877 */
5878
5879static int
5880disable (struct Scsi_Host *host) {
5881 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5882 host->hostdata[0];
5883 unsigned long flags;
5884 Scsi_Cmnd *nuke_list, *tmp;
5885 local_irq_save(flags);
5886 if (hostdata->state != STATE_HALTED)
5887 ncr_halt (host);
5888 nuke_list = return_outstanding_commands (host, 1 /* free */, 1 /* issue */);
5889 hard_reset (host);
5890 hostdata->state = STATE_DISABLED;
5891 local_irq_restore(flags);
5892 printk ("scsi%d : nuking commands\n", host->host_no);
5893 for (; nuke_list; nuke_list = tmp) {
5894 tmp = (Scsi_Cmnd *) nuke_list->SCp.buffer;
5895 nuke_list->result = DID_ERROR << 16;
5896 nuke_list->scsi_done(nuke_list);
5897 }
5898 printk ("scsi%d : done. \n", host->host_no);
5899 printk (KERN_ALERT "scsi%d : disabled. Unload and reload\n",
5900 host->host_no);
5901 return 0;
5902}
5903
5904/*
5905 * Function : static int ncr_halt (struct Scsi_Host *host)
5906 *
5907 * Purpose : halts the SCSI SCRIPTS(tm) processor on the NCR chip
5908 *
5909 * Inputs : host - SCSI chip to halt
5910 *
5911 * Returns : 0 on success
5912 */
5913
5914static int
5915ncr_halt (struct Scsi_Host *host) {
5916 NCR53c7x0_local_declare();
5917 unsigned long flags;
5918 unsigned char istat, tmp;
5919 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5920 host->hostdata[0];
5921 int stage;
5922 NCR53c7x0_local_setup(host);
5923
5924 local_irq_save(flags);
5925 /* Stage 0 : eat all interrupts
5926 Stage 1 : set ABORT
5927 Stage 2 : eat all but abort interrupts
5928 Stage 3 : eat all interrupts
5929 */
5930 for (stage = 0;;) {
5931 if (stage == 1) {
5932 NCR53c7x0_write8(hostdata->istat, ISTAT_ABRT);
5933 ++stage;
5934 }
5935 istat = NCR53c7x0_read8 (hostdata->istat);
5936 if (istat & ISTAT_SIP) {
5937 tmp = NCR53c7x0_read8(SSTAT0_REG);
5938 } else if (istat & ISTAT_DIP) {
5939 tmp = NCR53c7x0_read8(DSTAT_REG);
5940 if (stage == 2) {
5941 if (tmp & DSTAT_ABRT) {
5942 NCR53c7x0_write8(hostdata->istat, 0);
5943 ++stage;
5944 } else {
5945 printk(KERN_ALERT "scsi%d : could not halt NCR chip\n",
5946 host->host_no);
5947 disable (host);
5948 }
5949 }
5950 }
5951 if (!(istat & (ISTAT_SIP|ISTAT_DIP))) {
5952 if (stage == 0)
5953 ++stage;
5954 else if (stage == 3)
5955 break;
5956 }
5957 }
5958 hostdata->state = STATE_HALTED;
5959 local_irq_restore(flags);
5960#if 0
5961 print_lots (host);
5962#endif
5963 return 0;
5964}
5965
5966/*
5967 * Function: event_name (int event)
5968 *
5969 * Purpose: map event enum into user-readable strings.
5970 */
5971
5972static const char *
5973event_name (int event) {
5974 switch (event) {
5975 case EVENT_NONE: return "none";
5976 case EVENT_ISSUE_QUEUE: return "to issue queue";
5977 case EVENT_START_QUEUE: return "to start queue";
5978 case EVENT_SELECT: return "selected";
5979 case EVENT_DISCONNECT: return "disconnected";
5980 case EVENT_RESELECT: return "reselected";
5981 case EVENT_COMPLETE: return "completed";
5982 case EVENT_IDLE: return "idle";
5983 case EVENT_SELECT_FAILED: return "select failed";
5984 case EVENT_BEFORE_SELECT: return "before select";
5985 case EVENT_RESELECT_FAILED: return "reselect failed";
5986 default: return "unknown";
5987 }
5988}
5989
5990/*
5991 * Function : void dump_events (struct Scsi_Host *host, count)
5992 *
5993 * Purpose : print last count events which have occurred.
5994 */
5995static void
5996dump_events (struct Scsi_Host *host, int count) {
5997 struct NCR53c7x0_hostdata *hostdata = (struct NCR53c7x0_hostdata *)
5998 host->hostdata[0];
5999 struct NCR53c7x0_event event;
6000 int i;
6001 unsigned long flags;
6002 if (hostdata->events) {
6003 if (count > hostdata->event_size)
6004 count = hostdata->event_size;
6005 for (i = hostdata->event_index; count > 0;
6006 i = (i ? i - 1 : hostdata->event_size -1), --count) {
6007/*
6008 * By copying the event we're currently examining with interrupts
6009 * disabled, we can do multiple printk(), etc. operations and
6010 * still be guaranteed that they're happening on the same
6011 * event structure.
6012 */
6013 local_irq_save(flags);
6014#if 0
6015 event = hostdata->events[i];
6016#else
6017 memcpy ((void *) &event, (void *) &(hostdata->events[i]),
6018 sizeof(event));
6019#endif
6020
6021 local_irq_restore(flags);
6022 printk ("scsi%d : %s event %d at %ld secs %ld usecs target %d lun %d\n",
6023 host->host_no, event_name (event.event), count,
6024 (long) event.time.tv_sec, (long) event.time.tv_usec,
6025 event.target, event.lun);
6026 if (event.dsa)
6027 printk (" event for dsa 0x%lx (virt 0x%p)\n",
6028 virt_to_bus(event.dsa), event.dsa);
6029 if (event.pid != -1) {
6030 printk (" event for pid %ld ", event.pid);
6031 __scsi_print_command (event.cmnd);
6032 }
6033 }
6034 }
6035}
6036
6037/*
6038 * Function: check_address
6039 *
6040 * Purpose: Check to see if a possibly corrupt pointer will fault the
6041 * kernel.
6042 *
6043 * Inputs: addr - address; size - size of area
6044 *
6045 * Returns: 0 if area is OK, -1 on error.
6046 *
6047 * NOTES: should be implemented in terms of vverify on kernels
6048 * that have it.
6049 */
6050
6051static int
6052check_address (unsigned long addr, int size) {
6053 return (virt_to_phys((void *)addr) < PAGE_SIZE || virt_to_phys((void *)(addr + size)) > virt_to_phys(high_memory) ? -1 : 0);
6054}
6055
#ifdef MODULE
int
NCR53c7x0_release(struct Scsi_Host *host) {
	struct NCR53c7x0_hostdata *hostdata =
		(struct NCR53c7x0_hostdata *) host->hostdata[0];
	struct NCR53c7x0_cmd *cmd, *next_cmd;

	shutdown (host);
	if (host->irq != SCSI_IRQ_NONE) {
		/* Only free the IRQ when no other host still shares it */
		int irq_count;
		struct Scsi_Host *scan;

		for (irq_count = 0, scan = first_host; scan; scan = scan->next)
			if (scan->hostt == the_template && scan->irq == host->irq)
				++irq_count;
		if (irq_count == 1)
			free_irq(host->irq, NULL);
	}
	if (host->dma_channel != DMA_NONE)
		free_dma(host->dma_channel);
	if (host->io_port)
		release_region(host->io_port, host->n_io_port);

	for (cmd = (struct NCR53c7x0_cmd *) hostdata->free; cmd;
	     cmd = next_cmd, --hostdata->num_cmds) {
		next_cmd = (struct NCR53c7x0_cmd *) cmd->next;
		/*
		 * If we're going to loop, try to stop it to get a more accurate
		 * count of the leaked commands.
		 */
		cmd->next = NULL;
		if (cmd->free)
			cmd->free ((void *) cmd->real, cmd->size);
	}
	if (hostdata->num_cmds)
		printk ("scsi%d : leaked %d NCR53c7x0_cmd structures\n",
			host->host_no, hostdata->num_cmds);

	vfree(hostdata->events);

	/* XXX This assumes default cache mode to be IOMAP_FULL_CACHING, which
	 * XXX may be invalid (CONFIG_060_WRITETHROUGH)
	 */
	kernel_set_cachemode((void *)hostdata, 8192, IOMAP_FULL_CACHING);
	free_pages ((u32)hostdata, 1);
	return 1;
}
#endif /* def MODULE */
diff --git a/drivers/scsi/53c7xx.h b/drivers/scsi/53c7xx.h
deleted file mode 100644
index 218f3b901537..000000000000
--- a/drivers/scsi/53c7xx.h
+++ /dev/null
@@ -1,1608 +0,0 @@
1/*
2 * 53c710 driver. Modified from Drew Eckhardts driver
3 * for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
4 *
5 * I have left the code for the 53c8xx family in here, because it didn't
6 * seem worth removing it. The possibility of IO_MAPPED chips rather
7 * than MEMORY_MAPPED remains, in case someone wants to add support for
8 * 53c710 chips on Intel PCs (some older machines have them on the
9 * motherboard).
10 *
11 * NOTE THERE MAY BE PROBLEMS WITH CASTS IN read8 AND Co.
12 */
13
14/*
15 * NCR 53c{7,8}0x0 driver, header file
16 *
17 * Sponsored by
18 * iX Multiuser Multitasking Magazine
19 * Hannover, Germany
20 * hm@ix.de
21 *
22 * Copyright 1993, 1994, 1995 Drew Eckhardt
23 * Visionary Computing
24 * (Unix and Linux consulting and custom programming)
25 * drew@PoohSticks.ORG
26 * +1 (303) 786-7975
27 *
28 * TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
29 *
30 * PRE-ALPHA
31 *
32 * For more information, please consult
33 *
34 * NCR 53C700/53C700-66
35 * SCSI I/O Processor
36 * Data Manual
37 *
38 * NCR 53C810
39 * PCI-SCSI I/O Processor
40 * Data Manual
41 *
42 * NCR Microelectronics
43 * 1635 Aeroplaza Drive
44 * Colorado Springs, CO 80916
45 * +1 (719) 578-3400
46 *
47 * Toll free literature number
48 * +1 (800) 334-5454
49 *
50 */
51
52#ifndef NCR53c710_H
53#define NCR53c710_H
54
55#ifndef HOSTS_C
56
57/* SCSI control 0 rw, default = 0xc0 */
58#define SCNTL0_REG 0x00
59#define SCNTL0_ARB1 0x80 /* 0 0 = simple arbitration */
60#define SCNTL0_ARB2 0x40 /* 1 1 = full arbitration */
61#define SCNTL0_STRT 0x20 /* Start Sequence */
62#define SCNTL0_WATN 0x10 /* Select with ATN */
63#define SCNTL0_EPC 0x08 /* Enable parity checking */
64/* Bit 2 is reserved on 800 series chips */
65#define SCNTL0_EPG_700 0x04 /* Enable parity generation */
66#define SCNTL0_AAP 0x02 /* ATN/ on parity error */
67#define SCNTL0_TRG 0x01 /* Target mode */
68
69/* SCSI control 1 rw, default = 0x00 */
70
71#define SCNTL1_REG 0x01
72#define SCNTL1_EXC 0x80 /* Extra Clock Cycle of Data setup */
73#define SCNTL1_ADB 0x40 /* contents of SODL on bus */
74#define SCNTL1_ESR_700 0x20 /* Enable SIOP response to selection
75 and reselection */
76#define SCNTL1_DHP_800 0x20 /* Disable halt on parity error or ATN
77 target mode only */
78#define SCNTL1_CON 0x10 /* Connected */
79#define SCNTL1_RST 0x08 /* SCSI RST/ */
80#define SCNTL1_AESP 0x04 /* Force bad parity */
81#define SCNTL1_SND_700 0x02 /* Start SCSI send */
82#define SCNTL1_IARB_800 0x02 /* Immediate Arbitration, start
83 arbitration immediately after
84 busfree is detected */
85#define SCNTL1_RCV_700 0x01 /* Start SCSI receive */
86#define SCNTL1_SST_800 0x01 /* Start SCSI transfer */
87
88/* SCSI control 2 rw, */
89
90#define SCNTL2_REG_800 0x02
91#define SCNTL2_800_SDU 0x80 /* SCSI disconnect unexpected */
92
93/* SCSI control 3 rw */
94
95#define SCNTL3_REG_800 0x03
96#define SCNTL3_800_SCF_SHIFT 4
97#define SCNTL3_800_SCF_MASK 0x70
98#define SCNTL3_800_SCF2 0x40 /* Synchronous divisor */
99#define SCNTL3_800_SCF1 0x20 /* 0x00 = SCLK/3 */
100#define SCNTL3_800_SCF0 0x10 /* 0x10 = SCLK/1 */
101 /* 0x20 = SCLK/1.5
102 0x30 = SCLK/2
103 0x40 = SCLK/3 */
104
105#define SCNTL3_800_CCF_SHIFT 0
106#define SCNTL3_800_CCF_MASK 0x07
107#define SCNTL3_800_CCF2 0x04 /* 0x00 50.01 to 66 */
108#define SCNTL3_800_CCF1 0x02 /* 0x01 16.67 to 25 */
109#define SCNTL3_800_CCF0 0x01 /* 0x02 25.01 - 37.5
110 0x03 37.51 - 50
111 0x04 50.01 - 66 */
112
113/*
114 * SCSI destination ID rw - the appropriate bit is set for the selected
115 * target ID. This is written by the SCSI SCRIPTS processor.
116 * default = 0x00
117 */
118#define SDID_REG_700 0x02
119#define SDID_REG_800 0x06
120
121#define GP_REG_800 0x07 /* General purpose IO */
122#define GP_800_IO1 0x02
123#define GP_800_IO2 0x01
124
125/* SCSI interrupt enable rw, default = 0x00 */
126#define SIEN_REG_700 0x03
127#define SIEN0_REG_800 0x40
128#define SIEN_MA 0x80 /* Phase mismatch (ini) or ATN (tgt) */
129#define SIEN_FC 0x40 /* Function complete */
130#define SIEN_700_STO 0x20 /* Selection or reselection timeout */
131#define SIEN_800_SEL 0x20 /* Selected */
132#define SIEN_700_SEL 0x10 /* Selected or reselected */
133#define SIEN_800_RESEL 0x10 /* Reselected */
134#define SIEN_SGE 0x08 /* SCSI gross error */
135#define SIEN_UDC 0x04 /* Unexpected disconnect */
136#define SIEN_RST 0x02 /* SCSI RST/ received */
137#define SIEN_PAR 0x01 /* Parity error */
138
139/*
140 * SCSI chip ID rw
141 * NCR53c700 :
142 * When arbitrating, the highest bit is used, when reselection or selection
143 * occurs, the chip responds to all IDs for which a bit is set.
144 * default = 0x00
145 * NCR53c810 :
146 * Uses bit mapping
147 */
148#define SCID_REG 0x04
149/* Bit 7 is reserved on 800 series chips */
150#define SCID_800_RRE 0x40 /* Enable response to reselection */
151#define SCID_800_SRE 0x20 /* Enable response to selection */
152/* Bits four and three are reserved on 800 series chips */
153#define SCID_800_ENC_MASK 0x07 /* Encoded SCSI ID */
154
155/* SCSI transfer rw, default = 0x00 */
156#define SXFER_REG 0x05
157#define SXFER_DHP 0x80 /* Disable halt on parity */
158
159#define SXFER_TP2 0x40 /* Transfer period msb */
160#define SXFER_TP1 0x20
161#define SXFER_TP0 0x10 /* lsb */
162#define SXFER_TP_MASK 0x70
163/* FIXME : SXFER_TP_SHIFT == 5 is right for '8xx chips */
164#define SXFER_TP_SHIFT 5
165#define SXFER_TP_4 0x00 /* Divisors */
166#define SXFER_TP_5 0x10<<1
167#define SXFER_TP_6 0x20<<1
168#define SXFER_TP_7 0x30<<1
169#define SXFER_TP_8 0x40<<1
170#define SXFER_TP_9 0x50<<1
171#define SXFER_TP_10 0x60<<1
172#define SXFER_TP_11 0x70<<1
173
174#define SXFER_MO3 0x08 /* Max offset msb */
175#define SXFER_MO2 0x04
176#define SXFER_MO1 0x02
177#define SXFER_MO0 0x01 /* lsb */
178#define SXFER_MO_MASK 0x0f
179#define SXFER_MO_SHIFT 0
180
181/*
182 * SCSI output data latch rw
183 * The contents of this register are driven onto the SCSI bus when
184 * the Assert Data Bus bit of the SCNTL1 register is set and
185 * the CD, IO, and MSG bits of the SOCL register match the SCSI phase
186 */
187#define SODL_REG_700 0x06
188#define SODL_REG_800 0x54
189
190
191/*
192 * SCSI output control latch rw, default = 0
193 * Note that when the chip is being manually programmed as an initiator,
194 * the MSG, CD, and IO bits must be set correctly for the phase the target
195 * is driving the bus in. Otherwise no data transfer will occur due to
196 * phase mismatch.
197 */
198
199#define SOCL_REG 0x07
200#define SOCL_REQ 0x80 /* REQ */
201#define SOCL_ACK 0x40 /* ACK */
202#define SOCL_BSY 0x20 /* BSY */
203#define SOCL_SEL 0x10 /* SEL */
204#define SOCL_ATN 0x08 /* ATN */
205#define SOCL_MSG 0x04 /* MSG */
206#define SOCL_CD 0x02 /* C/D */
207#define SOCL_IO 0x01 /* I/O */
208
209/*
210 * SCSI first byte received latch ro
211 * This register contains the first byte received during a block MOVE
212 * SCSI SCRIPTS instruction, including
213 *
214 * Initiator mode Target mode
215 * Message in Command
216 * Status Message out
217 * Data in Data out
218 *
219 * It also contains the selecting or reselecting device's ID and our
220 * ID.
221 *
222 * Note that this is the register the various IF conditionals can
223 * operate on.
224 */
225#define SFBR_REG 0x08
226
227/*
228 * SCSI input data latch ro
229 * In initiator mode, data is latched into this register on the rising
230 * edge of REQ/. In target mode, data is latched on the rising edge of
231 * ACK/
232 */
233#define SIDL_REG_700 0x09
234#define SIDL_REG_800 0x50
235
236/*
237 * SCSI bus data lines ro
238 * This register reflects the instantaneous status of the SCSI data
239 * lines. Note that SCNTL0 must be set to disable parity checking,
240 * otherwise reading this register will latch new parity.
241 */
242#define SBDL_REG_700 0x0a
243#define SBDL_REG_800 0x58
244
245#define SSID_REG_800 0x0a
246#define SSID_800_VAL 0x80 /* Exactly two bits asserted at sel */
247#define SSID_800_ENCID_MASK 0x07 /* Device which performed operation */
248
249
250/*
251 * SCSI bus control lines rw,
252 * instantaneous readout of control lines
253 */
254#define SBCL_REG 0x0b
255#define SBCL_REQ 0x80 /* REQ ro */
256#define SBCL_ACK 0x40 /* ACK ro */
257#define SBCL_BSY 0x20 /* BSY ro */
258#define SBCL_SEL 0x10 /* SEL ro */
259#define SBCL_ATN 0x08 /* ATN ro */
260#define SBCL_MSG 0x04 /* MSG ro */
261#define SBCL_CD 0x02 /* C/D ro */
262#define SBCL_IO 0x01 /* I/O ro */
263#define SBCL_PHASE_CMDOUT SBCL_CD
264#define SBCL_PHASE_DATAIN SBCL_IO
265#define SBCL_PHASE_DATAOUT 0
266#define SBCL_PHASE_MSGIN (SBCL_CD|SBCL_IO|SBCL_MSG)
267#define SBCL_PHASE_MSGOUT (SBCL_CD|SBCL_MSG)
268#define SBCL_PHASE_STATIN (SBCL_CD|SBCL_IO)
269#define SBCL_PHASE_MASK (SBCL_CD|SBCL_IO|SBCL_MSG)
270/*
271 * Synchronous SCSI Clock Control bits
272 * 0 - set by DCNTL
273 * 1 - SCLK / 1.0
274 * 2 - SCLK / 1.5
275 * 3 - SCLK / 2.0
276 */
277#define SBCL_SSCF1 0x02 /* wo, -66 only */
278#define SBCL_SSCF0 0x01 /* wo, -66 only */
279#define SBCL_SSCF_MASK 0x03
280
281/*
282 * XXX note : when reading the DSTAT and STAT registers to clear interrupts,
283 * insure that 10 clocks elapse between the two
284 */
285/* DMA status ro */
286#define DSTAT_REG 0x0c
287#define DSTAT_DFE 0x80 /* DMA FIFO empty */
288#define DSTAT_800_MDPE 0x40 /* Master Data Parity Error */
289#define DSTAT_800_BF 0x20 /* Bus Fault */
290#define DSTAT_ABRT 0x10 /* Aborted - set on error */
291#define DSTAT_SSI 0x08 /* SCRIPTS single step interrupt */
292#define DSTAT_SIR 0x04 /* SCRIPTS interrupt received -
293 set when INT instruction is
294 executed */
295#define DSTAT_WTD 0x02 /* Watchdog timeout detected */
296#define DSTAT_OPC 0x01 /* Illegal instruction */
297#define DSTAT_800_IID 0x01 /* Same thing, different name */
298
299
300/* NCR53c800 moves this stuff into SIST0 */
301#define SSTAT0_REG 0x0d /* SCSI status 0 ro */
302#define SIST0_REG_800 0x42
303#define SSTAT0_MA 0x80 /* ini : phase mismatch,
304 * tgt : ATN/ asserted
305 */
306#define SSTAT0_CMP 0x40 /* function complete */
307#define SSTAT0_700_STO 0x20 /* Selection or reselection timeout */
308#define SIST0_800_SEL 0x20 /* Selected */
309#define SSTAT0_700_SEL 0x10 /* Selected or reselected */
310#define SIST0_800_RSL 0x10 /* Reselected */
311#define SSTAT0_SGE 0x08 /* SCSI gross error */
312#define SSTAT0_UDC 0x04 /* Unexpected disconnect */
313#define SSTAT0_RST 0x02 /* SCSI RST/ received */
314#define SSTAT0_PAR 0x01 /* Parity error */
315
316/* And uses SSTAT0 for what was SSTAT1 */
317
318#define SSTAT1_REG 0x0e /* SCSI status 1 ro */
319#define SSTAT1_ILF 0x80 /* SIDL full */
320#define SSTAT1_ORF 0x40 /* SODR full */
321#define SSTAT1_OLF 0x20 /* SODL full */
322#define SSTAT1_AIP 0x10 /* Arbitration in progress */
323#define SSTAT1_LOA 0x08 /* Lost arbitration */
324#define SSTAT1_WOA 0x04 /* Won arbitration */
325#define SSTAT1_RST 0x02 /* Instant readout of RST/ */
326#define SSTAT1_SDP 0x01 /* Instant readout of SDP/ */
327
328#define SSTAT2_REG 0x0f /* SCSI status 2 ro */
329#define SSTAT2_FF3 0x80 /* number of bytes in synchronous */
330#define SSTAT2_FF2 0x40 /* data FIFO */
331#define SSTAT2_FF1 0x20
332#define SSTAT2_FF0 0x10
333#define SSTAT2_FF_MASK 0xf0
334#define SSTAT2_FF_SHIFT 4
335
336/*
337 * Latched signals, latched on the leading edge of REQ/ for initiators,
338 * ACK/ for targets.
339 */
340#define SSTAT2_SDP 0x08 /* SDP */
341#define SSTAT2_MSG 0x04 /* MSG */
342#define SSTAT2_CD 0x02 /* C/D */
343#define SSTAT2_IO 0x01 /* I/O */
344#define SSTAT2_PHASE_CMDOUT SSTAT2_CD
345#define SSTAT2_PHASE_DATAIN SSTAT2_IO
346#define SSTAT2_PHASE_DATAOUT 0
347#define SSTAT2_PHASE_MSGIN (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
348#define SSTAT2_PHASE_MSGOUT (SSTAT2_CD|SSTAT2_MSG)
349#define SSTAT2_PHASE_STATIN (SSTAT2_CD|SSTAT2_IO)
350#define SSTAT2_PHASE_MASK (SSTAT2_CD|SSTAT2_IO|SSTAT2_MSG)
351
352
353/* NCR53c700-66 only */
354#define SCRATCHA_REG_00 0x10 /* through 0x13 Scratch A rw */
355/* NCR53c710 and higher */
356#define DSA_REG 0x10 /* DATA structure address */
357
358#define CTEST0_REG_700 0x14 /* Chip test 0 ro */
359#define CTEST0_REG_800 0x18 /* Chip test 0 rw, general purpose */
360/* 0x80 - 0x04 are reserved */
361#define CTEST0_700_RTRG 0x02 /* Real target mode */
362#define CTEST0_700_DDIR 0x01 /* Data direction, 1 =
363 * SCSI bus to host, 0 =
364 * host to SCSI.
365 */
366
367#define CTEST1_REG_700 0x15 /* Chip test 1 ro */
368#define CTEST1_REG_800 0x19 /* Chip test 1 ro */
369#define CTEST1_FMT3 0x80 /* Identify which byte lanes are empty */
370#define CTEST1_FMT2 0x40 /* in the DMA FIFO */
371#define CTEST1_FMT1 0x20
372#define CTEST1_FMT0 0x10
373
374#define CTEST1_FFL3 0x08 /* Identify which bytes lanes are full */
375#define CTEST1_FFL2 0x04 /* in the DMA FIFO */
376#define CTEST1_FFL1 0x02
377#define CTEST1_FFL0 0x01
378
#define CTEST2_REG_700	0x16	/* Chip test 2 ro */
#define CTEST2_REG_800	0x1a	/* Chip test 2 ro */

#define CTEST2_800_DDIR		0x80	/* 1 = SCSI->host */
#define CTEST2_800_SIGP		0x40	/* A copy of SIGP in ISTAT.
					   Reading this register clears */
/* NOTE(review): a stray '.' after the comment made this macro expand to
   "0x20 ." - a syntax error at any point of use; removed. */
#define CTEST2_800_CIO		0x20	/* Configured as IO */
#define CTEST2_800_CM		0x10	/* Configured as memory */
387
388/* 0x80 - 0x40 are reserved on 700 series chips */
389#define CTEST2_700_SOFF 0x20 /* SCSI Offset Compare,
390 * As an initiator, this bit is
391 * one when the synchronous offset
392 * is zero, as a target this bit
393 * is one when the synchronous
394 * offset is at the maximum
395 * defined in SXFER
396 */
397#define CTEST2_700_SFP 0x10 /* SCSI FIFO parity bit,
398 * reading CTEST3 unloads a byte
399 * from the FIFO and sets this
400 */
401#define CTEST2_700_DFP 0x08 /* DMA FIFO parity bit,
402 * reading CTEST6 unloads a byte
403 * from the FIFO and sets this
404 */
405#define CTEST2_TEOP 0x04 /* SCSI true end of process,
406 * indicates a totally finished
407 * transfer
408 */
409#define CTEST2_DREQ 0x02 /* Data request signal */
410/* 0x01 is reserved on 700 series chips */
411#define CTEST2_800_DACK 0x01
412
413/*
414 * Chip test 3 ro
415 * Unloads the bottom byte of the eight deep SCSI synchronous FIFO,
416 * check SSTAT2 FIFO full bits to determine size. Note that a GROSS
417 * error results if a read is attempted on this register. Also note
418 * that 16 and 32 bit reads of this register will cause corruption.
419 */
420#define CTEST3_REG_700 0x17
421/* Chip test 3 rw */
422#define CTEST3_REG_800 0x1b
423#define CTEST3_800_V3 0x80 /* Chip revision */
424#define CTEST3_800_V2 0x40
425#define CTEST3_800_V1 0x20
426#define CTEST3_800_V0 0x10
427#define CTEST3_800_FLF 0x08 /* Flush DMA FIFO */
428#define CTEST3_800_CLF 0x04 /* Clear DMA FIFO */
429#define CTEST3_800_FM 0x02 /* Fetch mode pin */
430/* bit 0 is reserved on 800 series chips */
431
432#define CTEST4_REG_700 0x18 /* Chip test 4 rw */
433#define CTEST4_REG_800 0x21 /* Chip test 4 rw */
434/* 0x80 is reserved on 700 series chips */
435#define CTEST4_800_BDIS 0x80 /* Burst mode disable */
436#define CTEST4_ZMOD 0x40 /* High impedance mode */
437#define CTEST4_SZM 0x20 /* SCSI bus high impedance */
438#define CTEST4_700_SLBE 0x10 /* SCSI loopback enabled */
439#define CTEST4_800_SRTM 0x10 /* Shadow Register Test Mode */
440#define CTEST4_700_SFWR 0x08 /* SCSI FIFO write enable,
441 * redirects writes from SODL
442 * to the SCSI FIFO.
443 */
444#define CTEST4_800_MPEE 0x08 /* Enable parity checking
445 during master cycles on PCI
446 bus */
447
448/*
449 * These bits send the contents of the CTEST6 register to the appropriate
450 * byte lane of the 32 bit DMA FIFO. Normal operation is zero, otherwise
451 * the high bit means the low two bits select the byte lane.
452 */
453#define CTEST4_FBL2 0x04
454#define CTEST4_FBL1 0x02
455#define CTEST4_FBL0 0x01
456#define CTEST4_FBL_MASK 0x07
457#define CTEST4_FBL_0 0x04 /* Select DMA FIFO byte lane 0 */
458#define CTEST4_FBL_1 0x05 /* Select DMA FIFO byte lane 1 */
459#define CTEST4_FBL_2 0x06 /* Select DMA FIFO byte lane 2 */
460#define CTEST4_FBL_3 0x07 /* Select DMA FIFO byte lane 3 */
461#define CTEST4_800_SAVE (CTEST4_800_BDIS)
462
463
464#define CTEST5_REG_700 0x19 /* Chip test 5 rw */
465#define CTEST5_REG_800 0x22 /* Chip test 5 rw */
466/*
467 * Clock Address Incrementor. When set, it increments the
468 * DNAD register to the next bus size boundary. It automatically
469 * resets itself when the operation is complete.
470 */
471#define CTEST5_ADCK 0x80
472/*
473 * Clock Byte Counter. When set, it decrements the DBC register to
474 * the next bus size boundary.
475 */
476#define CTEST5_BBCK 0x40
477/*
478 * Reset SCSI Offset. Setting this bit to 1 clears the current offset
479 * pointer in the SCSI synchronous offset counter (SSTAT). This bit
480 * is set to 1 if a SCSI Gross Error Condition occurs. The offset should
481 * be cleared when a synchronous transfer fails. When written, it is
482 * automatically cleared after the SCSI synchronous offset counter is
483 * reset.
484 */
485/* Bit 5 is reserved on 800 series chips */
486#define CTEST5_700_ROFF 0x20
487/*
488 * Master Control for Set or Reset pulses. When 1, causes the low
489 * four bits of register to set when set, 0 causes the low bits to
490 * clear when set.
491 */
492#define CTEST5_MASR 0x10
493#define CTEST5_DDIR 0x08 /* DMA direction */
494/*
495 * Bits 2-0 are reserved on 800 series chips
496 */
497#define CTEST5_700_EOP 0x04 /* End of process */
498#define CTEST5_700_DREQ 0x02 /* Data request */
499#define CTEST5_700_DACK 0x01 /* Data acknowledge */
500
501/*
502 * Chip test 6 rw - writing to this register writes to the byte
503 * lane in the DMA FIFO as determined by the FBL bits in the CTEST4
504 * register.
505 */
506#define CTEST6_REG_700 0x1a
507#define CTEST6_REG_800 0x23
508
509#define CTEST7_REG 0x1b /* Chip test 7 rw */
510/* 0x80 - 0x40 are reserved on NCR53c700 and NCR53c700-66 chips */
511#define CTEST7_10_CDIS 0x80 /* Cache burst disable */
512#define CTEST7_10_SC1 0x40 /* Snoop control bits */
513#define CTEST7_10_SC0 0x20
514#define CTEST7_10_SC_MASK 0x60
515/* 0x20 is reserved on the NCR53c700 */
516#define CTEST7_0060_FM 0x20 /* Fetch mode */
517#define CTEST7_STD 0x10 /* Selection timeout disable */
518#define CTEST7_DFP 0x08 /* DMA FIFO parity bit for CTEST6 */
519#define CTEST7_EVP 0x04 /* 1 = host bus even parity, 0 = odd */
520#define CTEST7_10_TT1 0x02 /* Transfer type */
521#define CTEST7_00_DC 0x02 /* Set to drive DC low during instruction
522 fetch */
523#define CTEST7_DIFF 0x01 /* Differential mode */
524
525#define CTEST7_SAVE ( CTEST7_EVP | CTEST7_DIFF )
526
527
528#define TEMP_REG 0x1c /* through 0x1f Temporary stack rw */
529
530#define DFIFO_REG 0x20 /* DMA FIFO rw */
531/*
532 * 0x80 is reserved on the NCR53c710, the CLF and FLF bits have been
533 * moved into the CTEST8 register.
534 */
535#define DFIFO_00_FLF 0x80 /* Flush DMA FIFO to memory */
536#define DFIFO_00_CLF 0x40 /* Clear DMA and SCSI FIFOs */
537#define DFIFO_BO6 0x40
538#define DFIFO_BO5 0x20
539#define DFIFO_BO4 0x10
540#define DFIFO_BO3 0x08
541#define DFIFO_BO2 0x04
542#define DFIFO_BO1 0x02
543#define DFIFO_BO0 0x01
544#define DFIFO_10_BO_MASK 0x7f /* 7 bit counter */
545#define DFIFO_00_BO_MASK 0x3f /* 6 bit counter */
546
547/*
548 * Interrupt status rw
549 * Note that this is the only register which can be read while SCSI
550 * SCRIPTS are being executed.
551 */
552#define ISTAT_REG_700 0x21
553#define ISTAT_REG_800 0x14
554#define ISTAT_ABRT 0x80 /* Software abort, write
555 *1 to abort, wait for interrupt. */
556/* 0x40 and 0x20 are reserved on NCR53c700 and NCR53c700-66 chips */
557#define ISTAT_10_SRST 0x40 /* software reset */
558#define ISTAT_10_SIGP 0x20 /* signal script */
559/* 0x10 is reserved on NCR53c700 series chips */
560#define ISTAT_800_SEM 0x10 /* semaphore */
561#define ISTAT_CON 0x08 /* 1 when connected */
562#define ISTAT_800_INTF 0x04 /* Interrupt on the fly */
563#define ISTAT_700_PRE 0x04 /* Pointer register empty.
564 * Set to 1 when DSPS and DSP
565 * registers are empty in pipeline
566 * mode, always set otherwise.
567 */
568#define ISTAT_SIP 0x02 /* SCSI interrupt pending from
569 * SCSI portion of SIOP see
570 * SSTAT0
571 */
572#define ISTAT_DIP 0x01 /* DMA interrupt pending
573 * see DSTAT
574 */
575
576/* NCR53c700-66 and NCR53c710 only */
577#define CTEST8_REG 0x22 /* Chip test 8 rw */
578#define CTEST8_0066_EAS 0x80 /* Enable alternate SCSI clock,
579 * ie read from SCLK/ rather than CLK/
580 */
581#define CTEST8_0066_EFM 0x40 /* Enable fetch and master outputs */
582#define CTEST8_0066_GRP 0x20 /* Generate Receive Parity for
583 * pass through. This insures that
584 * bad parity won't reach the host
585 * bus.
586 */
587#define CTEST8_0066_TE 0x10 /* TolerANT enable. Enable
588 * active negation, should only
589 * be used for slow SCSI
590 * non-differential.
591 */
592#define CTEST8_0066_HSC 0x08 /* Halt SCSI clock */
593#define CTEST8_0066_SRA 0x04 /* Shorten REQ/ACK filtering,
594 * must be set for fast SCSI-II
595 * speeds.
596 */
597#define CTEST8_0066_DAS 0x02 /* Disable automatic target/initiator
598 * switching.
599 */
600#define CTEST8_0066_LDE 0x01 /* Last disconnect enable.
601 * The status of pending
602 * disconnect is maintained by
603 * the core, eliminating
604 * the possibility of missing a
605 * selection or reselection
606 * while waiting to fetch a
607 * WAIT DISCONNECT opcode.
608 */
609
610#define CTEST8_10_V3 0x80 /* Chip revision */
611#define CTEST8_10_V2 0x40
612#define CTEST8_10_V1 0x20
613#define CTEST8_10_V0 0x10
614#define CTEST8_10_V_MASK 0xf0
615#define CTEST8_10_FLF 0x08 /* Flush FIFOs */
616#define CTEST8_10_CLF 0x04 /* Clear FIFOs */
617#define CTEST8_10_FM 0x02 /* Fetch pin mode */
618#define CTEST8_10_SM 0x01 /* Snoop pin mode */
619
620
621/*
622 * The CTEST9 register may be used to differentiate between a
623 * NCR53c700 and a NCR53c710.
624 *
625 * Write 0xff to this register.
626 * Read it.
627 * If the contents are 0xff, it is a NCR53c700
628 * If the contents are 0x00, it is a NCR53c700-66 first revision
629 * If the contents are some other value, it is some other NCR53c700-66
630 */
631#define CTEST9_REG_00 0x23 /* Chip test 9 ro */
632#define LCRC_REG_10 0x23
633
634/*
635 * 0x24 through 0x27 are the DMA byte counter register. Instructions
636 * write their high 8 bits into the DCMD register, the low 24 bits into
637 * the DBC register.
638 *
639 * Function is dependent on the command type being executed.
640 */
641
642
643#define DBC_REG 0x24
644/*
645 * For Block Move Instructions, DBC is a 24 bit quantity representing
646 * the number of bytes to transfer.
647 * For Transfer Control Instructions, DBC is bit fielded as follows :
648 */
649/* Bits 20 - 23 should be clear */
650#define DBC_TCI_TRUE (1 << 19) /* Jump when true */
651#define DBC_TCI_COMPARE_DATA (1 << 18) /* Compare data */
652#define DBC_TCI_COMPARE_PHASE (1 << 17) /* Compare phase with DCMD field */
653#define DBC_TCI_WAIT_FOR_VALID (1 << 16) /* Wait for REQ */
654/* Bits 8 - 15 are reserved on some implementations ? */
655#define DBC_TCI_MASK_MASK 0xff00 /* Mask for data compare */
656#define DBC_TCI_MASK_SHIFT 8
657#define DBC_TCI_DATA_MASK 0xff /* Data to be compared */
658#define DBC_TCI_DATA_SHIFT 0
659
660#define DBC_RWRI_IMMEDIATE_MASK 0xff00 /* Immediate data */
661#define DBC_RWRI_IMMEDIATE_SHIFT 8 /* Amount to shift */
662#define DBC_RWRI_ADDRESS_MASK 0x3f0000 /* Register address */
663#define DBC_RWRI_ADDRESS_SHIFT 16
664
665
666/*
667 * DMA command r/w
668 */
669#define DCMD_REG 0x27
670#define DCMD_TYPE_MASK 0xc0 /* Masks off type */
671#define DCMD_TYPE_BMI 0x00 /* Indicates a Block Move instruction */
672#define DCMD_BMI_IO 0x01 /* I/O, CD, and MSG bits selecting */
673#define DCMD_BMI_CD 0x02 /* the phase for the block MOVE */
674#define DCMD_BMI_MSG 0x04 /* instruction */
675
676#define DCMD_BMI_OP_MASK 0x18 /* mask for opcode */
677#define DCMD_BMI_OP_MOVE_T 0x00 /* MOVE */
678#define DCMD_BMI_OP_MOVE_I 0x08 /* MOVE Initiator */
679
680#define DCMD_BMI_INDIRECT 0x20 /* Indirect addressing */
681
682#define DCMD_TYPE_TCI 0x80 /* Indicates a Transfer Control
683 instruction */
684#define DCMD_TCI_IO 0x01 /* I/O, CD, and MSG bits selecting */
685#define DCMD_TCI_CD 0x02 /* the phase for the block MOVE */
686#define DCMD_TCI_MSG 0x04 /* instruction */
687#define DCMD_TCI_OP_MASK 0x38 /* mask for opcode */
688#define DCMD_TCI_OP_JUMP 0x00 /* JUMP */
689#define DCMD_TCI_OP_CALL 0x08 /* CALL */
690#define DCMD_TCI_OP_RETURN 0x10 /* RETURN */
691#define DCMD_TCI_OP_INT 0x18 /* INT */
692
693#define DCMD_TYPE_RWRI 0x40 /* Indicates I/O or register Read/Write
694 instruction */
695#define DCMD_RWRI_OPC_MASK 0x38 /* Opcode mask */
696#define DCMD_RWRI_OPC_WRITE 0x28 /* Write SFBR to register */
697#define DCMD_RWRI_OPC_READ 0x30 /* Read register to SFBR */
698#define DCMD_RWRI_OPC_MODIFY 0x38 /* Modify in place */
699
700#define DCMD_RWRI_OP_MASK 0x07
701#define DCMD_RWRI_OP_MOVE 0x00
702#define DCMD_RWRI_OP_SHL 0x01
703#define DCMD_RWRI_OP_OR 0x02
704#define DCMD_RWRI_OP_XOR 0x03
705#define DCMD_RWRI_OP_AND 0x04
706#define DCMD_RWRI_OP_SHR 0x05
707#define DCMD_RWRI_OP_ADD 0x06
708#define DCMD_RWRI_OP_ADDC 0x07
709
710#define DCMD_TYPE_MMI 0xc0 /* Indicates a Memory Move instruction
711 (three words) */
712
713
714#define DNAD_REG 0x28 /* through 0x2b DMA next address for
715 data */
716#define DSP_REG 0x2c /* through 0x2f DMA SCRIPTS pointer rw */
717#define DSPS_REG 0x30 /* through 0x33 DMA SCRIPTS pointer
718 save rw */
#define DMODE_REG_00 0x34               /* DMA mode rw */
#define DMODE_00_BL1 0x80               /* Burst length bits */
#define DMODE_00_BL0 0x40
#define DMODE_BL_MASK 0xc0
/* Burst lengths (800) */
#define DMODE_BL_2 0x00                 /* 2 transfer */
#define DMODE_BL_4 0x40                 /* 4 transfers */
#define DMODE_BL_8 0x80                 /* 8 transfers */
#define DMODE_BL_16 0xc0                /* 16 transfers */

/* Burst lengths (710) */
#define DMODE_10_BL_1 0x00              /* 1 transfer */
#define DMODE_10_BL_2 0x40              /* 2 transfers */
#define DMODE_10_BL_4 0x80              /* 4 transfers */
#define DMODE_10_BL_8 0xc0              /* 8 transfers */
#define DMODE_10_FC2 0x20               /* Driven to FC2 pin */
#define DMODE_10_FC1 0x10               /* Driven to FC1 pin */
#define DMODE_710_PD 0x08               /* Program/data on FC0 pin */
#define DMODE_710_UO 0x02               /* User prog. output */

#define DMODE_700_BW16 0x20             /* Host buswidth = 16 */
#define DMODE_700_286 0x10              /* 286 mode */
#define DMODE_700_IOM 0x08              /* Transfer to IO port */
#define DMODE_700_FAM 0x04              /* Fixed address mode */
#define DMODE_700_PIPE 0x02             /* Pipeline mode disables
                                         * automatic fetch / exec
                                         */
#define DMODE_MAN 0x01                  /* Manual start mode,
                                         * requires a 1 to be written
                                         * to the start DMA bit in the DCNTL
                                         * register to run scripts
                                         */

/*
 * Bits of DMODE preserved across a soft reset on 700-series parts.
 *
 * FIX: the original expansion referenced DMODE_00_BL_MASK, DMODE_00_BW16
 * and DMODE_00_286, none of which is defined anywhere in this header, so
 * the macro could never have compiled if used.  The intended bits are the
 * burst-length mask plus the 700-series bus-width and 286-mode flags
 * defined above.
 */
#define DMODE_700_SAVE ( DMODE_BL_MASK | DMODE_700_BW16 | DMODE_700_286 )
752
753/* NCR53c800 series only */
754#define SCRATCHA_REG_800 0x34 /* through 0x37 Scratch A rw */
755/* NCR53c710 only */
756#define SCRATCHB_REG_10 0x34 /* through 0x37 scratch B rw */
757
758#define DMODE_REG_10 0x38 /* DMA mode rw, NCR53c710 and newer */
759#define DMODE_800_SIOM 0x20 /* Source IO = 1 */
760#define DMODE_800_DIOM 0x10 /* Destination IO = 1 */
761#define DMODE_800_ERL 0x08 /* Enable Read Line */
762
763/* 35-38 are reserved on 700 and 700-66 series chips */
764#define DIEN_REG 0x39 /* DMA interrupt enable rw */
765/* 0x80, 0x40, and 0x20 are reserved on 700-series chips */
766#define DIEN_800_MDPE 0x40 /* Master data parity error */
767#define DIEN_800_BF 0x20 /* BUS fault */
768#define DIEN_700_BF 0x20 /* BUS fault */
769#define DIEN_ABRT 0x10 /* Enable aborted interrupt */
770#define DIEN_SSI 0x08 /* Enable single step interrupt */
771#define DIEN_SIR 0x04 /* Enable SCRIPTS INT command
772 * interrupt
773 */
774/* 0x02 is reserved on 800 series chips */
775#define DIEN_700_WTD 0x02 /* Enable watchdog timeout interrupt */
776#define DIEN_700_OPC 0x01 /* Enable illegal instruction
777 * interrupt
778 */
779#define DIEN_800_IID 0x01 /* Same meaning, different name */
780
781/*
782 * DMA watchdog timer rw
783 * set in 16 CLK input periods.
784 */
785#define DWT_REG 0x3a
786
/* DMA control rw */
#define DCNTL_REG 0x3b
#define DCNTL_700_CF1 0x80              /* Clock divisor bits */
#define DCNTL_700_CF0 0x40
#define DCNTL_700_CF_MASK 0xc0
/* Clock divisors                          Divisor SCLK range (MHZ) */
#define DCNTL_700_CF_2 0x00             /* 2.0     37.51-50.00 */
#define DCNTL_700_CF_1_5 0x40           /* 1.5     25.01-37.50 */
#define DCNTL_700_CF_1 0x80             /* 1.0     16.67-25.00 */
#define DCNTL_700_CF_3 0xc0             /* 3.0     50.01-66.67 (53c700-66) */

#define DCNTL_700_S16 0x20              /* Load scripts 16 bits at a time */
#define DCNTL_SSM 0x10                  /* Single step mode */
#define DCNTL_700_LLM 0x08              /* Low level mode, can only be set
                                         * after selection */
#define DCNTL_800_IRQM 0x08             /* Totem pole IRQ pin */
#define DCNTL_STD 0x04                  /* Start DMA / SCRIPTS */
/* 0x02 is reserved */
#define DCNTL_00_RST 0x01               /* Software reset, resets everything
                                         * but 286 mode bit in DMODE.  On the
                                         * NCR53c710, this bit moved to CTEST8
                                         */
#define DCNTL_10_COM 0x01               /* 700 software compatibility mode */
#define DCNTL_10_EA 0x20                /* Enable Ack - needed for MVME16x */

/*
 * Bits of DCNTL preserved across a soft reset on 700-series parts.
 *
 * FIX: the original expansion referenced DCNTL_CF_MASK and DCNTL_S16,
 * which are not defined anywhere in this header (the real identifiers
 * carry the 700-series prefix), so the macro could never have compiled
 * if used.  Use the names defined above.
 */
#define DCNTL_700_SAVE ( DCNTL_700_CF_MASK | DCNTL_700_S16 )
813
814
815/* NCR53c700-66 only */
816#define SCRATCHB_REG_00 0x3c /* through 0x3f scratch b rw */
817#define SCRATCHB_REG_800 0x5c /* through 0x5f scratch b rw */
818/* NCR53c710 only */
819#define ADDER_REG_10 0x3c /* Adder, NCR53c710 only */
820
821#define SIEN1_REG_800 0x41
822#define SIEN1_800_STO 0x04 /* selection/reselection timeout */
823#define SIEN1_800_GEN 0x02 /* general purpose timer */
824#define SIEN1_800_HTH 0x01 /* handshake to handshake */
825
826#define SIST1_REG_800 0x43
827#define SIST1_800_STO 0x04 /* selection/reselection timeout */
828#define SIST1_800_GEN 0x02 /* general purpose timer */
829#define SIST1_800_HTH 0x01 /* handshake to handshake */
830
831#define SLPAR_REG_800 0x44 /* Parity */
832
833#define MACNTL_REG_800 0x46 /* Memory access control */
834#define MACNTL_800_TYP3 0x80
835#define MACNTL_800_TYP2 0x40
836#define MACNTL_800_TYP1 0x20
837#define MACNTL_800_TYP0 0x10
838#define MACNTL_800_DWR 0x08
839#define MACNTL_800_DRD 0x04
840#define MACNTL_800_PSCPT 0x02
841#define MACNTL_800_SCPTS 0x01
842
843#define GPCNTL_REG_800 0x47 /* General Purpose Pin Control */
844
845/* Timeouts are expressed such that 0=off, 1=100us, doubling after that */
846#define STIME0_REG_800 0x48 /* SCSI Timer Register 0 */
847#define STIME0_800_HTH_MASK 0xf0 /* Handshake to Handshake timeout */
848#define STIME0_800_HTH_SHIFT 4
849#define STIME0_800_SEL_MASK 0x0f /* Selection timeout */
850#define STIME0_800_SEL_SHIFT 0
851
852#define STIME1_REG_800 0x49
853#define STIME1_800_GEN_MASK 0x0f /* General purpose timer */
854
855#define RESPID_REG_800 0x4a /* Response ID, bit fielded. 8
856 bits on narrow chips, 16 on WIDE */
857
858#define STEST0_REG_800 0x4c
859#define STEST0_800_SLT 0x08 /* Selection response logic test */
860#define STEST0_800_ART 0x04 /* Arbitration priority encoder test */
861#define STEST0_800_SOZ 0x02 /* Synchronous offset zero */
862#define STEST0_800_SOM 0x01 /* Synchronous offset maximum */
863
864#define STEST1_REG_800 0x4d
865#define STEST1_800_SCLK 0x80 /* Disable SCSI clock */
866
867#define STEST2_REG_800 0x4e
868#define STEST2_800_SCE 0x80 /* Enable SOCL/SODL */
869#define STEST2_800_ROF 0x40 /* Reset SCSI sync offset */
870#define STEST2_800_SLB 0x10 /* Enable SCSI loopback mode */
871#define STEST2_800_SZM 0x08 /* SCSI high impedance mode */
872#define STEST2_800_EXT 0x02 /* Extend REQ/ACK filter 30 to 60ns */
873#define STEST2_800_LOW 0x01 /* SCSI low level mode */
874
875#define STEST3_REG_800 0x4f
876#define STEST3_800_TE 0x80 /* Enable active negation */
877#define STEST3_800_STR 0x40 /* SCSI FIFO test read */
878#define STEST3_800_HSC 0x20 /* Halt SCSI clock */
879#define STEST3_800_DSI 0x10 /* Disable single initiator response */
880#define STEST3_800_TTM 0x04 /* Time test mode */
881#define STEST3_800_CSF 0x02 /* Clear SCSI FIFO */
882#define STEST3_800_STW 0x01 /* SCSI FIFO test write */
883
884#define OPTION_PARITY 0x1 /* Enable parity checking */
885#define OPTION_TAGGED_QUEUE 0x2 /* Enable SCSI-II tagged queuing */
886#define OPTION_700 0x8 /* Always run NCR53c700 scripts */
887#define OPTION_INTFLY 0x10 /* Use INTFLY interrupts */
888#define OPTION_DEBUG_INTR 0x20 /* Debug interrupts */
889#define OPTION_DEBUG_INIT_ONLY 0x40 /* Run initialization code and
890 simple test code, return
891 DID_NO_CONNECT if any SCSI
892 commands are attempted. */
893#define OPTION_DEBUG_READ_ONLY 0x80 /* Return DID_ERROR if any
894 SCSI write is attempted */
895#define OPTION_DEBUG_TRACE 0x100 /* Animated trace mode, print
896 each address and instruction
897 executed to debug buffer. */
898#define OPTION_DEBUG_SINGLE 0x200 /* stop after executing one
899 instruction */
900#define OPTION_SYNCHRONOUS 0x400 /* Enable sync SCSI. */
901#define OPTION_MEMORY_MAPPED 0x800 /* NCR registers have valid
902 memory mapping */
903#define OPTION_IO_MAPPED 0x1000 /* NCR registers have valid
904 I/O mapping */
905#define OPTION_DEBUG_PROBE_ONLY 0x2000 /* Probe only, don't even init */
906#define OPTION_DEBUG_TESTS_ONLY 0x4000 /* Probe, init, run selected tests */
907#define OPTION_DEBUG_TEST0 0x08000 /* Run test 0 */
908#define OPTION_DEBUG_TEST1 0x10000 /* Run test 1 */
909#define OPTION_DEBUG_TEST2 0x20000 /* Run test 2 */
910#define OPTION_DEBUG_DUMP 0x40000 /* Dump commands */
911#define OPTION_DEBUG_TARGET_LIMIT 0x80000 /* Only talk to target+luns specified */
912#define OPTION_DEBUG_NCOMMANDS_LIMIT 0x100000 /* Limit the number of commands */
913#define OPTION_DEBUG_SCRIPT 0x200000 /* Print when checkpoints are passed */
914#define OPTION_DEBUG_FIXUP 0x400000 /* print fixup values */
915#define OPTION_DEBUG_DSA 0x800000
916#define OPTION_DEBUG_CORRUPTION 0x1000000 /* Detect script corruption */
917#define OPTION_DEBUG_SDTR 0x2000000 /* Debug SDTR problem */
918#define OPTION_DEBUG_MISMATCH 0x4000000 /* Debug phase mismatches */
919#define OPTION_DISCONNECT 0x8000000 /* Allow disconnect */
920#define OPTION_DEBUG_DISCONNECT 0x10000000
921#define OPTION_ALWAYS_SYNCHRONOUS 0x20000000 /* Negotiate sync. transfers
922 on power up */
923#define OPTION_DEBUG_QUEUES 0x80000000
924#define OPTION_DEBUG_ALLOCATION 0x100000000LL
925#define OPTION_DEBUG_SYNCHRONOUS 0x200000000LL /* Sanity check SXFER and
926 SCNTL3 registers */
927#define OPTION_NO_ASYNC 0x400000000LL /* Don't automagically send
928 SDTR for async transfers when
929 we haven't been told to do
930 a synchronous transfer. */
931#define OPTION_NO_PRINT_RACE 0x800000000LL /* Don't print message when
932 the reselect/WAIT DISCONNECT
933 race condition hits */
934#if !defined(PERM_OPTIONS)
935#define PERM_OPTIONS 0
936#endif
937
938/*
939 * Some data which is accessed by the NCR chip must be 4-byte aligned.
940 * For some hosts the default is less than that (eg. 68K uses 2-byte).
941 * Alignment has only been forced where it is important; also if one
942 * 32 bit structure field is aligned then it is assumed that following
943 * 32 bit fields are also aligned. Take care when adding fields
944 * which are other than 32 bit.
945 */
946
/*
 * Per-target synchronous transfer state.  The leading 32-bit fields are
 * read directly by the SCSI SCRIPTS, hence the forced 4-byte alignment
 * (see the alignment note above).
 */
struct NCR53c7x0_synchronous {
    u32 select_indirect                 /* Value used for indirect selection */
        __attribute__ ((aligned (4)));
    u32 sscf_710;                       /* Used to set SSCF bits for 710 */
    u32 script[8];                      /* Size ?? Script used when target is
                                           reselected */
    unsigned char synchronous_want[5];  /* Per target desired SDTR */
/*
 * Set_synchronous programs these, select_indirect and current settings after
 * int_debug_should show a match.
 */
    unsigned char sxfer_sanity, scntl3_sanity;
};
960
961#define CMD_FLAG_SDTR 1 /* Initiating synchronous
962 transfer negotiation */
963#define CMD_FLAG_WDTR 2 /* Initiating wide transfer
964 negotiation */
965#define CMD_FLAG_DID_SDTR 4 /* did SDTR */
966#define CMD_FLAG_DID_WDTR 8 /* did WDTR */
967
/*
 * One scatter/gather element for the chip's table-indirect addressing:
 * a byte count paired with the buffer address.
 */
struct NCR53c7x0_table_indirect {
    u32 count;
    void *address;
};
972
/*
 * Event codes logged into the driver's trace buffer
 * (struct NCR53c7x0_event, below).
 */
enum ncr_event {
    EVENT_NONE = 0,
/*
 * Order is IMPORTANT, since these must correspond to the event interrupts
 * in 53c7,8xx.scr
 */

    EVENT_ISSUE_QUEUE = 0x5000000,      /* 0 Command was added to issue queue */
    EVENT_START_QUEUE,                  /* 1 Command moved to start queue */
    EVENT_SELECT,                       /* 2 Command completed selection */
    EVENT_DISCONNECT,                   /* 3 Command disconnected */
    EVENT_RESELECT,                     /* 4 Command reselected */
    EVENT_COMPLETE,                     /* 5 Command completed */
    EVENT_IDLE,                         /* 6 */
    EVENT_SELECT_FAILED,                /* 7 */
    EVENT_BEFORE_SELECT,                /* 8 */
    EVENT_RESELECT_FAILED               /* 9 */
};
991
/*
 * One entry in the driver's event trace: what happened, to which
 * target/lun, when, and enough of the command to identify it even
 * after the originating Scsi_Cmnd has been freed.
 */
struct NCR53c7x0_event {
    enum ncr_event event;       /* What type of event */
    unsigned char target;
    unsigned char lun;
    struct timeval time;
    u32 *dsa;                   /* What's in the DSA register now (virt) */
/*
 * A few things from that SCSI pid so we know what happened after
 * the Scsi_Cmnd structure in question may have disappeared.
 */
    unsigned long pid;          /* The SCSI PID which caused this
                                   event */
    unsigned char cmnd[12];
};
1006
1007/*
1008 * Things in the NCR53c7x0_cmd structure are split into two parts :
1009 *
1010 * 1. A fixed portion, for things which are not accessed directly by static NCR
1011 * code (ie, are referenced only by the Linux side of the driver,
1012 * or only by dynamically generated code).
1013 *
1014 * 2. The DSA portion, for things which are accessed directly by static NCR
1015 * code.
1016 *
1017 * This is a little ugly, but it
1018 * 1. Avoids conflicts between the NCR code's picture of the structure, and
1019 * Linux code's idea of what it looks like.
1020 *
1021 * 2. Minimizes the pain in the Linux side of the code needed
1022 * to calculate real dsa locations for things, etc.
1023 *
1024 */
1025
struct NCR53c7x0_cmd {
    void *real;                         /* Real, unaligned address for
                                           free function */
    void (* free)(void *, int);         /* Command to deallocate; NULL
                                           for structures allocated with
                                           scsi_register, etc. */
    Scsi_Cmnd *cmd;                     /* Associated Scsi_Cmnd
                                           structure, Scsi_Cmnd points
                                           at NCR53c7x0_cmd using
                                           host_scribble structure */

    int size;                           /* scsi_malloc'd size of this
                                           structure */

    int flags;                          /* CMD_* flags */

    unsigned char cmnd[12];             /* CDB, copied from Scsi_Cmnd */
    int result;                         /* Copy to Scsi_Cmnd when done */

    struct {                            /* Private non-cached bounce buffer */
        unsigned char buf[256];
        u32 addr;
        u32 len;
    } bounce;

/*
 * SDTR and WIDE messages are an either/or affair
 * in this message, since we will go into message out and send
 * _the whole mess_ without dropping out of message out to
 * let the target go into message in after sending the first
 * message.
 */

    unsigned char select[11];           /* Select message, includes
                                           IDENTIFY
                                           (optional) QUEUE TAG
                                           (optional) SDTR or WDTR
                                         */


    volatile struct NCR53c7x0_cmd *next; /* Linux maintained lists (free,
                                            running, eventually finished) */


    u32 *data_transfer_start;           /* Start of data transfer routines */
    u32 *data_transfer_end;             /* Address after end of data transfer
                                           routines */
/*
 * The following three fields were moved from the DSA proper to here
 * since only dynamically generated NCR code refers to them, meaning
 * we don't need dsa_* absolutes, and it is simpler to let the
 * host code refer to them directly.
 */

/*
 * HARD CODED : residual and saved_residual need to agree with the sizes
 * used in NCR53c7,8xx.scr.
 *
 * FIXME: we want to consider the case where we have odd-length
 * scatter/gather buffers and a WIDE transfer, in which case
 * we'll need to use the CHAIN MOVE instruction.  Ick.
 */
    u32 residual[6] __attribute__ ((aligned (4)));
                                        /* Residual data transfer which
                                           allows pointer code to work
                                           right.

                                           [0-1] : Conditional call to
                                             appropriate other transfer
                                             routine.
                                           [2-3] : Residual block transfer
                                             instruction.
                                           [4-5] : Jump to instruction
                                             after splice.
                                         */
    u32 saved_residual[6];              /* Copy of old residual, so we
                                           can get another partial
                                           transfer and still recover
                                         */

    u32 saved_data_pointer;             /* Saved data pointer */

    u32 dsa_next_addr;                  /* _Address_ of dsa_next field
                                           in this dsa for RISCy
                                           style constant. */

    u32 dsa_addr;                       /* Address of dsa; RISCy style
                                           constant */

    u32 dsa[0];                         /* Variable length (depending
                                           on host type, number of scatter /
                                           gather buffers, etc). */
};
1119
/*
 * Debugger breakpoint record: the patched SCRIPTS address plus the
 * instruction words that originally lived there (old_size gives the
 * original instruction length).
 */
struct NCR53c7x0_break {
    u32 *address, old_instruction[2];
    struct NCR53c7x0_break *next;
    unsigned char old_size;             /* Size of old instruction */
};
1125
/*
 * Driver states kept in hostdata->state (only used when running the
 * NCR53c700-compatible scripts, see OPTION_700).
 */
/* Indicates that the NCR is not executing code */
#define STATE_HALTED 0
/*
 * Indicates that the NCR is executing the wait for select / reselect
 * script.  Only used when running NCR53c700 compatible scripts, only
 * state during which an ABORT is _not_ considered an error condition.
 */
#define STATE_WAITING 1
/* Indicates that the NCR is executing other code. */
#define STATE_RUNNING 2
/*
 * Indicates that the NCR was being aborted.
 */
#define STATE_ABORTING 3
/* Indicates that the NCR was successfully aborted. */
#define STATE_ABORTED 4
/* Indicates that the NCR has been disabled due to a fatal error */
#define STATE_DISABLED 5

/*
 * Where knowledge of SCSI SCRIPT(tm) specified values are needed
 * in an interrupt handler, an interrupt handler exists for each
 * different SCSI script so we don't have name space problems.
 *
 * Return values of these handlers are as follows :
 */
#define SPECIFIC_INT_NOTHING 0          /* don't even restart */
#define SPECIFIC_INT_RESTART 1          /* restart at the next instruction */
#define SPECIFIC_INT_ABORT 2            /* recoverable error, abort cmd */
#define SPECIFIC_INT_PANIC 3            /* unrecoverable error, panic */
#define SPECIFIC_INT_DONE 4             /* normal command completion */
#define SPECIFIC_INT_BREAK 5            /* break point encountered */
1158
/*
 * Per-host private data.  Mixes Linux-side bookkeeping with fields that
 * are read and written directly by the running SCSI SCRIPTS, which is
 * why several members carry explicit 4-byte alignment (see the
 * alignment note earlier in this file).
 */
struct NCR53c7x0_hostdata {
    int size;                           /* Size of entire Scsi_Host
                                           structure */
    int board;                          /* set to board type, useful if
                                           we have host specific things,
                                           ie, a general purpose I/O
                                           bit is being used to enable
                                           termination, etc. */

    int chip;                           /* set to chip type; 700-66 is
                                           700-66, rest are last three
                                           digits of part number */

    char valid_ids[8];                  /* Valid SCSI ID's for adapter */

    u32 *dsp;                           /* dsp to restart with after
                                           all stacked interrupts are
                                           handled. */

    unsigned dsp_changed:1;             /* Has dsp changed within this
                                           set of stacked interrupts ? */

    unsigned char dstat;                /* Most recent value of dstat */
    unsigned dstat_valid:1;

    unsigned expecting_iid:1;           /* Expect IID interrupt */
    unsigned expecting_sto:1;           /* Expect STO interrupt */

    /*
     * The code stays cleaner if we use variables with function
     * pointers and offsets that are unique for the different
     * scripts rather than having a slew of switch(hostdata->chip)
     * statements.
     *
     * It also means that the #defines from the SCSI SCRIPTS(tm)
     * don't have to be visible outside of the script-specific
     * instructions, preventing name space pollution.
     */

    void (* init_fixup)(struct Scsi_Host *host);
    void (* init_save_regs)(struct Scsi_Host *host);
    void (* dsa_fixup)(struct NCR53c7x0_cmd *cmd);
    void (* soft_reset)(struct Scsi_Host *host);
    int (* run_tests)(struct Scsi_Host *host);

    /*
     * Called when DSTAT_SIR is set, indicating an interrupt generated
     * by the INT instruction, where values are unique for each SCSI
     * script.  Should return one of the SPEC_* values.
     */

    int (* dstat_sir_intr)(struct Scsi_Host *host, struct NCR53c7x0_cmd *cmd);

    int dsa_len;                        /* Size of DSA structure */

    /*
     * Location of DSA fields for the SCSI SCRIPT corresponding to this
     * chip.
     */

    s32 dsa_start;
    s32 dsa_end;
    s32 dsa_next;
    s32 dsa_prev;
    s32 dsa_cmnd;
    s32 dsa_select;
    s32 dsa_msgout;
    s32 dsa_cmdout;
    s32 dsa_dataout;
    s32 dsa_datain;
    s32 dsa_msgin;
    s32 dsa_msgout_other;
    s32 dsa_write_sync;
    s32 dsa_write_resume;
    s32 dsa_check_reselect;
    s32 dsa_status;
    s32 dsa_saved_pointer;
    s32 dsa_jump_dest;

    /*
     * Important entry points that generic fixup code needs
     * to know about, fixed up.
     */

    s32 E_accept_message;
    s32 E_command_complete;
    s32 E_data_transfer;
    s32 E_dsa_code_template;
    s32 E_dsa_code_template_end;
    s32 E_end_data_transfer;
    s32 E_msg_in;
    s32 E_initiator_abort;
    s32 E_other_transfer;
    s32 E_other_in;
    s32 E_other_out;
    s32 E_target_abort;
    s32 E_debug_break;
    s32 E_reject_message;
    s32 E_respond_message;
    s32 E_select;
    s32 E_select_msgout;
    s32 E_test_0;
    s32 E_test_1;
    s32 E_test_2;
    s32 E_test_3;
    s32 E_dsa_zero;
    s32 E_cmdout_cmdout;
    s32 E_wait_reselect;
    s32 E_dsa_code_begin;

    long long options;                  /* Bitfielded set of options enabled */
    volatile u32 test_completed;        /* Test completed */
    int test_running;                   /* Test currently running */
    s32 test_source
        __attribute__ ((aligned (4)));
    volatile s32 test_dest;

    volatile int state;                 /* state of driver, only used for
                                           OPTION_700 */

    unsigned char dmode;                /*
                                         * set to the address of the DMODE
                                         * register for this chip.
                                         */
    unsigned char istat;                /*
                                         * set to the address of the ISTAT
                                         * register for this chip.
                                         */

    int scsi_clock;                     /*
                                         * SCSI clock in HZ. 0 may be used
                                         * for unknown, although this will
                                         * disable synchronous negotiation.
                                         */

    volatile int intrs;                 /* Number of interrupts */
    volatile int resets;                /* Number of SCSI resets */
    /* Register values saved at initialization, restored on soft reset */
    unsigned char saved_dmode;
    unsigned char saved_ctest4;
    unsigned char saved_ctest7;
    unsigned char saved_dcntl;
    unsigned char saved_scntl3;

    unsigned char this_id_mask;

    /* Debugger information */
    struct NCR53c7x0_break *breakpoints, /* Linked list of all break points */
        *breakpoint_current;            /* Current breakpoint being stepped
                                           through, NULL if we are running
                                           normally. */
#ifdef NCR_DEBUG
    int debug_size;                     /* Size of debug buffer */
    volatile int debug_count;           /* Current data count */
    volatile char *debug_buf;           /* Output ring buffer */
    volatile char *debug_write;         /* Current write pointer */
    volatile char *debug_read;          /* Current read pointer */
#endif /* def NCR_DEBUG */

    /* XXX - primitive debugging junk, remove when working ? */
    int debug_print_limit;              /* Number of commands to print
                                           out exhaustive debugging
                                           information for if
                                           OPTION_DEBUG_DUMP is set */

    unsigned char debug_lun_limit[16];  /* If OPTION_DEBUG_TARGET_LIMIT
                                           set, puke if commands are sent
                                           to other target/lun combinations */

    int debug_count_limit;              /* Number of commands to execute
                                           before puking to limit debugging
                                           output */


    volatile unsigned idle:1;           /* set to 1 if idle */

    /*
     * Table of synchronous+wide transfer parameters set on a per-target
     * basis.
     */

    volatile struct NCR53c7x0_synchronous sync[16]
        __attribute__ ((aligned (4)));

    volatile Scsi_Cmnd *issue_queue
        __attribute__ ((aligned (4)));
                                        /* waiting to be issued by
                                           Linux driver */
    volatile struct NCR53c7x0_cmd *running_list;
                                        /* commands running, maintained
                                           by Linux driver */

    volatile struct NCR53c7x0_cmd *ncrcurrent; /* currently connected
                                                  nexus, ONLY valid for
                                                  NCR53c700/NCR53c700-66
                                                */

    volatile struct NCR53c7x0_cmd *spare; /* pointer to spare,
                                             allocated at probe time,
                                             which we can use for
                                             initialization */
    volatile struct NCR53c7x0_cmd *free;
    int max_cmd_size;                   /* Maximum size of NCR53c7x0_cmd
                                           based on number of
                                           scatter/gather segments, etc.
                                           */
    volatile int num_cmds;              /* Number of commands
                                           allocated */
    volatile int extra_allocate;
    volatile unsigned char cmd_allocated[16]; /* Have we allocated commands
                                                 for this target yet?  If not,
                                                 do so ASAP */
    volatile unsigned char busy[16][8]; /* number of commands
                                           executing on each target
                                         */
    /*
     * Eventually, I'll switch to a coroutine for calling
     * cmd->done(cmd), etc. so that we can overlap interrupt
     * processing with this code for maximum performance.
     */

    volatile struct NCR53c7x0_cmd *finished_queue;

    /* Shared variables between SCRIPT and host driver */
    volatile u32 *schedule
        __attribute__ ((aligned (4)));  /* Array of JUMPs to dsa_begin
                                           routines of various DSAs.
                                           When not in use, replace
                                           with jump to next slot */


    volatile unsigned char msg_buf[16]; /* buffer for messages
                                           other than the command
                                           complete message */

    /* Per-target default synchronous and WIDE messages */
    volatile unsigned char synchronous_want[16][5];
    volatile unsigned char wide_want[16][4];

    /* Bit fielded set of targets we want to speak synchronously with */
    volatile u16 initiate_sdtr;
    /* Bit fielded set of targets we want to speak wide with */
    volatile u16 initiate_wdtr;
    /* Bit fielded list of targets we've talked to. */
    volatile u16 talked_to;

    /* Array of bit-fielded lun lists that we need to request_sense */
    volatile unsigned char request_sense[16];

    u32 addr_reconnect_dsa_head
        __attribute__ ((aligned (4)));  /* RISCy style constant,
                                           address of following */
    volatile u32 reconnect_dsa_head;
    /* Data identifying nexus we are trying to match during reselection */
    volatile unsigned char reselected_identify; /* IDENTIFY message */
    volatile unsigned char reselected_tag; /* second byte of queue tag
                                              message or 0 */

    /* These were static variables before we moved them */

    s32 NCR53c7xx_zero
        __attribute__ ((aligned (4)));
    s32 NCR53c7xx_sink;
    u32 NOP_insn;
    char NCR53c7xx_msg_reject;
    char NCR53c7xx_msg_abort;
    char NCR53c7xx_msg_nop;

    /*
     * Following item introduced by RGH to support NCRc710, which is
     * VERY brain-dead when it comes to memory moves
     */

    /* DSA save area used only by the NCR chip */
    volatile unsigned long saved2_dsa
        __attribute__ ((aligned (4)));

    volatile unsigned long emulated_intfly
        __attribute__ ((aligned (4)));

    volatile int event_size, event_index;
    volatile struct NCR53c7x0_event *events;

    /* If we need to generate code to kill off the currently connected
       command, this is where we do it. Should have a BMI instruction
       to source or sink the current data, followed by a JUMP
       to abort_connected */

    u32 *abort_script;

    int script_count;                   /* Size of script in words */
    u32 script[0];                      /* Relocated SCSI script */

};
1452
/* Sentinel values: 255 = no IRQ/DMA channel assigned, 254 = autoprobe. */
#define SCSI_IRQ_NONE 255
#define DMA_NONE 255
#define IRQ_AUTO 254
#define DMA_AUTO 254

/* Generic board type for ncr53c7xx_init()'s 'board' argument. */
#define BOARD_GENERIC 0

/* Size of one SCRIPTS instruction in 32 bit words: memory-to-memory
 * moves (DCMD_TYPE_MMI) take three words, all other types take two. */
#define NCR53c7x0_insn_size(insn) \
    (((insn) & DCMD_TYPE_MASK) == DCMD_TYPE_MMI ? 3 : 2)
1462
1463
/* Declare the locals used by the register access macros below.  A
 * function that touches chip registers invokes NCR53c7x0_local_declare()
 * once in its declarations, then NCR53c7x0_local_setup(host) to cache the
 * MMIO base, the I/O port base, and the memory-mapped vs. port-I/O choice
 * from the host's private data. */
#define NCR53c7x0_local_declare() \
    volatile unsigned char *NCR53c7x0_address_memory; \
    unsigned int NCR53c7x0_address_io; \
    int NCR53c7x0_memory_mapped

#define NCR53c7x0_local_setup(host) \
    NCR53c7x0_address_memory = (void *) (host)->base; \
    NCR53c7x0_address_io = (unsigned int) (host)->io_port; \
    NCR53c7x0_memory_mapped = ((struct NCR53c7x0_hostdata *) \
	host->hostdata[0])-> options & OPTION_MEMORY_MAPPED
1474
#ifdef BIG_ENDIAN
/* These could be more efficient, given that we are always memory mapped,
 * but they don't give the same problems as the write macros, so leave
 * them. */
/* The ^3 / ^2 address swizzles select the correct byte lane for 8 and
 * 16 bit accesses to the chip's registers from a big-endian CPU bus. */
#ifdef __mc68000__
/* m68k boards are always memory mapped; use the raw accessors directly. */
#define NCR53c7x0_read8(address) \
    ((unsigned int)raw_inb((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) )

#define NCR53c7x0_read16(address) \
    ((unsigned int)raw_inw((u32)NCR53c7x0_address_memory + ((u32)(address)^2)))
#else
#define NCR53c7x0_read8(address) \
    (NCR53c7x0_memory_mapped ? \
	(unsigned int)readb((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) : \
	inb(NCR53c7x0_address_io + (address)))

#define NCR53c7x0_read16(address) \
    (NCR53c7x0_memory_mapped ? \
	(unsigned int)readw((u32)NCR53c7x0_address_memory + ((u32)(address)^2)) : \
	inw(NCR53c7x0_address_io + (address)))
#endif /* mc68000 */
#else
/* Little-endian: no lane swizzle needed; choose MMIO or port I/O at
 * run time based on NCR53c7x0_memory_mapped. */
#define NCR53c7x0_read8(address) \
    (NCR53c7x0_memory_mapped ? \
	(unsigned int)readb((u32)NCR53c7x0_address_memory + (u32)(address)) : \
	inb(NCR53c7x0_address_io + (address)))

#define NCR53c7x0_read16(address) \
    (NCR53c7x0_memory_mapped ? \
	(unsigned int)readw((u32)NCR53c7x0_address_memory + (u32)(address)) : \
	inw(NCR53c7x0_address_io + (address)))
#endif

/* 32 bit reads need no byte-lane swizzle on either endianness. */
#ifdef __mc68000__
#define NCR53c7x0_read32(address) \
    ((unsigned int) raw_inl((u32)NCR53c7x0_address_memory + (u32)(address)))
#else
#define NCR53c7x0_read32(address) \
    (NCR53c7x0_memory_mapped ? \
	(unsigned int) readl((u32)NCR53c7x0_address_memory + (u32)(address)) : \
	inl(NCR53c7x0_address_io + (address)))
#endif /* mc68000*/
1517
#ifdef BIG_ENDIAN
/* If we are big-endian, then we are not Intel, so probably don't have
 * an i/o map as well as a memory map. So, let's assume memory mapped.
 * Also, I am having terrible problems trying to persuade the compiler
 * not to lay down code which does a read after write for these macros.
 * If you remove 'volatile' from writeb() and friends it is ok....
 */

/* Direct volatile stores (rather than writeb()) to avoid the
 * read-after-write code generation described above; same byte-lane
 * swizzle (^3 / ^2) as the read macros. */
#define NCR53c7x0_write8(address,value) \
    *(volatile unsigned char *) \
	((u32)NCR53c7x0_address_memory + ((u32)(address)^3)) = (value)

#define NCR53c7x0_write16(address,value) \
    *(volatile unsigned short *) \
	((u32)NCR53c7x0_address_memory + ((u32)(address)^2)) = (value)

#define NCR53c7x0_write32(address,value) \
    *(volatile unsigned long *) \
	((u32)NCR53c7x0_address_memory + ((u32)(address))) = (value)

#else

/* MMIO writes are followed by mb() to keep them ordered with respect to
 * later accesses; otherwise fall back to port I/O. */
#define NCR53c7x0_write8(address,value) \
    (NCR53c7x0_memory_mapped ? \
	({writeb((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
	outb((value), NCR53c7x0_address_io + (address)))

#define NCR53c7x0_write16(address,value) \
    (NCR53c7x0_memory_mapped ? \
	({writew((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
	outw((value), NCR53c7x0_address_io + (address)))

#define NCR53c7x0_write32(address,value) \
    (NCR53c7x0_memory_mapped ? \
	({writel((value), (u32)NCR53c7x0_address_memory + (u32)(address)); mb();}) : \
	outl((value), NCR53c7x0_address_io + (address)))

#endif
1556
/* Patch arbitrary 32 bit words in the script: add 'value' to every word
 * in which the script assembler recorded a use of ABSOLUTE 'symbol'
 * (the A_<symbol>_used[] table).  Requires a local int i, plus 'host'
 * and 'hostdata' in scope. */
#define patch_abs_32(script, offset, symbol, value) \
    for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
	(u32)); ++i) { \
	(script)[A_##symbol##_used[i] - (offset)] += (value); \
	if (hostdata->options & OPTION_DEBUG_FIXUP) \
	    printk("scsi%d : %s reference %d at 0x%x in %s is now 0x%x\n",\
		host->host_no, #symbol, i, A_##symbol##_used[i] - \
		(int)(offset), #script, (script)[A_##symbol##_used[i] - \
		(offset)]); \
    }
1568
/* Patch read/write instruction immediate field: unlike patch_abs_32 this
 * replaces (does not add to) the DBC immediate byte of every instruction
 * that references ABSOLUTE 'symbol'.  Requires a local int i in scope. */
#define patch_abs_rwri_data(script, offset, symbol, value) \
    for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
	(u32)); ++i) \
	(script)[A_##symbol##_used[i] - (offset)] = \
	    ((script)[A_##symbol##_used[i] - (offset)] & \
	    ~DBC_RWRI_IMMEDIATE_MASK) | \
	    (((value) << DBC_RWRI_IMMEDIATE_SHIFT) & \
	    DBC_RWRI_IMMEDIATE_MASK)
1578
/* Patch transfer control instruction data field: replace the DBC data
 * byte of every transfer-control instruction that references ABSOLUTE
 * 'symbol'.  Requires a local int i in scope. */
#define patch_abs_tci_data(script, offset, symbol, value) \
    for (i = 0; i < (sizeof (A_##symbol##_used) / sizeof \
	(u32)); ++i) \
	(script)[A_##symbol##_used[i] - (offset)] = \
	    ((script)[A_##symbol##_used[i] - (offset)] & \
	    ~DBC_TCI_DATA_MASK) | \
	    (((value) << DBC_TCI_DATA_SHIFT) & \
	    DBC_TCI_DATA_MASK)
1588
/* Patch a 32 bit word inside a command's DSA array.  'symbol' names the
 * hostdata member holding the byte offset of the field within the DSA
 * structure; 'word' is a further u32 index from that field.  (Assignment
 * should arguably be += for consistency with patch_abs_32.)
 *
 * Fix: the original wrote "hostdata->##symbol".  '##' pastes the two
 * neighbouring preprocessing tokens, and "->" glued to an identifier is
 * not a valid preprocessing token, so conforming compilers reject the
 * expansion.  Plain macro-parameter substitution ("hostdata->symbol")
 * produces exactly the intended text.  The '#symbol' stringizations in
 * the debug printk are unchanged and remain valid. */
#define patch_dsa_32(dsa, symbol, word, value) \
    { \
	(dsa)[(hostdata->symbol - hostdata->dsa_start) / sizeof(u32) \
	    + (word)] = (value); \
	if (hostdata->options & OPTION_DEBUG_DSA) \
	    printk("scsi : dsa %s symbol %s(%d) word %d now 0x%x\n", \
		#dsa, #symbol, hostdata->symbol, \
		(word), (u32) (value)); \
    }
1599
/* Paranoid people could use panic() here. */
/* NOTE(review): the expansion carries its own trailing ';', so
 * "if (x) FATAL(host); else ..." will not parse -- confirm all call
 * sites before removing the semicolon. */
#define FATAL(host) shutdown((host));

/* Probe/attach entry point (implemented in 53c7xx.c): set up a host for
 * a board of the given type and chip at 'base'/'io_port', with the given
 * IRQ, DMA channel, option flags and SCSI clock. */
extern int ncr53c7xx_init(struct scsi_host_template *tpnt, int board, int chip,
	unsigned long base, int io_port, int irq, int dma,
	long long options, int clock);
1606
1607#endif /* NCR53c710_C */
1608#endif /* NCR53c710_H */
diff --git a/drivers/scsi/53c7xx.scr b/drivers/scsi/53c7xx.scr
deleted file mode 100644
index 9c5694a2da8a..000000000000
--- a/drivers/scsi/53c7xx.scr
+++ /dev/null
@@ -1,1591 +0,0 @@
1#undef DEBUG
2#undef EVENTS
3#undef NO_SELECTION_TIMEOUT
4#define BIG_ENDIAN
5
6; 53c710 driver. Modified from Drew Eckhardts driver
7; for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
8;
9; I have left the script for the 53c8xx family in here, as it is likely
10; to be useful to see what I changed when bug hunting.
11
12; NCR 53c810 driver, main script
13; Sponsored by
14; iX Multiuser Multitasking Magazine
15; hm@ix.de
16;
17; Copyright 1993, 1994, 1995 Drew Eckhardt
18; Visionary Computing
19; (Unix and Linux consulting and custom programming)
20; drew@PoohSticks.ORG
21; +1 (303) 786-7975
22;
23; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
24;
25; PRE-ALPHA
26;
27; For more information, please consult
28;
29; NCR 53C810
30; PCI-SCSI I/O Processor
31; Data Manual
32;
33; NCR 53C710
34; SCSI I/O Processor
35; Programmers Guide
36;
37; NCR Microelectronics
38; 1635 Aeroplaza Drive
39; Colorado Springs, CO 80916
40; 1+ (719) 578-3400
41;
42; Toll free literature number
43; +1 (800) 334-5454
44;
45; IMPORTANT : This code is self modifying due to the limitations of
46; the NCR53c7,8xx series chips. Persons debugging this code with
47; the remote debugger should take this into account, and NOT set
48; breakpoints in modified instructions.
49;
50; Design:
51; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
52; microcontroller using a simple instruction set.
53;
54; So, to minimize the effects of interrupt latency, and to maximize
55; throughput, this driver offloads the practical maximum amount
56; of processing to the SCSI chip while still maintaining a common
57; structure.
58;
59; Where tradeoffs were needed between efficiency on the older
60; chips and the newer NCR53c800 series, the NCR53c800 series
61; was chosen.
62;
63; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
64; automate SCSI transfers without host processor intervention, this
65; isn't the case with the NCR53c710 and newer chips which allow
66;
67; - reads and writes to the internal registers from within the SCSI
68; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
69; state so that multiple threads of execution are possible, and also
70; provide an ALU for loop control, etc.
71;
72; - table indirect addressing for some instructions. This allows
;	pointers to be located relative to the DSA (Data Structure
;	Address) register.
75;
76; These features make it possible to implement a mailbox style interface,
77; where the same piece of code is run to handle I/O for multiple threads
78; at once minimizing our need to relocate code. Since the NCR53c700/
;	NCR53c800 series have a unique combination of features, making a
;	standard ingoing/outgoing mailbox system costly, I've modified it.
81;
82; - Mailboxes are a mixture of code and data. This lets us greatly
83; simplify the NCR53c810 code and do things that would otherwise
84; not be possible.
85;
86; The saved data pointer is now implemented as follows :
87;
88; Control flow has been architected such that if control reaches
89; munge_save_data_pointer, on a restore pointers message or
90; reconnection, a jump to the address formerly in the TEMP register
91; will allow the SCSI command to resume execution.
92;
93
94;
95; Note : the DSA structures must be aligned on 32 bit boundaries,
96; since the source and destination of MOVE MEMORY instructions
97; must share the same alignment and this is the alignment of the
98; NCR registers.
99;
100
101; For some systems (MVME166, for example) dmode is always the same, so don't
102; waste time writing it
103
104#if 1
105#define DMODE_MEMORY_TO_NCR
106#define DMODE_MEMORY_TO_MEMORY
107#define DMODE_NCR_TO_MEMORY
108#else
109#define DMODE_MEMORY_TO_NCR MOVE dmode_memory_to_ncr TO DMODE
110#define DMODE_MEMORY_TO_MEMORY MOVE dmode_memory_to_memory TO DMODE
111#define DMODE_NCR_TO_MEMORY MOVE dmode_ncr_to_memory TO DMODE
112#endif
113
114ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
115ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
116ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
117 ; for current dsa
118ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
119 ; sync routine
120ABSOLUTE dsa_sscf_710 = 0 ; Patch to address of per-target
121 ; sscf value (53c710)
122ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
123ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
124 ; saved data pointer
125ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
126 ; current residual code
127ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
128 ; saved residual code
129ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
130ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
131ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
132
133;
134; Once a device has initiated reselection, we need to compare it
135; against the singly linked list of commands which have disconnected
136; and are pending reselection. These commands are maintained in
137; an unordered singly linked list of DSA structures, through the
138; DSA pointers at their 'centers' headed by the reconnect_dsa_head
139; pointer.
140;
141; To avoid complications in removing commands from the list,
142; I minimize the amount of expensive (at eight operations per
143; addition @ 500-600ns each) pointer operations which must
144; be done in the NCR driver by precomputing them on the
145; host processor during dsa structure generation.
146;
147; The fixed-up per DSA code knows how to recognize the nexus
148; associated with the corresponding SCSI command, and modifies
149; the source and destination pointers for the MOVE MEMORY
150; instruction which is executed when reselected_ok is called
151; to remove the command from the list. Similarly, DSA is
152; loaded with the address of the next DSA structure and
153; reselected_check_next is called if a failure occurs.
154;
155; Perhaps more concisely, the net effect of the mess is
156;
157; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
158; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
159; src = &dsa->next;
160; if (target_id == dsa->id && target_lun == dsa->lun) {
161; *dest = *src;
162; break;
163; }
164; }
165;
166; if (!dsa)
167; error (int_err_unexpected_reselect);
168; else
169; longjmp (dsa->jump_resume, 0);
170;
171;
172
173#if (CHIP != 700) && (CHIP != 70066)
174; Define DSA structure used for mailboxes
; Start of the per-command DSA code template.  This code is copied and
; patched for each command (the dsa_temp_* ABSOLUTEs above mark the spots
; the host driver fixes up at DSA-generation time).
ENTRY dsa_code_template
dsa_code_template:
ENTRY dsa_code_begin
dsa_code_begin:
; RGH: Don't care about TEMP and DSA here
	DMODE_MEMORY_TO_NCR
	MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
	DMODE_MEMORY_TO_MEMORY
#if (CHIP == 710)
	MOVE MEMORY 4, addr_scratch, saved_dsa
	; We are about to go and select the device, so must set SSCF bits
	MOVE MEMORY 4, dsa_sscf_710, addr_scratch
#ifdef BIG_ENDIAN
	MOVE SCRATCH3 TO SFBR
#else
	MOVE SCRATCH0 TO SFBR
#endif
	MOVE SFBR TO SBCL
	MOVE MEMORY 4, saved_dsa, addr_dsa
#else
	CALL scratch_to_dsa
#endif
	CALL select
; Handle the phase mismatch which may have resulted from the
; MOVE FROM dsa_msgout if we returned here.  The CLEAR ATN
; may or may not be necessary, and we should update script_asm.pl
; to handle multiple pieces.
	CLEAR ATN
	CLEAR ACK

; Replace second operand with address of JUMP instruction dest operand
; in schedule table for this DSA.  Becomes dsa_jump_dest in 53c7,8xx.c.
ENTRY dsa_code_fix_jump
dsa_code_fix_jump:
	MOVE MEMORY 4, NOP_insn, 0
	JUMP select_done
211
; wrong_dsa: the reselecting device did not match this DSA; load the DSA
; register with the value of the dsa_next field and try the next entry on
; the disconnected list.
;
wrong_dsa:
#if (CHIP == 710)
; NOTE DSA is corrupt when we arrive here!
#endif
; Patch the MOVE MEMORY INSTRUCTION such that
; the destination address is the address of the OLD
; next pointer.
;
	MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 8
	DMODE_MEMORY_TO_NCR
;
; Move the _contents_ of the next pointer into the DSA register as
; the next I_T_L or I_T_L_Q tuple to check against the established
; nexus.
;
	MOVE MEMORY 4, dsa_temp_next, addr_scratch
	DMODE_MEMORY_TO_MEMORY
#if (CHIP == 710)
	MOVE MEMORY 4, addr_scratch, saved_dsa
	MOVE MEMORY 4, saved_dsa, addr_dsa
#else
	CALL scratch_to_dsa
#endif
	JUMP reselected_check_next
239
ABSOLUTE dsa_save_data_pointer = 0
; Per-command SAVE DATA POINTER handler: copy the active data pointer and
; the 24 bytes of residual-transfer state into this command's save area,
; so a later RESTORE POINTERS or reconnect can resume the transfer.
ENTRY dsa_code_save_data_pointer
dsa_code_save_data_pointer:
#if (CHIP == 710)
	; When we get here, TEMP has been saved in jump_temp+4, DSA is corrupt
	; We MUST return with DSA correct
	MOVE MEMORY 4, jump_temp+4, dsa_temp_addr_saved_pointer
; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
	MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
	CLEAR ACK
#ifdef DEBUG
	INT int_debug_saved
#endif
	MOVE MEMORY 4, saved_dsa, addr_dsa
	JUMP jump_temp
#else
	DMODE_NCR_TO_MEMORY
	MOVE MEMORY 4, addr_temp, dsa_temp_addr_saved_pointer
	DMODE_MEMORY_TO_MEMORY
; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
	MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
	CLEAR ACK
#ifdef DEBUG
	INT int_debug_saved
#endif
	RETURN
#endif
ABSOLUTE dsa_restore_pointers = 0
; Per-command RESTORE POINTERS handler: the inverse of
; dsa_code_save_data_pointer - copy the saved data pointer and the saved
; 24 byte residual state back into the active slots.
ENTRY dsa_code_restore_pointers
dsa_code_restore_pointers:
#if (CHIP == 710)
	; TEMP and DSA are corrupt when we get here, but who cares!
	MOVE MEMORY 4, dsa_temp_addr_saved_pointer, jump_temp + 4
; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
	MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
	CLEAR ACK
	; Restore DSA, note we don't care about TEMP
	MOVE MEMORY 4, saved_dsa, addr_dsa
#ifdef DEBUG
	INT int_debug_restored
#endif
	JUMP jump_temp
#else
	DMODE_MEMORY_TO_NCR
	MOVE MEMORY 4, dsa_temp_addr_saved_pointer, addr_temp
	DMODE_MEMORY_TO_MEMORY
; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
	MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
	CLEAR ACK
#ifdef DEBUG
	INT int_debug_restored
#endif
	RETURN
#endif
294
ABSOLUTE dsa_check_reselect = 0
; dsa_check_reselect determines whether or not the current target and
; lun match the current DSA; on mismatch it branches to wrong_dsa to try
; the next entry on the disconnected list.
ENTRY dsa_code_check_reselect
dsa_code_check_reselect:
#if (CHIP == 710)
	/* Arrives here with DSA correct */
	/* Assumes we are always ID 7 */
	MOVE LCRC TO SFBR	; LCRC has our ID and his ID bits set
	JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0x80
#else
	MOVE SSID TO SFBR	; SSID contains 3 bit target ID
; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
	JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0xf8
#endif
;
; Hack - move to scratch first, since SFBR is not writeable
; via the CPU and hence a MOVE MEMORY instruction.
;
	DMODE_MEMORY_TO_NCR
	MOVE MEMORY 1, reselected_identify, addr_scratch
	DMODE_MEMORY_TO_MEMORY
#ifdef BIG_ENDIAN
	; BIG ENDIAN ON MVME16x
	MOVE SCRATCH3 TO SFBR
#else
	MOVE SCRATCH0 TO SFBR
#endif
; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
; Are you sure about that? richard@sleepie.demon.co.uk
	JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
; Patch the MOVE MEMORY INSTRUCTION such that
; the source address is the address of this dsa's
; next pointer.
	MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 4
	CALL reselected_ok
#if (CHIP == 710)
; Restore DSA following memory moves in reselected_ok
; dsa_temp_sync doesn't really care about DSA, but it has an
; optional debug INT so a valid DSA is a good idea.
	MOVE MEMORY 4, saved_dsa, addr_dsa
#endif
	CALL dsa_temp_sync
; Release ACK on the IDENTIFY message _after_ we've set the synchronous
; transfer parameters!
	CLEAR ACK
; Implicitly restore pointers on reselection, so a RETURN
; will transfer control back to the right spot.
	CALL REL (dsa_code_restore_pointers)
	RETURN
ENTRY dsa_zero
dsa_zero:
ENTRY dsa_code_template_end
dsa_code_template_end:
349
350; Perform sanity check for dsa_fields_start == dsa_code_template_end -
351; dsa_zero, puke.
352
353ABSOLUTE dsa_fields_start = 0 ; Sanity marker
354 ; pad 48 bytes (fix this RSN)
355ABSOLUTE dsa_next = 48 ; len 4 Next DSA
356 ; del 4 Previous DSA address
357ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
358ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
359 ; table indirect select
360ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
361 ; select message
362ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
363 ; command
364ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
365ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
366ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
367ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
368ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
369 ; (Synchronous transfer negotiation, etc).
370ABSOLUTE dsa_end = 112
371
372ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
373 ; terminated by a call to JUMP wait_reselect
374
375; Linked lists of DSA structures
376ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
377ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
378 ; address of reconnect_dsa_head
379
380; These select the source and destination of a MOVE MEMORY instruction
381ABSOLUTE dmode_memory_to_memory = 0x0
382ABSOLUTE dmode_memory_to_ncr = 0x0
383ABSOLUTE dmode_ncr_to_memory = 0x0
384
385ABSOLUTE addr_scratch = 0x0
386ABSOLUTE addr_temp = 0x0
387#if (CHIP == 710)
388ABSOLUTE saved_dsa = 0x0
389ABSOLUTE emulfly = 0x0
390ABSOLUTE addr_dsa = 0x0
391#endif
392#endif /* CHIP != 700 && CHIP != 70066 */
393
394; Interrupts -
395; MSB indicates type
396; 0 handle error condition
397; 1 handle message
398; 2 handle normal condition
399; 3 debugging interrupt
400; 4 testing interrupt
401; Next byte indicates specific error
402
403; XXX not yet implemented, I'm not sure if I want to -
404; Next byte indicates the routine the error occurred in
405; The LSB indicates the specific place the error occurred
406
407ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
408ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
409ABSOLUTE int_err_unexpected_reselect = 0x00020000
410ABSOLUTE int_err_check_condition = 0x00030000
411ABSOLUTE int_err_no_phase = 0x00040000
412ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
413ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
414ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
415 ; received
416
417ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
418 ; registers.
419ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
420ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
421ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
422ABSOLUTE int_norm_aborted =0x02040000 ; Aborted *dsa
423ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
424ABSOLUTE int_norm_emulateintfly = 0x02060000 ; 53C710 Emulated intfly
425ABSOLUTE int_debug_break = 0x03000000 ; Break point
426#ifdef DEBUG
427ABSOLUTE int_debug_scheduled = 0x03010000 ; new I/O scheduled
428ABSOLUTE int_debug_idle = 0x03020000 ; scheduler is idle
429ABSOLUTE int_debug_dsa_loaded = 0x03030000 ; dsa reloaded
430ABSOLUTE int_debug_reselected = 0x03040000 ; NCR reselected
431ABSOLUTE int_debug_head = 0x03050000 ; issue head overwritten
432ABSOLUTE int_debug_disconnected = 0x03060000 ; disconnected
433ABSOLUTE int_debug_disconnect_msg = 0x03070000 ; got message to disconnect
434ABSOLUTE int_debug_dsa_schedule = 0x03080000 ; in dsa_schedule
435ABSOLUTE int_debug_reselect_check = 0x03090000 ; Check for reselection of DSA
436ABSOLUTE int_debug_reselected_ok = 0x030a0000 ; Reselection accepted
437#endif
438ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
439#ifdef DEBUG
440ABSOLUTE int_debug_saved = 0x030c0000 ; save/restore pointers
441ABSOLUTE int_debug_restored = 0x030d0000
442ABSOLUTE int_debug_sync = 0x030e0000 ; Sanity check synchronous
443 ; parameters.
444ABSOLUTE int_debug_datain = 0x030f0000 ; going into data in phase
445 ; now.
446ABSOLUTE int_debug_check_dsa = 0x03100000 ; Sanity check DSA against
447 ; SDID.
448#endif
449
450ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
451ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
452ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
453
454
455; These should start with 0x05000000, with low bits incrementing for
456; each one.
457
458#ifdef EVENTS
459ABSOLUTE int_EVENT_SELECT = 0
460ABSOLUTE int_EVENT_DISCONNECT = 0
461ABSOLUTE int_EVENT_RESELECT = 0
462ABSOLUTE int_EVENT_COMPLETE = 0
463ABSOLUTE int_EVENT_IDLE = 0
464ABSOLUTE int_EVENT_SELECT_FAILED = 0
465ABSOLUTE int_EVENT_BEFORE_SELECT = 0
466ABSOLUTE int_EVENT_RESELECT_FAILED = 0
467#endif
468
469ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
470ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
471ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
472ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
473ABSOLUTE NOP_insn = 0 ; NOP instruction
474
475; Pointer to message, potentially multi-byte
476ABSOLUTE msg_buf = 0
477
478; Pointer to holding area for reselection information
479ABSOLUTE reselected_identify = 0
480ABSOLUTE reselected_tag = 0
481
482; Request sense command pointer, it's a 6 byte command, should
483; be constant for all commands since we always want 16 bytes of
484; sense and we don't need to change any fields as we did under
485; SCSI-I when we actually cared about the LUN field.
486;EXTERNAL NCR53c7xx_sense ; Request sense command
487
#if (CHIP != 700) && (CHIP != 70066)
; dsa_schedule
; PURPOSE : after a DISCONNECT message has been received, and pointers
;	saved, insert the current DSA structure at the head of the
;	disconnected queue and fall through to the scheduler.
;
; CALLS : OK
;
; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
;	of disconnected commands
;
; MODIFIES : SCRATCH, reconnect_dsa_head
;
; EXITS : always passes control to schedule

ENTRY dsa_schedule
dsa_schedule:
#ifdef DEBUG
	INT int_debug_dsa_schedule
#endif

;
; Calculate the address of the next pointer within the DSA
; structure of the command that is currently disconnecting
;
#if (CHIP == 710)
	; Read what should be the current DSA from memory - actual DSA
	; register is probably corrupt
	MOVE MEMORY 4, saved_dsa, addr_scratch
#else
	CALL dsa_to_scratch
#endif
	; 32 bit add of dsa_next, done a byte at a time through the ALU
	; with the carry propagated by hand
	MOVE SCRATCH0 + dsa_next TO SCRATCH0
	MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
	MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
	MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY

; Point the next field of this DSA structure at the current disconnected
; list (self-modifying: patch the destination operand of the MOVE MEMORY
; at dsa_schedule_insert below)
	DMODE_NCR_TO_MEMORY
	MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
	DMODE_MEMORY_TO_MEMORY
dsa_schedule_insert:
	MOVE MEMORY 4, reconnect_dsa_head, 0

; And update the head pointer.
#if (CHIP == 710)
	; Read what should be the current DSA from memory - actual DSA
	; register is probably corrupt
	MOVE MEMORY 4, saved_dsa, addr_scratch
#else
	CALL dsa_to_scratch
#endif
	DMODE_NCR_TO_MEMORY
	MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
	DMODE_MEMORY_TO_MEMORY
/* Temporarily, see what happens. */
#ifndef ORIGINAL
#if (CHIP != 710)
	MOVE SCNTL2 & 0x7f TO SCNTL2
#endif
	CLEAR ACK
#endif
#if (CHIP == 710)
	; Time to correct DSA following memory move
	MOVE MEMORY 4, saved_dsa, addr_dsa
#endif
	WAIT DISCONNECT
#ifdef EVENTS
	INT int_EVENT_DISCONNECT;
#endif
#ifdef DEBUG
	INT int_debug_disconnected
#endif
	JUMP schedule
#endif
564
565;
566; select
567;
568; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
569; On success, the current DSA structure is removed from the issue
570; queue. Usually, this is entered as a fall-through from schedule,
571; although the contingent allegiance handling code will write
572; the select entry address to the DSP to restart a command as a
573; REQUEST SENSE. A message is sent (usually IDENTIFY, although
574; additional SDTR or WDTR messages may be sent). COMMAND OUT
575; is handled.
576;
577; INPUTS : DSA - SCSI command, issue_dsa_head
578;
579; CALLS : NOT OK
580;
581; MODIFIES : SCRATCH, issue_dsa_head
582;
583; EXITS : on reselection or selection, go to select_failed
584; otherwise, RETURN so control is passed back to
585; dsa_begin.
586;
587
; Establish a nexus for the command at DSA: arbitrate and select with ATN,
; then send the message(s) described by dsa_msgout (IDENTIFY, possibly
; followed by queued negotiation messages).
ENTRY select
select:

#ifdef EVENTS
	INT int_EVENT_BEFORE_SELECT
#endif

#ifdef DEBUG
	INT int_debug_scheduled
#endif
	CLEAR TARGET

; XXX
;
; In effect, SELECTION operations are backgrounded, with execution
; continuing until code which waits for REQ or a fatal interrupt is
; encountered.
;
; So, for more performance, we could overlap the code which removes
; the command from the NCRs issue queue with the selection, but
; at this point I don't want to deal with the error recovery.
;

#if (CHIP != 700) && (CHIP != 70066)
#if (CHIP == 710)
	; Enable selection timer
	; (AND with 0xff leaves CTEST7 unchanged so the timeout stays
	; disabled; 0xef clears the timeout-disable bit -- presumably
	; bit 4, confirm against the 53C710 data manual)
#ifdef NO_SELECTION_TIMEOUT
	MOVE CTEST7 & 0xff TO CTEST7
#else
	MOVE CTEST7 & 0xef TO CTEST7
#endif
#endif
	SELECT ATN FROM dsa_select, select_failed
	JUMP select_msgout, WHEN MSG_OUT
ENTRY select_msgout
select_msgout:
#if (CHIP == 710)
	; Disable selection timer
	MOVE CTEST7 | 0x10 TO CTEST7
#endif
	MOVE FROM dsa_msgout, WHEN MSG_OUT
#else
ENTRY select_msgout
	SELECT ATN 0, select_failed
select_msgout:
	MOVE 0, 0, WHEN MSGOUT
#endif

#ifdef EVENTS
	INT int_EVENT_SELECT
#endif
	RETURN
640
641;
642; select_done
643;
644; PURPOSE: continue on to normal data transfer; called as the exit
645; point from dsa_begin.
646;
647; INPUTS: dsa
648;
649; CALLS: OK
650;
651;
652
; select_done : exit point from dsa_begin; continue on to normal
; transfer.  Expect either COMMAND phase or negotiation messages, then
; send the CDB.
select_done:
#if (CHIP == 710)
; NOTE DSA is corrupt when we arrive here!
	MOVE MEMORY 4, saved_dsa, addr_dsa
#endif

#ifdef DEBUG
ENTRY select_check_dsa
select_check_dsa:
	INT int_debug_check_dsa
#endif

; After a successful selection, we should get either a CMD phase or
; some transfer request negotiation message.

	JUMP cmdout, WHEN CMD
	INT int_err_unexpected_phase, WHEN NOT MSG_IN

; Drain incoming messages until the phase changes.
select_msg_in:
	CALL msg_in, WHEN MSG_IN
	JUMP select_msg_in, WHEN MSG_IN

cmdout:
	INT int_err_unexpected_phase, WHEN NOT CMD
#if (CHIP == 700)
	INT int_norm_selected
#endif
; Send the command descriptor block (table indirect from dsa_cmdout on
; chips that support it).
ENTRY cmdout_cmdout
cmdout_cmdout:
#if (CHIP != 700) && (CHIP != 70066)
	MOVE FROM dsa_cmdout, WHEN CMD
#else
	MOVE 0, 0, WHEN CMD
#endif /* (CHIP != 700) && (CHIP != 70066) */
687
688;
689; data_transfer
690; other_out
691; other_in
692; other_transfer
693;
694; PURPOSE : handle the main data transfer for a SCSI command in
695; several parts. In the first part, data_transfer, DATA_IN
696; and DATA_OUT phases are allowed, with the user provided
697; code (usually dynamically generated based on the scatter/gather
698; list associated with a SCSI command) called to handle these
699; phases.
700;
701; After control has passed to one of the user provided
702; DATA_IN or DATA_OUT routines, back calls are made to
703; other_transfer_in or other_transfer_out to handle non-DATA IN
704; and DATA OUT phases respectively, with the state of the active
705; data pointer being preserved in TEMP.
706;
707; On completion, the user code passes control to other_transfer
708; which causes DATA_IN and DATA_OUT to result in unexpected_phase
709; interrupts so that data overruns may be trapped.
710;
711; INPUTS : DSA - SCSI command
712;
713; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
714; other_transfer
715;
716; MODIFIES : SCRATCH
717;
718; EXITS : if STATUS IN is detected, signifying command completion,
719; the NCR jumps to command_complete. If MSG IN occurs, a
720; CALL is made to msg_in. Otherwise, other_transfer runs in
721; an infinite loop.
722;
723
724ENTRY data_transfer
725data_transfer:
726 JUMP cmdout_cmdout, WHEN CMD
727 CALL msg_in, WHEN MSG_IN
728 INT int_err_unexpected_phase, WHEN MSG_OUT
729 JUMP do_dataout, WHEN DATA_OUT
730 JUMP do_datain, WHEN DATA_IN
731 JUMP command_complete, WHEN STATUS
732 JUMP data_transfer
733ENTRY end_data_transfer
734end_data_transfer:
735
736;
737; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
738; should be fixed up whenever the nexus changes so it can point to the
739; correct routine for that command.
740;
741
742#if (CHIP != 700) && (CHIP != 70066)
743; Nasty jump to dsa->dataout
744do_dataout:
745#if (CHIP == 710)
746 MOVE MEMORY 4, saved_dsa, addr_scratch
747#else
748 CALL dsa_to_scratch
749#endif
750 MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
751 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
752 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
753 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
754 DMODE_NCR_TO_MEMORY
755 MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
756 DMODE_MEMORY_TO_MEMORY
757dataout_to_jump:
758 MOVE MEMORY 4, 0, dataout_jump + 4
759#if (CHIP == 710)
760 ; Time to correct DSA following memory move
761 MOVE MEMORY 4, saved_dsa, addr_dsa
762#endif
763dataout_jump:
764 JUMP 0
765
766; Nasty jump to dsa->dsain
767do_datain:
768#if (CHIP == 710)
769 MOVE MEMORY 4, saved_dsa, addr_scratch
770#else
771 CALL dsa_to_scratch
772#endif
773 MOVE SCRATCH0 + dsa_datain TO SCRATCH0
774 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
775 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
776 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
777 DMODE_NCR_TO_MEMORY
778 MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
779 DMODE_MEMORY_TO_MEMORY
780ENTRY datain_to_jump
781datain_to_jump:
782 MOVE MEMORY 4, 0, datain_jump + 4
783#if (CHIP == 710)
784 ; Time to correct DSA following memory move
785 MOVE MEMORY 4, saved_dsa, addr_dsa
786#endif
787#ifdef DEBUG
788 INT int_debug_datain
789#endif
790datain_jump:
791 JUMP 0
792#endif /* (CHIP != 700) && (CHIP != 70066) */
793
794
; Note that other_out and other_in loop until a non-data phase
; is discovered, so we only execute return statements when we
; can go on to the next data phase block move statement.

ENTRY other_out
other_out:
#if 0
	INT 0x03ffdead
#endif
; Called back from user dataout code on a phase mismatch: anything but
; MSG_IN, STATUS or a return to DATA_OUT is a protocol error.
	INT int_err_unexpected_phase, WHEN CMD
	JUMP msg_in_restart, WHEN MSG_IN
	INT int_err_unexpected_phase, WHEN MSG_OUT
	INT int_err_unexpected_phase, WHEN DATA_IN
	JUMP command_complete, WHEN STATUS
	JUMP other_out, WHEN NOT DATA_OUT
#if (CHIP == 710)
; TEMP should be OK, as we got here from a call in the user dataout code.
#endif
	RETURN

ENTRY other_in
other_in:
#if 0
	INT 0x03ffdead
#endif
; Mirror image of other_out for the DATA_IN direction.
	INT int_err_unexpected_phase, WHEN CMD
	JUMP msg_in_restart, WHEN MSG_IN
	INT int_err_unexpected_phase, WHEN MSG_OUT
	INT int_err_unexpected_phase, WHEN DATA_OUT
	JUMP command_complete, WHEN STATUS
	JUMP other_in, WHEN NOT DATA_IN
#if (CHIP == 710)
; TEMP should be OK, as we got here from a call in the user datain code.
#endif
	RETURN


ENTRY other_transfer
other_transfer:
; Terminal phase loop entered once all expected data has been moved:
; any DATA phase now is an overrun and is trapped as unexpected.
	INT int_err_unexpected_phase, WHEN CMD
	CALL msg_in, WHEN MSG_IN
	INT int_err_unexpected_phase, WHEN MSG_OUT
	INT int_err_unexpected_phase, WHEN DATA_OUT
	INT int_err_unexpected_phase, WHEN DATA_IN
	JUMP command_complete, WHEN STATUS
	JUMP other_transfer
841
842;
843; msg_in_restart
844; msg_in
845; munge_msg
846;
847; PURPOSE : process messages from a target. msg_in is called when the
848; caller hasn't read the first byte of the message. munge_message
849; is called when the caller has read the first byte of the message,
850; and left it in SFBR. msg_in_restart is called when the caller
851; hasn't read the first byte of the message, and wishes RETURN
852; to transfer control back to the address of the conditional
853; CALL instruction rather than to the instruction after it.
854;
855; Various int_* interrupts are generated when the host system
856; needs to intervene, as is the case with SDTR, WDTR, and
857; INITIATE RECOVERY messages.
858;
859; When the host system handles one of these interrupts,
860; it can respond by reentering at reject_message,
861; which rejects the message and returns control to
862; the caller of msg_in or munge_msg, accept_message
863; which clears ACK and returns control, or reply_message
864; which sends the message pointed to by the DSA
865; msgout_other table indirect field.
866;
867; DISCONNECT messages are handled by moving the command
868; to the reconnect_dsa_queue.
869#if (CHIP == 710)
870; NOTE: DSA should be valid when we get here - we cannot save both it
871; and TEMP in this routine.
872#endif
873;
874; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
875; only)
876;
877; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
878;
879; MODIFIES : SCRATCH, DSA on DISCONNECT
880;
881; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
882; and normal return from message handlers running under
883; Linux, control is returned to the caller. Receipt
884; of DISCONNECT messages pass control to dsa_schedule.
885;
886ENTRY msg_in_restart
887msg_in_restart:
888; XXX - hackish
889;
890; Since it's easier to debug changes to the statically
891; compiled code, rather than the dynamically generated
892; stuff, such as
893;
894; MOVE x, y, WHEN data_phase
895; CALL other_z, WHEN NOT data_phase
896; MOVE x, y, WHEN data_phase
897;
898; I'd like to have certain routines (notably the message handler)
899; restart on the conditional call rather than the next instruction.
900;
901; So, subtract 8 from the return address
902
903 MOVE TEMP0 + 0xf8 TO TEMP0
904 MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
905 MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
906 MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
907
908ENTRY msg_in
909msg_in:
910 MOVE 1, msg_buf, WHEN MSG_IN
911
912munge_msg:
913 JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
914 JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
915;
916; XXX - I've seen a handful of broken SCSI devices which fail to issue
917; a SAVE POINTERS message before disconnecting in the middle of
918; a transfer, assuming that the DATA POINTER will be implicitly
919; restored.
920;
921; Historically, I've often done an implicit save when the DISCONNECT
922; message is processed. We may want to consider having the option of
923; doing that here.
924;
925 JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
926 JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
927 JUMP munge_disconnect, IF 0x04 ; DISCONNECT
928 INT int_msg_1, IF 0x07 ; MESSAGE REJECT
929 INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
930#ifdef EVENTS
931 INT int_EVENT_SELECT_FAILED
932#endif
933 JUMP reject_message
934
935munge_2:
936 JUMP reject_message
937;
938; The SCSI standard allows targets to recover from transient
939; error conditions by backing up the data pointer with a
940; RESTORE POINTERS message.
941;
942; So, we must save and restore the _residual_ code as well as
943; the current instruction pointer. Because of this messiness,
944; it is simpler to put dynamic code in the dsa for this and to
945; just do a simple jump down there.
946;
947
948munge_save_data_pointer:
949#if (CHIP == 710)
950 ; We have something in TEMP here, so first we must save that
951 MOVE TEMP0 TO SFBR
952 MOVE SFBR TO SCRATCH0
953 MOVE TEMP1 TO SFBR
954 MOVE SFBR TO SCRATCH1
955 MOVE TEMP2 TO SFBR
956 MOVE SFBR TO SCRATCH2
957 MOVE TEMP3 TO SFBR
958 MOVE SFBR TO SCRATCH3
959 MOVE MEMORY 4, addr_scratch, jump_temp + 4
960 ; Now restore DSA
961 MOVE MEMORY 4, saved_dsa, addr_dsa
962#endif
963 MOVE DSA0 + dsa_save_data_pointer TO SFBR
964 MOVE SFBR TO SCRATCH0
965 MOVE DSA1 + 0xff TO SFBR WITH CARRY
966 MOVE SFBR TO SCRATCH1
967 MOVE DSA2 + 0xff TO SFBR WITH CARRY
968 MOVE SFBR TO SCRATCH2
969 MOVE DSA3 + 0xff TO SFBR WITH CARRY
970 MOVE SFBR TO SCRATCH3
971
972 DMODE_NCR_TO_MEMORY
973 MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
974 DMODE_MEMORY_TO_MEMORY
975jump_dsa_save:
976 JUMP 0
977
978munge_restore_pointers:
979#if (CHIP == 710)
980 ; The code at dsa_restore_pointers will RETURN, but we don't care
981 ; about TEMP here, as it will overwrite it anyway.
982#endif
983 MOVE DSA0 + dsa_restore_pointers TO SFBR
984 MOVE SFBR TO SCRATCH0
985 MOVE DSA1 + 0xff TO SFBR WITH CARRY
986 MOVE SFBR TO SCRATCH1
987 MOVE DSA2 + 0xff TO SFBR WITH CARRY
988 MOVE SFBR TO SCRATCH2
989 MOVE DSA3 + 0xff TO SFBR WITH CARRY
990 MOVE SFBR TO SCRATCH3
991
992 DMODE_NCR_TO_MEMORY
993 MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
994 DMODE_MEMORY_TO_MEMORY
995jump_dsa_restore:
996 JUMP 0
997
998
munge_disconnect:
#ifdef DEBUG
	INT int_debug_disconnect_msg
#endif

/*
 * Before, we overlapped processing with waiting for disconnect, but
 * debugging was beginning to appear messy.  Temporarily move things
 * to just before the WAIT DISCONNECT.
 */

#ifdef ORIGINAL
#if (CHIP == 710)
; Following clears Unexpected Disconnect bit.  What do we do?
#else
	MOVE SCNTL2 & 0x7f TO SCNTL2
#endif
	CLEAR ACK
#endif

#if (CHIP != 700) && (CHIP != 70066)
; Hand the disconnecting command to dsa_schedule, which queues it for
; later reselection.
	JUMP dsa_schedule
#else
	WAIT DISCONNECT
	INT int_norm_disconnected
#endif
1025
; Parse an EXTENDED MESSAGE: read the length byte and dispatch on it.
; Only length 2 (WDTR) and length 3 (SDTR) are understood; everything
; else is rejected.
munge_extended:
	CLEAR ACK
	INT int_err_unexpected_phase, WHEN NOT MSG_IN
	MOVE 1, msg_buf + 1, WHEN MSG_IN
	JUMP munge_extended_2, IF 0x02
	JUMP munge_extended_3, IF 0x03
	JUMP reject_message

munge_extended_2:
	CLEAR ACK
	MOVE 1, msg_buf + 2, WHEN MSG_IN
	JUMP reject_message, IF NOT 0x02	; Must be WDTR
	CLEAR ACK
	MOVE 1, msg_buf + 3, WHEN MSG_IN
; Interrupt the host to act on the wide transfer request.
	INT int_msg_wdtr

munge_extended_3:
	CLEAR ACK
	MOVE 1, msg_buf + 2, WHEN MSG_IN
	JUMP reject_message, IF NOT 0x01	; Must be SDTR
	CLEAR ACK
	MOVE 2, msg_buf + 3, WHEN MSG_IN
; Interrupt the host to act on the synchronous transfer request.
	INT int_msg_sdtr

; Send MESSAGE REJECT for the message currently being ACKed and return
; to the msg_in caller.
ENTRY reject_message
reject_message:
	SET ATN
	CLEAR ACK
	MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
	RETURN

; Accept the current message: drop ATN, release ACK, return.
ENTRY accept_message
accept_message:
	CLEAR ATN
	CLEAR ACK
	RETURN

; Reply with the message the host placed in the DSA msgout_other
; table-indirect field.
ENTRY respond_message
respond_message:
	SET ATN
	CLEAR ACK
	MOVE FROM dsa_msgout_other, WHEN MSG_OUT
	RETURN
1069
1070;
1071; command_complete
1072;
1073; PURPOSE : handle command termination when STATUS IN is detected by reading
1074; a status byte followed by a command termination message.
1075;
1076; Normal termination results in an INTFLY instruction, and
1077; the host system can pick out which command terminated by
1078; examining the MESSAGE and STATUS buffers of all currently
1079; executing commands;
1080;
1081; Abnormal (CHECK_CONDITION) termination results in an
1082; int_err_check_condition interrupt so that a REQUEST SENSE
1083; command can be issued out-of-order so that no other command
1084; clears the contingent allegiance condition.
1085;
1086;
1087; INPUTS : DSA - command
1088;
1089; CALLS : OK
1090;
1091; EXITS : On successful termination, control is passed to schedule.
1092; On abnormal termination, the user will usually modify the
1093; DSA fields and corresponding buffers and return control
1094; to select.
1095;
1096
1097ENTRY command_complete
1098command_complete:
1099 MOVE FROM dsa_status, WHEN STATUS
1100#if (CHIP != 700) && (CHIP != 70066)
1101 MOVE SFBR TO SCRATCH0 ; Save status
1102#endif /* (CHIP != 700) && (CHIP != 70066) */
1103ENTRY command_complete_msgin
1104command_complete_msgin:
1105 MOVE FROM dsa_msgin, WHEN MSG_IN
1106; Indicate that we should be expecting a disconnect
1107#if (CHIP != 710)
1108 MOVE SCNTL2 & 0x7f TO SCNTL2
1109#else
1110 ; Above code cleared the Unexpected Disconnect bit, what do we do?
1111#endif
1112 CLEAR ACK
1113#if (CHIP != 700) && (CHIP != 70066)
1114 WAIT DISCONNECT
1115
1116;
1117; The SCSI specification states that when a UNIT ATTENTION condition
1118; is pending, as indicated by a CHECK CONDITION status message,
1119; the target shall revert to asynchronous transfers. Since
1120; synchronous transfers parameters are maintained on a per INITIATOR/TARGET
1121; basis, and returning control to our scheduler could work on a command
1122; running on another lun on that target using the old parameters, we must
1123; interrupt the host processor to get them changed, or change them ourselves.
1124;
1125; Once SCSI-II tagged queueing is implemented, things will be even more
1126; hairy, since contingent allegiance conditions exist on a per-target/lun
1127; basis, and issuing a new command with a different tag would clear it.
1128; In these cases, we must interrupt the host processor to get a request
1129; added to the HEAD of the queue with the request sense command, or we
1130; must automatically issue the request sense command.
1131
1132#if 0
1133 MOVE SCRATCH0 TO SFBR
1134 JUMP command_failed, IF 0x02
1135#endif
1136#if (CHIP == 710)
1137#if defined(MVME16x_INTFLY)
1138; For MVME16x (ie CHIP=710) we will force an INTFLY by triggering a software
1139; interrupt (SW7). We can use SCRATCH, as we are about to jump to
1140; schedule, which corrupts it anyway. Will probably remove this later,
1141; but want to check performance effects first.
1142
1143#define INTFLY_ADDR 0xfff40070
1144
1145 MOVE 0 TO SCRATCH0
1146 MOVE 0x80 TO SCRATCH1
1147 MOVE 0 TO SCRATCH2
1148 MOVE 0 TO SCRATCH3
1149 MOVE MEMORY 4, addr_scratch, INTFLY_ADDR
1150#else
1151 INT int_norm_emulateintfly
1152#endif
1153#else
1154 INTFLY
1155#endif
1156#endif /* (CHIP != 700) && (CHIP != 70066) */
1157#if (CHIP == 710)
1158 ; Time to correct DSA following memory move
1159 MOVE MEMORY 4, saved_dsa, addr_dsa
1160#endif
1161#ifdef EVENTS
1162 INT int_EVENT_COMPLETE
1163#endif
1164#if (CHIP != 700) && (CHIP != 70066)
1165 JUMP schedule
1166command_failed:
1167 INT int_err_check_condition
1168#else
1169 INT int_norm_command_complete
1170#endif
1171
1172;
1173; wait_reselect
1174;
1175; PURPOSE : This is essentially the idle routine, where control lands
1176; when there are no new processes to schedule. wait_reselect
1177; waits for reselection, selection, and new commands.
1178;
1179; When a successful reselection occurs, with the aid
1180; of fixed up code in each DSA, wait_reselect walks the
1181; reconnect_dsa_queue, asking each dsa if the target ID
1182; and LUN match its.
1183;
1184; If a match is found, a call is made back to reselected_ok,
1185; which through the miracles of self modifying code, extracts
1186; the found DSA from the reconnect_dsa_queue and then
1187; returns control to the DSAs thread of execution.
1188;
1189; INPUTS : NONE
1190;
1191; CALLS : OK
1192;
1193; MODIFIES : DSA,
1194;
1195; EXITS : On successful reselection, control is returned to the
1196; DSA which called reselected_ok. If the WAIT RESELECT
1197; was interrupted by a new commands arrival signaled by
1198; SIG_P, control is passed to schedule. If the NCR is
1199; selected, the host system is interrupted with an
1200; int_err_selected which is usually responded to by
1201; setting DSP to the target_abort address.
1202
1203ENTRY wait_reselect
1204wait_reselect:
1205#ifdef EVENTS
1206 int int_EVENT_IDLE
1207#endif
1208#ifdef DEBUG
1209 int int_debug_idle
1210#endif
1211 WAIT RESELECT wait_reselect_failed
1212
1213reselected:
1214#ifdef EVENTS
1215 int int_EVENT_RESELECT
1216#endif
1217 CLEAR TARGET
1218 DMODE_MEMORY_TO_MEMORY
1219 ; Read all data needed to reestablish the nexus -
1220 MOVE 1, reselected_identify, WHEN MSG_IN
1221 ; We used to CLEAR ACK here.
1222#if (CHIP != 700) && (CHIP != 70066)
1223#ifdef DEBUG
1224 int int_debug_reselected
1225#endif
1226
1227 ; Point DSA at the current head of the disconnected queue.
1228 DMODE_MEMORY_TO_NCR
1229 MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
1230 DMODE_MEMORY_TO_MEMORY
1231#if (CHIP == 710)
1232 MOVE MEMORY 4, addr_scratch, saved_dsa
1233#else
1234 CALL scratch_to_dsa
1235#endif
1236
1237 ; Fix the update-next pointer so that the reconnect_dsa_head
1238 ; pointer is the one that will be updated if this DSA is a hit
1239 ; and we remove it from the queue.
1240
1241 MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok_patch + 8
1242#if (CHIP == 710)
1243 ; Time to correct DSA following memory move
1244 MOVE MEMORY 4, saved_dsa, addr_dsa
1245#endif
1246
1247ENTRY reselected_check_next
1248reselected_check_next:
1249#ifdef DEBUG
1250 INT int_debug_reselect_check
1251#endif
1252 ; Check for a NULL pointer.
1253 MOVE DSA0 TO SFBR
1254 JUMP reselected_not_end, IF NOT 0
1255 MOVE DSA1 TO SFBR
1256 JUMP reselected_not_end, IF NOT 0
1257 MOVE DSA2 TO SFBR
1258 JUMP reselected_not_end, IF NOT 0
1259 MOVE DSA3 TO SFBR
1260 JUMP reselected_not_end, IF NOT 0
1261 INT int_err_unexpected_reselect
1262
1263reselected_not_end:
1264 ;
1265 ; XXX the ALU is only eight bits wide, and the assembler
1266 ; wont do the dirt work for us. As long as dsa_check_reselect
1267 ; is negative, we need to sign extend with 1 bits to the full
1268 ; 32 bit width of the address.
1269 ;
1270 ; A potential work around would be to have a known alignment
1271 ; of the DSA structure such that the base address plus
1272 ; dsa_check_reselect doesn't require carrying from bytes
1273 ; higher than the LSB.
1274 ;
1275
1276 MOVE DSA0 TO SFBR
1277 MOVE SFBR + dsa_check_reselect TO SCRATCH0
1278 MOVE DSA1 TO SFBR
1279 MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
1280 MOVE DSA2 TO SFBR
1281 MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
1282 MOVE DSA3 TO SFBR
1283 MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
1284
1285 DMODE_NCR_TO_MEMORY
1286 MOVE MEMORY 4, addr_scratch, reselected_check + 4
1287 DMODE_MEMORY_TO_MEMORY
1288#if (CHIP == 710)
1289 ; Time to correct DSA following memory move
1290 MOVE MEMORY 4, saved_dsa, addr_dsa
1291#endif
1292reselected_check:
1293 JUMP 0
1294
1295
1296;
1297;
1298#if (CHIP == 710)
1299; We have problems here - the memory move corrupts TEMP and DSA. This
1300; routine is called from DSA code, and patched from many places. Scratch
1301; is probably free when it is called.
1302; We have to:
1303; copy temp to scratch, one byte at a time
1304; write scratch to patch a jump in place of the return
1305; do the move memory
1306; jump to the patched in return address
1307; DSA is corrupt when we get here, and can be left corrupt
1308
1309ENTRY reselected_ok
1310reselected_ok:
1311 MOVE TEMP0 TO SFBR
1312 MOVE SFBR TO SCRATCH0
1313 MOVE TEMP1 TO SFBR
1314 MOVE SFBR TO SCRATCH1
1315 MOVE TEMP2 TO SFBR
1316 MOVE SFBR TO SCRATCH2
1317 MOVE TEMP3 TO SFBR
1318 MOVE SFBR TO SCRATCH3
1319 MOVE MEMORY 4, addr_scratch, reselected_ok_jump + 4
1320reselected_ok_patch:
1321 MOVE MEMORY 4, 0, 0
1322reselected_ok_jump:
1323 JUMP 0
1324#else
1325ENTRY reselected_ok
1326reselected_ok:
1327reselected_ok_patch:
1328 MOVE MEMORY 4, 0, 0 ; Patched : first word
1329 ; is address of
1330 ; successful dsa_next
1331 ; Second word is last
1332 ; unsuccessful dsa_next,
1333 ; starting with
1334 ; dsa_reconnect_head
1335 ; We used to CLEAR ACK here.
1336#ifdef DEBUG
1337 INT int_debug_reselected_ok
1338#endif
1339#ifdef DEBUG
1340 INT int_debug_check_dsa
1341#endif
1342 RETURN ; Return control to where
1343#endif
1344#else
1345 INT int_norm_reselected
1346#endif /* (CHIP != 700) && (CHIP != 70066) */
1347
1348selected:
1349 INT int_err_selected;
1350
1351;
1352; A select or reselect failure can be caused by one of two conditions :
1353; 1. SIG_P was set. This will be the case if the user has written
1354; a new value to a previously NULL head of the issue queue.
1355;
1356; 2. The NCR53c810 was selected or reselected by another device.
1357;
1358; 3. The bus was already busy since we were selected or reselected
1359; before starting the command.
1360
1361wait_reselect_failed:
1362#ifdef EVENTS
1363 INT int_EVENT_RESELECT_FAILED
1364#endif
1365; Check selected bit.
1366#if (CHIP == 710)
1367 ; Must work out how to tell if we are selected....
1368#else
1369 MOVE SIST0 & 0x20 TO SFBR
1370 JUMP selected, IF 0x20
1371#endif
1372; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
1373 MOVE CTEST2 & 0x40 TO SFBR
1374 JUMP schedule, IF 0x40
1375; Check connected bit.
1376; FIXME: this needs to change if we support target mode
1377 MOVE ISTAT & 0x08 TO SFBR
1378 JUMP reselected, IF 0x08
1379; FIXME : Something bogus happened, and we shouldn't fail silently.
1380#if 0
1381 JUMP schedule
1382#else
1383 INT int_debug_panic
1384#endif
1385
1386
1387select_failed:
1388#if (CHIP == 710)
1389 ; Disable selection timer
1390 MOVE CTEST7 | 0x10 TO CTEST7
1391#endif
1392#ifdef EVENTS
1393 int int_EVENT_SELECT_FAILED
1394#endif
1395; Otherwise, mask the selected and reselected bits off SIST0
1396#if (CHIP ==710)
1397 ; Let's assume we don't get selected for now
1398 MOVE SSTAT0 & 0x10 TO SFBR
1399#else
1400 MOVE SIST0 & 0x30 TO SFBR
1401 JUMP selected, IF 0x20
1402#endif
1403 JUMP reselected, IF 0x10
1404; If SIGP is set, the user just gave us another command, and
1405; we should restart or return to the scheduler.
1406; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
1407 MOVE CTEST2 & 0x40 TO SFBR
1408 JUMP select, IF 0x40
1409; Check connected bit.
1410; FIXME: this needs to change if we support target mode
1411; FIXME: is this really necessary?
1412 MOVE ISTAT & 0x08 TO SFBR
1413 JUMP reselected, IF 0x08
1414; FIXME : Something bogus happened, and we shouldn't fail silently.
1415#if 0
1416 JUMP schedule
1417#else
1418 INT int_debug_panic
1419#endif
1420
1421;
1422; test_1
1423; test_2
1424;
1425; PURPOSE : run some verification tests on the NCR. test_1
1426; copies test_src to test_dest and interrupts the host
1427; processor, testing for cache coherency and interrupt
1428; problems in the processes.
1429;
1430; test_2 runs a command with offsets relative to the
1431; DSA on entry, and is useful for miscellaneous experimentation.
1432;
1433
1434; Verify that interrupts are working correctly and that we don't
1435; have a cache invalidation problem.
1436
1437ABSOLUTE test_src = 0, test_dest = 0
1438ENTRY test_1
1439test_1:
1440 MOVE MEMORY 4, test_src, test_dest
1441 INT int_test_1
1442
1443;
1444; Run arbitrary commands, with test code establishing a DSA
1445;
1446
1447ENTRY test_2
1448test_2:
1449 CLEAR TARGET
1450#if (CHIP == 710)
1451 ; Enable selection timer
1452#ifdef NO_SELECTION_TIMEOUT
1453 MOVE CTEST7 & 0xff TO CTEST7
1454#else
1455 MOVE CTEST7 & 0xef TO CTEST7
1456#endif
1457#endif
1458 SELECT ATN FROM 0, test_2_fail
1459 JUMP test_2_msgout, WHEN MSG_OUT
1460ENTRY test_2_msgout
1461test_2_msgout:
1462#if (CHIP == 710)
1463 ; Disable selection timer
1464 MOVE CTEST7 | 0x10 TO CTEST7
1465#endif
1466 MOVE FROM 8, WHEN MSG_OUT
1467 MOVE FROM 16, WHEN CMD
1468 MOVE FROM 24, WHEN DATA_IN
1469 MOVE FROM 32, WHEN STATUS
1470 MOVE FROM 40, WHEN MSG_IN
1471#if (CHIP != 710)
1472 MOVE SCNTL2 & 0x7f TO SCNTL2
1473#endif
1474 CLEAR ACK
1475 WAIT DISCONNECT
1476test_2_fail:
1477#if (CHIP == 710)
1478 ; Disable selection timer
1479 MOVE CTEST7 | 0x10 TO CTEST7
1480#endif
1481 INT int_test_2
1482
1483ENTRY debug_break
1484debug_break:
1485 INT int_debug_break
1486
1487;
1488; initiator_abort
1489; target_abort
1490;
1491; PURPOSE : Abort the currently established nexus from with initiator
1492; or target mode.
1493;
1494;
1495
1496ENTRY target_abort
1497target_abort:
1498 SET TARGET
1499 DISCONNECT
1500 CLEAR TARGET
1501 JUMP schedule
1502
1503ENTRY initiator_abort
1504initiator_abort:
1505 SET ATN
1506;
1507; The SCSI-I specification says that targets may go into MSG out at
1508; their leisure upon receipt of the ATN single. On all versions of the
1509; specification, we can't change phases until REQ transitions true->false,
1510; so we need to sink/source one byte of data to allow the transition.
1511;
1512; For the sake of safety, we'll only source one byte of data in all
1513; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
1514; arbitrary number of bytes.
1515 JUMP spew_cmd, WHEN CMD
1516 JUMP eat_msgin, WHEN MSG_IN
1517 JUMP eat_datain, WHEN DATA_IN
1518 JUMP eat_status, WHEN STATUS
1519 JUMP spew_dataout, WHEN DATA_OUT
1520 JUMP sated
1521spew_cmd:
1522 MOVE 1, NCR53c7xx_zero, WHEN CMD
1523 JUMP sated
1524eat_msgin:
1525 MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
1526 JUMP eat_msgin, WHEN MSG_IN
1527 JUMP sated
1528eat_status:
1529 MOVE 1, NCR53c7xx_sink, WHEN STATUS
1530 JUMP eat_status, WHEN STATUS
1531 JUMP sated
1532eat_datain:
1533 MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
1534 JUMP eat_datain, WHEN DATA_IN
1535 JUMP sated
1536spew_dataout:
1537 MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
1538sated:
1539#if (CHIP != 710)
1540 MOVE SCNTL2 & 0x7f TO SCNTL2
1541#endif
1542 MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
1543 WAIT DISCONNECT
1544 INT int_norm_aborted
1545
#if (CHIP != 710)
;
; dsa_to_scratch
; scratch_to_dsa
;
; PURPOSE :
;	The NCR chips cannot do a move memory instruction with the DSA register
;	as the source or destination.  So, we provide a couple of subroutines
;	that let us switch between the DSA register and scratch register.
;
;	Memory moves to/from the DSPS  register also don't work, but we
;	don't use them.
;
;


dsa_to_scratch:
; Copy DSA into SCRATCH one byte at a time via SFBR, the only
; register-to-register path the chip provides.
	MOVE DSA0 TO SFBR
	MOVE SFBR TO SCRATCH0
	MOVE DSA1 TO SFBR
	MOVE SFBR TO SCRATCH1
	MOVE DSA2 TO SFBR
	MOVE SFBR TO SCRATCH2
	MOVE DSA3 TO SFBR
	MOVE SFBR TO SCRATCH3
	RETURN

scratch_to_dsa:
; Inverse of dsa_to_scratch: copy SCRATCH back into DSA via SFBR.
	MOVE SCRATCH0 TO SFBR
	MOVE SFBR TO DSA0
	MOVE SCRATCH1 TO SFBR
	MOVE SFBR TO DSA1
	MOVE SCRATCH2 TO SFBR
	MOVE SFBR TO DSA2
	MOVE SCRATCH3 TO SFBR
	MOVE SFBR TO DSA3
	RETURN
#endif

#if (CHIP == 710)
; Little patched jump, used to overcome problems with TEMP getting
; corrupted on memory moves.
; The JUMP operand is patched at run time (see munge_save_data_pointer)
; with the return address that was saved out of TEMP.

jump_temp:
	JUMP 0
#endif
diff --git a/drivers/scsi/53c7xx_d.h_shipped b/drivers/scsi/53c7xx_d.h_shipped
deleted file mode 100644
index 21d31b08ec31..000000000000
--- a/drivers/scsi/53c7xx_d.h_shipped
+++ /dev/null
@@ -1,2874 +0,0 @@
1/* DO NOT EDIT - Generated automatically by script_asm.pl */
2static u32 SCRIPT[] = {
3/*
4
5
6
7
8
9; 53c710 driver. Modified from Drew Eckhardts driver
10; for 53c810 by Richard Hirst [richard@sleepie.demon.co.uk]
11;
12; I have left the script for the 53c8xx family in here, as it is likely
13; to be useful to see what I changed when bug hunting.
14
15; NCR 53c810 driver, main script
16; Sponsored by
17; iX Multiuser Multitasking Magazine
18; hm@ix.de
19;
20; Copyright 1993, 1994, 1995 Drew Eckhardt
21; Visionary Computing
22; (Unix and Linux consulting and custom programming)
23; drew@PoohSticks.ORG
24; +1 (303) 786-7975
25;
26; TolerANT and SCSI SCRIPTS are registered trademarks of NCR Corporation.
27;
28; PRE-ALPHA
29;
30; For more information, please consult
31;
32; NCR 53C810
33; PCI-SCSI I/O Processor
34; Data Manual
35;
36; NCR 53C710
37; SCSI I/O Processor
38; Programmers Guide
39;
40; NCR Microelectronics
41; 1635 Aeroplaza Drive
42; Colorado Springs, CO 80916
43; 1+ (719) 578-3400
44;
45; Toll free literature number
46; +1 (800) 334-5454
47;
48; IMPORTANT : This code is self modifying due to the limitations of
49; the NCR53c7,8xx series chips. Persons debugging this code with
50; the remote debugger should take this into account, and NOT set
51; breakpoints in modified instructions.
52;
53; Design:
54; The NCR53c7,8xx family of SCSI chips are busmasters with an onboard
55; microcontroller using a simple instruction set.
56;
57; So, to minimize the effects of interrupt latency, and to maximize
58; throughput, this driver offloads the practical maximum amount
59; of processing to the SCSI chip while still maintaining a common
60; structure.
61;
62; Where tradeoffs were needed between efficiency on the older
63; chips and the newer NCR53c800 series, the NCR53c800 series
64; was chosen.
65;
66; While the NCR53c700 and NCR53c700-66 lacked the facilities to fully
67; automate SCSI transfers without host processor intervention, this
68; isn't the case with the NCR53c710 and newer chips which allow
69;
70; - reads and writes to the internal registers from within the SCSI
71; scripts, allowing the SCSI SCRIPTS(tm) code to save processor
72; state so that multiple threads of execution are possible, and also
73; provide an ALU for loop control, etc.
74;
75; - table indirect addressing for some instructions. This allows
76; pointers to be located relative to the DSA ((Data Structure
77; Address) register.
78;
79; These features make it possible to implement a mailbox style interface,
80; where the same piece of code is run to handle I/O for multiple threads
81; at once minimizing our need to relocate code. Since the NCR53c700/
82; NCR53c800 series have a unique combination of features, making a
83; a standard ingoing/outgoing mailbox system, costly, I've modified it.
84;
85; - Mailboxes are a mixture of code and data. This lets us greatly
86; simplify the NCR53c810 code and do things that would otherwise
87; not be possible.
88;
89; The saved data pointer is now implemented as follows :
90;
91; Control flow has been architected such that if control reaches
92; munge_save_data_pointer, on a restore pointers message or
93; reconnection, a jump to the address formerly in the TEMP register
94; will allow the SCSI command to resume execution.
95;
96
97;
98; Note : the DSA structures must be aligned on 32 bit boundaries,
99; since the source and destination of MOVE MEMORY instructions
100; must share the same alignment and this is the alignment of the
101; NCR registers.
102;
103
104; For some systems (MVME166, for example) dmode is always the same, so don't
105; waste time writing it
106
107
108
109
110
111
112
113
114
115
116
117ABSOLUTE dsa_temp_lun = 0 ; Patch to lun for current dsa
118ABSOLUTE dsa_temp_next = 0 ; Patch to dsa next for current dsa
119ABSOLUTE dsa_temp_addr_next = 0 ; Patch to address of dsa next address
120 ; for current dsa
121ABSOLUTE dsa_temp_sync = 0 ; Patch to address of per-target
122 ; sync routine
123ABSOLUTE dsa_sscf_710 = 0 ; Patch to address of per-target
124 ; sscf value (53c710)
125ABSOLUTE dsa_temp_target = 0 ; Patch to id for current dsa
126ABSOLUTE dsa_temp_addr_saved_pointer = 0; Patch to address of per-command
127 ; saved data pointer
128ABSOLUTE dsa_temp_addr_residual = 0 ; Patch to address of per-command
129 ; current residual code
130ABSOLUTE dsa_temp_addr_saved_residual = 0; Patch to address of per-command
131 ; saved residual code
132ABSOLUTE dsa_temp_addr_new_value = 0 ; Address of value for JUMP operand
133ABSOLUTE dsa_temp_addr_array_value = 0 ; Address to copy to
134ABSOLUTE dsa_temp_addr_dsa_value = 0 ; Address of this DSA value
135
136;
137; Once a device has initiated reselection, we need to compare it
138; against the singly linked list of commands which have disconnected
139; and are pending reselection. These commands are maintained in
140; an unordered singly linked list of DSA structures, through the
141; DSA pointers at their 'centers' headed by the reconnect_dsa_head
142; pointer.
143;
144; To avoid complications in removing commands from the list,
145; I minimize the amount of expensive (at eight operations per
146; addition @ 500-600ns each) pointer operations which must
147; be done in the NCR driver by precomputing them on the
148; host processor during dsa structure generation.
149;
150; The fixed-up per DSA code knows how to recognize the nexus
151; associated with the corresponding SCSI command, and modifies
152; the source and destination pointers for the MOVE MEMORY
153; instruction which is executed when reselected_ok is called
154; to remove the command from the list. Similarly, DSA is
155; loaded with the address of the next DSA structure and
156; reselected_check_next is called if a failure occurs.
157;
158; Perhaps more concisely, the net effect of the mess is
159;
160; for (dsa = reconnect_dsa_head, dest = &reconnect_dsa_head,
161; src = NULL; dsa; dest = &dsa->next, dsa = dsa->next) {
162; src = &dsa->next;
163; if (target_id == dsa->id && target_lun == dsa->lun) {
164; *dest = *src;
165; break;
166; }
167; }
168;
169; if (!dsa)
170; error (int_err_unexpected_reselect);
171; else
172; longjmp (dsa->jump_resume, 0);
173;
174;
175
176
177; Define DSA structure used for mailboxes
178ENTRY dsa_code_template
179dsa_code_template:
180ENTRY dsa_code_begin
181dsa_code_begin:
182; RGH: Don't care about TEMP and DSA here
183
184 MOVE MEMORY 4, dsa_temp_addr_dsa_value, addr_scratch
185
186at 0x00000000 : */ 0xc0000004,0x00000000,0x00000000,
187/*
188
189
190 MOVE MEMORY 4, addr_scratch, saved_dsa
191
192at 0x00000003 : */ 0xc0000004,0x00000000,0x00000000,
193/*
194 ; We are about to go and select the device, so must set SSCF bits
195 MOVE MEMORY 4, dsa_sscf_710, addr_scratch
196
197at 0x00000006 : */ 0xc0000004,0x00000000,0x00000000,
198/*
199
200 MOVE SCRATCH3 TO SFBR
201
202at 0x00000009 : */ 0x72370000,0x00000000,
203/*
204
205
206
207 MOVE SFBR TO SBCL
208
209at 0x0000000b : */ 0x6a0b0000,0x00000000,
210/*
211 MOVE MEMORY 4, saved_dsa, addr_dsa
212
213at 0x0000000d : */ 0xc0000004,0x00000000,0x00000000,
214/*
215
216
217
218 CALL select
219
220at 0x00000010 : */ 0x88080000,0x000001f8,
221/*
222; Handle the phase mismatch which may have resulted from the
223; MOVE FROM dsa_msgout if we returned here. The CLEAR ATN
224; may or may not be necessary, and we should update script_asm.pl
225; to handle multiple pieces.
226 CLEAR ATN
227
228at 0x00000012 : */ 0x60000008,0x00000000,
229/*
230 CLEAR ACK
231
232at 0x00000014 : */ 0x60000040,0x00000000,
233/*
234
235; Replace second operand with address of JUMP instruction dest operand
236; in schedule table for this DSA. Becomes dsa_jump_dest in 53c7,8xx.c.
237ENTRY dsa_code_fix_jump
238dsa_code_fix_jump:
239 MOVE MEMORY 4, NOP_insn, 0
240
241at 0x00000016 : */ 0xc0000004,0x00000000,0x00000000,
242/*
243 JUMP select_done
244
245at 0x00000019 : */ 0x80080000,0x00000230,
246/*
247
248; wrong_dsa loads the DSA register with the value of the dsa_next
249; field.
250;
251wrong_dsa:
252
253; NOTE DSA is corrupt when we arrive here!
254
255; Patch the MOVE MEMORY INSTRUCTION such that
256; the destination address is the address of the OLD
257; next pointer.
258;
259 MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 8
260
261at 0x0000001b : */ 0xc0000004,0x00000000,0x000007ec,
262/*
263
264;
265; Move the _contents_ of the next pointer into the DSA register as
; the next I_T_L or I_T_L_Q tuple to check against the established
267; nexus.
268;
269 MOVE MEMORY 4, dsa_temp_next, addr_scratch
270
271at 0x0000001e : */ 0xc0000004,0x00000000,0x00000000,
272/*
273
274
275 MOVE MEMORY 4, addr_scratch, saved_dsa
276
277at 0x00000021 : */ 0xc0000004,0x00000000,0x00000000,
278/*
279 MOVE MEMORY 4, saved_dsa, addr_dsa
280
281at 0x00000024 : */ 0xc0000004,0x00000000,0x00000000,
282/*
283
284
285
286 JUMP reselected_check_next
287
288at 0x00000027 : */ 0x80080000,0x000006f0,
289/*
290
291ABSOLUTE dsa_save_data_pointer = 0
292ENTRY dsa_code_save_data_pointer
293dsa_code_save_data_pointer:
294
295 ; When we get here, TEMP has been saved in jump_temp+4, DSA is corrupt
296 ; We MUST return with DSA correct
297 MOVE MEMORY 4, jump_temp+4, dsa_temp_addr_saved_pointer
298
299at 0x00000029 : */ 0xc0000004,0x000009c8,0x00000000,
300/*
301; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
302 MOVE MEMORY 24, dsa_temp_addr_residual, dsa_temp_addr_saved_residual
303
304at 0x0000002c : */ 0xc0000018,0x00000000,0x00000000,
305/*
306 CLEAR ACK
307
308at 0x0000002f : */ 0x60000040,0x00000000,
309/*
310
311
312
313 MOVE MEMORY 4, saved_dsa, addr_dsa
314
315at 0x00000031 : */ 0xc0000004,0x00000000,0x00000000,
316/*
317 JUMP jump_temp
318
319at 0x00000034 : */ 0x80080000,0x000009c4,
320/*
321
322ABSOLUTE dsa_restore_pointers = 0
323ENTRY dsa_code_restore_pointers
324dsa_code_restore_pointers:
325
326 ; TEMP and DSA are corrupt when we get here, but who cares!
327 MOVE MEMORY 4, dsa_temp_addr_saved_pointer, jump_temp + 4
328
329at 0x00000036 : */ 0xc0000004,0x00000000,0x000009c8,
330/*
331; HARD CODED : 24 bytes needs to agree with 53c7,8xx.h
332 MOVE MEMORY 24, dsa_temp_addr_saved_residual, dsa_temp_addr_residual
333
334at 0x00000039 : */ 0xc0000018,0x00000000,0x00000000,
335/*
336 CLEAR ACK
337
338at 0x0000003c : */ 0x60000040,0x00000000,
339/*
340 ; Restore DSA, note we don't care about TEMP
341 MOVE MEMORY 4, saved_dsa, addr_dsa
342
343at 0x0000003e : */ 0xc0000004,0x00000000,0x00000000,
344/*
345
346
347
348 JUMP jump_temp
349
350at 0x00000041 : */ 0x80080000,0x000009c4,
351/*
352
353
354ABSOLUTE dsa_check_reselect = 0
355; dsa_check_reselect determines whether or not the current target and
356; lun match the current DSA
357ENTRY dsa_code_check_reselect
358dsa_code_check_reselect:
359
360
361
362 MOVE LCRC TO SFBR ; LCRC has our ID and his ID bits set
363
364at 0x00000043 : */ 0x72230000,0x00000000,
365/*
366 JUMP REL (wrong_dsa), IF NOT dsa_temp_target, AND MASK 0x80
367
368at 0x00000045 : */ 0x80848000,0x00ffff50,
369/*
370
371
372
373
374
375;
376; Hack - move to scratch first, since SFBR is not writeable
377; via the CPU and hence a MOVE MEMORY instruction.
378;
379
380 MOVE MEMORY 1, reselected_identify, addr_scratch
381
382at 0x00000047 : */ 0xc0000001,0x00000000,0x00000000,
383/*
384
385
386 ; BIG ENDIAN ON MVME16x
387 MOVE SCRATCH3 TO SFBR
388
389at 0x0000004a : */ 0x72370000,0x00000000,
390/*
391
392
393
394; FIXME : we need to accommodate bit fielded and binary here for '7xx/'8xx chips
395; Are you sure about that? richard@sleepie.demon.co.uk
396 JUMP REL (wrong_dsa), IF NOT dsa_temp_lun, AND MASK 0xf8
397
398at 0x0000004c : */ 0x8084f800,0x00ffff34,
399/*
400; Patch the MOVE MEMORY INSTRUCTION such that
401; the source address is the address of this dsa's
402; next pointer.
403 MOVE MEMORY 4, dsa_temp_addr_next, reselected_ok_patch + 4
404
405at 0x0000004e : */ 0xc0000004,0x00000000,0x000007e8,
406/*
407 CALL reselected_ok
408
409at 0x00000051 : */ 0x88080000,0x00000798,
410/*
411
412; Restore DSA following memory moves in reselected_ok
413; dsa_temp_sync doesn't really care about DSA, but it has an
414; optional debug INT so a valid DSA is a good idea.
415 MOVE MEMORY 4, saved_dsa, addr_dsa
416
417at 0x00000053 : */ 0xc0000004,0x00000000,0x00000000,
418/*
419
420 CALL dsa_temp_sync
421
422at 0x00000056 : */ 0x88080000,0x00000000,
423/*
424; Release ACK on the IDENTIFY message _after_ we've set the synchronous
425; transfer parameters!
426 CLEAR ACK
427
428at 0x00000058 : */ 0x60000040,0x00000000,
429/*
430; Implicitly restore pointers on reselection, so a RETURN
431; will transfer control back to the right spot.
432 CALL REL (dsa_code_restore_pointers)
433
434at 0x0000005a : */ 0x88880000,0x00ffff68,
435/*
436 RETURN
437
438at 0x0000005c : */ 0x90080000,0x00000000,
439/*
440ENTRY dsa_zero
441dsa_zero:
442ENTRY dsa_code_template_end
443dsa_code_template_end:
444
445; Perform sanity check for dsa_fields_start == dsa_code_template_end -
446; dsa_zero, puke.
447
448ABSOLUTE dsa_fields_start = 0 ; Sanity marker
449 ; pad 48 bytes (fix this RSN)
450ABSOLUTE dsa_next = 48 ; len 4 Next DSA
451 ; del 4 Previous DSA address
452ABSOLUTE dsa_cmnd = 56 ; len 4 Scsi_Cmnd * for this thread.
453ABSOLUTE dsa_select = 60 ; len 4 Device ID, Period, Offset for
454 ; table indirect select
455ABSOLUTE dsa_msgout = 64 ; len 8 table indirect move parameter for
456 ; select message
457ABSOLUTE dsa_cmdout = 72 ; len 8 table indirect move parameter for
458 ; command
459ABSOLUTE dsa_dataout = 80 ; len 4 code pointer for dataout
460ABSOLUTE dsa_datain = 84 ; len 4 code pointer for datain
461ABSOLUTE dsa_msgin = 88 ; len 8 table indirect move for msgin
462ABSOLUTE dsa_status = 96 ; len 8 table indirect move for status byte
463ABSOLUTE dsa_msgout_other = 104 ; len 8 table indirect for normal message out
464 ; (Synchronous transfer negotiation, etc).
465ABSOLUTE dsa_end = 112
466
467ABSOLUTE schedule = 0 ; Array of JUMP dsa_begin or JUMP (next),
468 ; terminated by a call to JUMP wait_reselect
469
470; Linked lists of DSA structures
471ABSOLUTE reconnect_dsa_head = 0 ; Link list of DSAs which can reconnect
472ABSOLUTE addr_reconnect_dsa_head = 0 ; Address of variable containing
473 ; address of reconnect_dsa_head
474
475; These select the source and destination of a MOVE MEMORY instruction
476ABSOLUTE dmode_memory_to_memory = 0x0
477ABSOLUTE dmode_memory_to_ncr = 0x0
478ABSOLUTE dmode_ncr_to_memory = 0x0
479
480ABSOLUTE addr_scratch = 0x0
481ABSOLUTE addr_temp = 0x0
482
483ABSOLUTE saved_dsa = 0x0
484ABSOLUTE emulfly = 0x0
485ABSOLUTE addr_dsa = 0x0
486
487
488
489; Interrupts -
490; MSB indicates type
491; 0 handle error condition
492; 1 handle message
493; 2 handle normal condition
494; 3 debugging interrupt
495; 4 testing interrupt
496; Next byte indicates specific error
497
498; XXX not yet implemented, I'm not sure if I want to -
499; Next byte indicates the routine the error occurred in
500; The LSB indicates the specific place the error occurred
501
502ABSOLUTE int_err_unexpected_phase = 0x00000000 ; Unexpected phase encountered
503ABSOLUTE int_err_selected = 0x00010000 ; SELECTED (nee RESELECTED)
504ABSOLUTE int_err_unexpected_reselect = 0x00020000
505ABSOLUTE int_err_check_condition = 0x00030000
506ABSOLUTE int_err_no_phase = 0x00040000
507ABSOLUTE int_msg_wdtr = 0x01000000 ; WDTR message received
508ABSOLUTE int_msg_sdtr = 0x01010000 ; SDTR received
509ABSOLUTE int_msg_1 = 0x01020000 ; single byte special message
510 ; received
511
512ABSOLUTE int_norm_select_complete = 0x02000000 ; Select complete, reprogram
513 ; registers.
514ABSOLUTE int_norm_reselect_complete = 0x02010000 ; Nexus established
515ABSOLUTE int_norm_command_complete = 0x02020000 ; Command complete
516ABSOLUTE int_norm_disconnected = 0x02030000 ; Disconnected
517ABSOLUTE int_norm_aborted =0x02040000 ; Aborted *dsa
518ABSOLUTE int_norm_reset = 0x02050000 ; Generated BUS reset.
519ABSOLUTE int_norm_emulateintfly = 0x02060000 ; 53C710 Emulated intfly
520ABSOLUTE int_debug_break = 0x03000000 ; Break point
521
522ABSOLUTE int_debug_panic = 0x030b0000 ; Panic driver
523
524
525ABSOLUTE int_test_1 = 0x04000000 ; Test 1 complete
526ABSOLUTE int_test_2 = 0x04010000 ; Test 2 complete
527ABSOLUTE int_test_3 = 0x04020000 ; Test 3 complete
528
529
530; These should start with 0x05000000, with low bits incrementing for
531; each one.
532
533
534
535ABSOLUTE NCR53c7xx_msg_abort = 0 ; Pointer to abort message
536ABSOLUTE NCR53c7xx_msg_reject = 0 ; Pointer to reject message
537ABSOLUTE NCR53c7xx_zero = 0 ; long with zero in it, use for source
538ABSOLUTE NCR53c7xx_sink = 0 ; long to dump worthless data in
539ABSOLUTE NOP_insn = 0 ; NOP instruction
540
541; Pointer to message, potentially multi-byte
542ABSOLUTE msg_buf = 0
543
544; Pointer to holding area for reselection information
545ABSOLUTE reselected_identify = 0
546ABSOLUTE reselected_tag = 0
547
548; Request sense command pointer, it's a 6 byte command, should
549; be constant for all commands since we always want 16 bytes of
550; sense and we don't need to change any fields as we did under
551; SCSI-I when we actually cared about the LUN field.
552;EXTERNAL NCR53c7xx_sense ; Request sense command
553
554
555; dsa_schedule
556; PURPOSE : after a DISCONNECT message has been received, and pointers
557; saved, insert the current DSA structure at the head of the
558; disconnected queue and fall through to the scheduler.
559;
560; CALLS : OK
561;
562; INPUTS : dsa - current DSA structure, reconnect_dsa_head - list
563; of disconnected commands
564;
565; MODIFIES : SCRATCH, reconnect_dsa_head
566;
567; EXITS : always passes control to schedule
568
569ENTRY dsa_schedule
570dsa_schedule:
571
572
573
574
575;
576; Calculate the address of the next pointer within the DSA
577; structure of the command that is currently disconnecting
578;
579
580 ; Read what should be the current DSA from memory - actual DSA
581 ; register is probably corrupt
582 MOVE MEMORY 4, saved_dsa, addr_scratch
583
584at 0x0000005e : */ 0xc0000004,0x00000000,0x00000000,
585/*
586
587
588
589 MOVE SCRATCH0 + dsa_next TO SCRATCH0
590
591at 0x00000061 : */ 0x7e343000,0x00000000,
592/*
593 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
594
595at 0x00000063 : */ 0x7f350000,0x00000000,
596/*
597 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
598
599at 0x00000065 : */ 0x7f360000,0x00000000,
600/*
601 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
602
603at 0x00000067 : */ 0x7f370000,0x00000000,
604/*
605
606; Point the next field of this DSA structure at the current disconnected
607; list
608
609 MOVE MEMORY 4, addr_scratch, dsa_schedule_insert + 8
610
611at 0x00000069 : */ 0xc0000004,0x00000000,0x000001b8,
612/*
613
614dsa_schedule_insert:
615 MOVE MEMORY 4, reconnect_dsa_head, 0
616
617at 0x0000006c : */ 0xc0000004,0x00000000,0x00000000,
618/*
619
620; And update the head pointer.
621
622 ; Read what should be the current DSA from memory - actual DSA
623 ; register is probably corrupt
624 MOVE MEMORY 4, saved_dsa, addr_scratch
625
626at 0x0000006f : */ 0xc0000004,0x00000000,0x00000000,
627/*
628
629
630
631
632 MOVE MEMORY 4, addr_scratch, reconnect_dsa_head
633
634at 0x00000072 : */ 0xc0000004,0x00000000,0x00000000,
635/*
636
637
638
639
640
641
642 CLEAR ACK
643
644at 0x00000075 : */ 0x60000040,0x00000000,
645/*
646
647
648 ; Time to correct DSA following memory move
649 MOVE MEMORY 4, saved_dsa, addr_dsa
650
651at 0x00000077 : */ 0xc0000004,0x00000000,0x00000000,
652/*
653
654 WAIT DISCONNECT
655
656at 0x0000007a : */ 0x48000000,0x00000000,
657/*
658
659
660
661
662
663
664 JUMP schedule
665
666at 0x0000007c : */ 0x80080000,0x00000000,
667/*
668
669
670;
671; select
672;
673; PURPOSE : establish a nexus for the SCSI command referenced by DSA.
674; On success, the current DSA structure is removed from the issue
675; queue. Usually, this is entered as a fall-through from schedule,
676; although the contingent allegiance handling code will write
677; the select entry address to the DSP to restart a command as a
678; REQUEST SENSE. A message is sent (usually IDENTIFY, although
679; additional SDTR or WDTR messages may be sent). COMMAND OUT
680; is handled.
681;
682; INPUTS : DSA - SCSI command, issue_dsa_head
683;
684; CALLS : NOT OK
685;
686; MODIFIES : SCRATCH, issue_dsa_head
687;
688; EXITS : on reselection or selection, go to select_failed
689; otherwise, RETURN so control is passed back to
690; dsa_begin.
691;
692
693ENTRY select
694select:
695
696
697
698
699
700
701
702
703 CLEAR TARGET
704
705at 0x0000007e : */ 0x60000200,0x00000000,
706/*
707
708; XXX
709;
710; In effect, SELECTION operations are backgrounded, with execution
711; continuing until code which waits for REQ or a fatal interrupt is
712; encountered.
713;
714; So, for more performance, we could overlap the code which removes
715; the command from the NCRs issue queue with the selection, but
716; at this point I don't want to deal with the error recovery.
717;
718
719
720
721 ; Enable selection timer
722
723
724
725 MOVE CTEST7 & 0xef TO CTEST7
726
727at 0x00000080 : */ 0x7c1bef00,0x00000000,
728/*
729
730
731 SELECT ATN FROM dsa_select, select_failed
732
733at 0x00000082 : */ 0x4300003c,0x00000828,
734/*
735 JUMP select_msgout, WHEN MSG_OUT
736
737at 0x00000084 : */ 0x860b0000,0x00000218,
738/*
739ENTRY select_msgout
740select_msgout:
741
742 ; Disable selection timer
743 MOVE CTEST7 | 0x10 TO CTEST7
744
745at 0x00000086 : */ 0x7a1b1000,0x00000000,
746/*
747
748 MOVE FROM dsa_msgout, WHEN MSG_OUT
749
750at 0x00000088 : */ 0x1e000000,0x00000040,
751/*
752
753
754
755
756
757
758
759
760
761
762 RETURN
763
764at 0x0000008a : */ 0x90080000,0x00000000,
765/*
766
767;
768; select_done
769;
770; PURPOSE: continue on to normal data transfer; called as the exit
771; point from dsa_begin.
772;
773; INPUTS: dsa
774;
775; CALLS: OK
776;
777;
778
779select_done:
780
781; NOTE DSA is corrupt when we arrive here!
782 MOVE MEMORY 4, saved_dsa, addr_dsa
783
784at 0x0000008c : */ 0xc0000004,0x00000000,0x00000000,
785/*
786
787
788
789
790
791
792
793
794; After a successful selection, we should get either a CMD phase or
795; some transfer request negotiation message.
796
797 JUMP cmdout, WHEN CMD
798
799at 0x0000008f : */ 0x820b0000,0x0000025c,
800/*
801 INT int_err_unexpected_phase, WHEN NOT MSG_IN
802
803at 0x00000091 : */ 0x9f030000,0x00000000,
804/*
805
806select_msg_in:
807 CALL msg_in, WHEN MSG_IN
808
809at 0x00000093 : */ 0x8f0b0000,0x0000041c,
810/*
811 JUMP select_msg_in, WHEN MSG_IN
812
813at 0x00000095 : */ 0x870b0000,0x0000024c,
814/*
815
816cmdout:
817 INT int_err_unexpected_phase, WHEN NOT CMD
818
819at 0x00000097 : */ 0x9a030000,0x00000000,
820/*
821
822
823
824ENTRY cmdout_cmdout
825cmdout_cmdout:
826
827 MOVE FROM dsa_cmdout, WHEN CMD
828
829at 0x00000099 : */ 0x1a000000,0x00000048,
830/*
831
832
833
834
835;
836; data_transfer
837; other_out
838; other_in
839; other_transfer
840;
841; PURPOSE : handle the main data transfer for a SCSI command in
842; several parts. In the first part, data_transfer, DATA_IN
843; and DATA_OUT phases are allowed, with the user provided
844; code (usually dynamically generated based on the scatter/gather
845; list associated with a SCSI command) called to handle these
846; phases.
847;
848; After control has passed to one of the user provided
849; DATA_IN or DATA_OUT routines, back calls are made to
850; other_transfer_in or other_transfer_out to handle non-DATA IN
851; and DATA OUT phases respectively, with the state of the active
852; data pointer being preserved in TEMP.
853;
854; On completion, the user code passes control to other_transfer
855; which causes DATA_IN and DATA_OUT to result in unexpected_phase
856; interrupts so that data overruns may be trapped.
857;
858; INPUTS : DSA - SCSI command
859;
860; CALLS : OK in data_transfer_start, not ok in other_out and other_in, ok in
861; other_transfer
862;
863; MODIFIES : SCRATCH
864;
865; EXITS : if STATUS IN is detected, signifying command completion,
866; the NCR jumps to command_complete. If MSG IN occurs, a
867; CALL is made to msg_in. Otherwise, other_transfer runs in
868; an infinite loop.
869;
870
871ENTRY data_transfer
872data_transfer:
873 JUMP cmdout_cmdout, WHEN CMD
874
875at 0x0000009b : */ 0x820b0000,0x00000264,
876/*
877 CALL msg_in, WHEN MSG_IN
878
879at 0x0000009d : */ 0x8f0b0000,0x0000041c,
880/*
881 INT int_err_unexpected_phase, WHEN MSG_OUT
882
883at 0x0000009f : */ 0x9e0b0000,0x00000000,
884/*
885 JUMP do_dataout, WHEN DATA_OUT
886
887at 0x000000a1 : */ 0x800b0000,0x000002a4,
888/*
889 JUMP do_datain, WHEN DATA_IN
890
891at 0x000000a3 : */ 0x810b0000,0x000002fc,
892/*
893 JUMP command_complete, WHEN STATUS
894
895at 0x000000a5 : */ 0x830b0000,0x0000065c,
896/*
897 JUMP data_transfer
898
899at 0x000000a7 : */ 0x80080000,0x0000026c,
900/*
901ENTRY end_data_transfer
902end_data_transfer:
903
904;
905; FIXME: On NCR53c700 and NCR53c700-66 chips, do_dataout/do_datain
906; should be fixed up whenever the nexus changes so it can point to the
907; correct routine for that command.
908;
909
910
911; Nasty jump to dsa->dataout
912do_dataout:
913
914 MOVE MEMORY 4, saved_dsa, addr_scratch
915
916at 0x000000a9 : */ 0xc0000004,0x00000000,0x00000000,
917/*
918
919
920
921 MOVE SCRATCH0 + dsa_dataout TO SCRATCH0
922
923at 0x000000ac : */ 0x7e345000,0x00000000,
924/*
925 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
926
927at 0x000000ae : */ 0x7f350000,0x00000000,
928/*
929 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
930
931at 0x000000b0 : */ 0x7f360000,0x00000000,
932/*
933 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
934
935at 0x000000b2 : */ 0x7f370000,0x00000000,
936/*
937
938 MOVE MEMORY 4, addr_scratch, dataout_to_jump + 4
939
940at 0x000000b4 : */ 0xc0000004,0x00000000,0x000002e0,
941/*
942
943dataout_to_jump:
944 MOVE MEMORY 4, 0, dataout_jump + 4
945
946at 0x000000b7 : */ 0xc0000004,0x00000000,0x000002f8,
947/*
948
949 ; Time to correct DSA following memory move
950 MOVE MEMORY 4, saved_dsa, addr_dsa
951
952at 0x000000ba : */ 0xc0000004,0x00000000,0x00000000,
953/*
954
955dataout_jump:
956 JUMP 0
957
958at 0x000000bd : */ 0x80080000,0x00000000,
959/*
960
; Nasty jump to dsa->datain
962do_datain:
963
964 MOVE MEMORY 4, saved_dsa, addr_scratch
965
966at 0x000000bf : */ 0xc0000004,0x00000000,0x00000000,
967/*
968
969
970
971 MOVE SCRATCH0 + dsa_datain TO SCRATCH0
972
973at 0x000000c2 : */ 0x7e345400,0x00000000,
974/*
975 MOVE SCRATCH1 + 0 TO SCRATCH1 WITH CARRY
976
977at 0x000000c4 : */ 0x7f350000,0x00000000,
978/*
979 MOVE SCRATCH2 + 0 TO SCRATCH2 WITH CARRY
980
981at 0x000000c6 : */ 0x7f360000,0x00000000,
982/*
983 MOVE SCRATCH3 + 0 TO SCRATCH3 WITH CARRY
984
985at 0x000000c8 : */ 0x7f370000,0x00000000,
986/*
987
988 MOVE MEMORY 4, addr_scratch, datain_to_jump + 4
989
990at 0x000000ca : */ 0xc0000004,0x00000000,0x00000338,
991/*
992
993ENTRY datain_to_jump
994datain_to_jump:
995 MOVE MEMORY 4, 0, datain_jump + 4
996
997at 0x000000cd : */ 0xc0000004,0x00000000,0x00000350,
998/*
999
1000 ; Time to correct DSA following memory move
1001 MOVE MEMORY 4, saved_dsa, addr_dsa
1002
1003at 0x000000d0 : */ 0xc0000004,0x00000000,0x00000000,
1004/*
1005
1006
1007
1008
1009datain_jump:
1010 JUMP 0
1011
1012at 0x000000d3 : */ 0x80080000,0x00000000,
1013/*
1014
1015
1016
1017; Note that other_out and other_in loop until a non-data phase
1018; is discovered, so we only execute return statements when we
1019; can go on to the next data phase block move statement.
1020
1021ENTRY other_out
1022other_out:
1023
1024
1025
1026 INT int_err_unexpected_phase, WHEN CMD
1027
1028at 0x000000d5 : */ 0x9a0b0000,0x00000000,
1029/*
1030 JUMP msg_in_restart, WHEN MSG_IN
1031
1032at 0x000000d7 : */ 0x870b0000,0x000003fc,
1033/*
1034 INT int_err_unexpected_phase, WHEN MSG_OUT
1035
1036at 0x000000d9 : */ 0x9e0b0000,0x00000000,
1037/*
1038 INT int_err_unexpected_phase, WHEN DATA_IN
1039
1040at 0x000000db : */ 0x990b0000,0x00000000,
1041/*
1042 JUMP command_complete, WHEN STATUS
1043
1044at 0x000000dd : */ 0x830b0000,0x0000065c,
1045/*
1046 JUMP other_out, WHEN NOT DATA_OUT
1047
1048at 0x000000df : */ 0x80030000,0x00000354,
1049/*
1050
1051; TEMP should be OK, as we got here from a call in the user dataout code.
1052
1053 RETURN
1054
1055at 0x000000e1 : */ 0x90080000,0x00000000,
1056/*
1057
1058ENTRY other_in
1059other_in:
1060
1061
1062
1063 INT int_err_unexpected_phase, WHEN CMD
1064
1065at 0x000000e3 : */ 0x9a0b0000,0x00000000,
1066/*
1067 JUMP msg_in_restart, WHEN MSG_IN
1068
1069at 0x000000e5 : */ 0x870b0000,0x000003fc,
1070/*
1071 INT int_err_unexpected_phase, WHEN MSG_OUT
1072
1073at 0x000000e7 : */ 0x9e0b0000,0x00000000,
1074/*
1075 INT int_err_unexpected_phase, WHEN DATA_OUT
1076
1077at 0x000000e9 : */ 0x980b0000,0x00000000,
1078/*
1079 JUMP command_complete, WHEN STATUS
1080
1081at 0x000000eb : */ 0x830b0000,0x0000065c,
1082/*
1083 JUMP other_in, WHEN NOT DATA_IN
1084
1085at 0x000000ed : */ 0x81030000,0x0000038c,
1086/*
1087
1088; TEMP should be OK, as we got here from a call in the user datain code.
1089
1090 RETURN
1091
1092at 0x000000ef : */ 0x90080000,0x00000000,
1093/*
1094
1095
1096ENTRY other_transfer
1097other_transfer:
1098 INT int_err_unexpected_phase, WHEN CMD
1099
1100at 0x000000f1 : */ 0x9a0b0000,0x00000000,
1101/*
1102 CALL msg_in, WHEN MSG_IN
1103
1104at 0x000000f3 : */ 0x8f0b0000,0x0000041c,
1105/*
1106 INT int_err_unexpected_phase, WHEN MSG_OUT
1107
1108at 0x000000f5 : */ 0x9e0b0000,0x00000000,
1109/*
1110 INT int_err_unexpected_phase, WHEN DATA_OUT
1111
1112at 0x000000f7 : */ 0x980b0000,0x00000000,
1113/*
1114 INT int_err_unexpected_phase, WHEN DATA_IN
1115
1116at 0x000000f9 : */ 0x990b0000,0x00000000,
1117/*
1118 JUMP command_complete, WHEN STATUS
1119
1120at 0x000000fb : */ 0x830b0000,0x0000065c,
1121/*
1122 JUMP other_transfer
1123
1124at 0x000000fd : */ 0x80080000,0x000003c4,
1125/*
1126
1127;
1128; msg_in_restart
1129; msg_in
1130; munge_msg
1131;
1132; PURPOSE : process messages from a target. msg_in is called when the
; caller hasn't read the first byte of the message. munge_msg
1134; is called when the caller has read the first byte of the message,
1135; and left it in SFBR. msg_in_restart is called when the caller
1136; hasn't read the first byte of the message, and wishes RETURN
1137; to transfer control back to the address of the conditional
1138; CALL instruction rather than to the instruction after it.
1139;
1140; Various int_* interrupts are generated when the host system
1141; needs to intervene, as is the case with SDTR, WDTR, and
1142; INITIATE RECOVERY messages.
1143;
1144; When the host system handles one of these interrupts,
1145; it can respond by reentering at reject_message,
1146; which rejects the message and returns control to
1147; the caller of msg_in or munge_msg, accept_message
1148; which clears ACK and returns control, or reply_message
1149; which sends the message pointed to by the DSA
1150; msgout_other table indirect field.
1151;
1152; DISCONNECT messages are handled by moving the command
1153; to the reconnect_dsa_queue.
1154
1155; NOTE: DSA should be valid when we get here - we cannot save both it
1156; and TEMP in this routine.
1157
1158;
1159; INPUTS : DSA - SCSI COMMAND, SFBR - first byte of message (munge_msg
1160; only)
1161;
1162; CALLS : NO. The TEMP register isn't backed up to allow nested calls.
1163;
1164; MODIFIES : SCRATCH, DSA on DISCONNECT
1165;
1166; EXITS : On receipt of SAVE DATA POINTER, RESTORE POINTERS,
1167; and normal return from message handlers running under
1168; Linux, control is returned to the caller. Receipt
1169; of DISCONNECT messages pass control to dsa_schedule.
1170;
1171ENTRY msg_in_restart
1172msg_in_restart:
1173; XXX - hackish
1174;
1175; Since it's easier to debug changes to the statically
1176; compiled code, rather than the dynamically generated
1177; stuff, such as
1178;
1179; MOVE x, y, WHEN data_phase
1180; CALL other_z, WHEN NOT data_phase
1181; MOVE x, y, WHEN data_phase
1182;
1183; I'd like to have certain routines (notably the message handler)
1184; restart on the conditional call rather than the next instruction.
1185;
1186; So, subtract 8 from the return address
1187
1188 MOVE TEMP0 + 0xf8 TO TEMP0
1189
1190at 0x000000ff : */ 0x7e1cf800,0x00000000,
1191/*
1192 MOVE TEMP1 + 0xff TO TEMP1 WITH CARRY
1193
1194at 0x00000101 : */ 0x7f1dff00,0x00000000,
1195/*
1196 MOVE TEMP2 + 0xff TO TEMP2 WITH CARRY
1197
1198at 0x00000103 : */ 0x7f1eff00,0x00000000,
1199/*
1200 MOVE TEMP3 + 0xff TO TEMP3 WITH CARRY
1201
1202at 0x00000105 : */ 0x7f1fff00,0x00000000,
1203/*
1204
1205ENTRY msg_in
1206msg_in:
1207 MOVE 1, msg_buf, WHEN MSG_IN
1208
1209at 0x00000107 : */ 0x0f000001,0x00000000,
1210/*
1211
1212munge_msg:
1213 JUMP munge_extended, IF 0x01 ; EXTENDED MESSAGE
1214
1215at 0x00000109 : */ 0x800c0001,0x00000574,
1216/*
1217 JUMP munge_2, IF 0x20, AND MASK 0xdf ; two byte message
1218
1219at 0x0000010b : */ 0x800cdf20,0x00000464,
1220/*
1221;
1222; XXX - I've seen a handful of broken SCSI devices which fail to issue
1223; a SAVE POINTERS message before disconnecting in the middle of
1224; a transfer, assuming that the DATA POINTER will be implicitly
1225; restored.
1226;
1227; Historically, I've often done an implicit save when the DISCONNECT
1228; message is processed. We may want to consider having the option of
1229; doing that here.
1230;
1231 JUMP munge_save_data_pointer, IF 0x02 ; SAVE DATA POINTER
1232
1233at 0x0000010d : */ 0x800c0002,0x0000046c,
1234/*
1235 JUMP munge_restore_pointers, IF 0x03 ; RESTORE POINTERS
1236
1237at 0x0000010f : */ 0x800c0003,0x00000518,
1238/*
1239 JUMP munge_disconnect, IF 0x04 ; DISCONNECT
1240
1241at 0x00000111 : */ 0x800c0004,0x0000056c,
1242/*
1243 INT int_msg_1, IF 0x07 ; MESSAGE REJECT
1244
1245at 0x00000113 : */ 0x980c0007,0x01020000,
1246/*
1247 INT int_msg_1, IF 0x0f ; INITIATE RECOVERY
1248
1249at 0x00000115 : */ 0x980c000f,0x01020000,
1250/*
1251
1252
1253
1254 JUMP reject_message
1255
1256at 0x00000117 : */ 0x80080000,0x00000604,
1257/*
1258
1259munge_2:
1260 JUMP reject_message
1261
1262at 0x00000119 : */ 0x80080000,0x00000604,
1263/*
1264;
1265; The SCSI standard allows targets to recover from transient
1266; error conditions by backing up the data pointer with a
1267; RESTORE POINTERS message.
1268;
1269; So, we must save and restore the _residual_ code as well as
1270; the current instruction pointer. Because of this messiness,
1271; it is simpler to put dynamic code in the dsa for this and to
1272; just do a simple jump down there.
1273;
1274
1275munge_save_data_pointer:
1276
1277 ; We have something in TEMP here, so first we must save that
1278 MOVE TEMP0 TO SFBR
1279
1280at 0x0000011b : */ 0x721c0000,0x00000000,
1281/*
1282 MOVE SFBR TO SCRATCH0
1283
1284at 0x0000011d : */ 0x6a340000,0x00000000,
1285/*
1286 MOVE TEMP1 TO SFBR
1287
1288at 0x0000011f : */ 0x721d0000,0x00000000,
1289/*
1290 MOVE SFBR TO SCRATCH1
1291
1292at 0x00000121 : */ 0x6a350000,0x00000000,
1293/*
1294 MOVE TEMP2 TO SFBR
1295
1296at 0x00000123 : */ 0x721e0000,0x00000000,
1297/*
1298 MOVE SFBR TO SCRATCH2
1299
1300at 0x00000125 : */ 0x6a360000,0x00000000,
1301/*
1302 MOVE TEMP3 TO SFBR
1303
1304at 0x00000127 : */ 0x721f0000,0x00000000,
1305/*
1306 MOVE SFBR TO SCRATCH3
1307
1308at 0x00000129 : */ 0x6a370000,0x00000000,
1309/*
1310 MOVE MEMORY 4, addr_scratch, jump_temp + 4
1311
1312at 0x0000012b : */ 0xc0000004,0x00000000,0x000009c8,
1313/*
1314 ; Now restore DSA
1315 MOVE MEMORY 4, saved_dsa, addr_dsa
1316
1317at 0x0000012e : */ 0xc0000004,0x00000000,0x00000000,
1318/*
1319
1320 MOVE DSA0 + dsa_save_data_pointer TO SFBR
1321
1322at 0x00000131 : */ 0x76100000,0x00000000,
1323/*
1324 MOVE SFBR TO SCRATCH0
1325
1326at 0x00000133 : */ 0x6a340000,0x00000000,
1327/*
1328 MOVE DSA1 + 0xff TO SFBR WITH CARRY
1329
1330at 0x00000135 : */ 0x7711ff00,0x00000000,
1331/*
1332 MOVE SFBR TO SCRATCH1
1333
1334at 0x00000137 : */ 0x6a350000,0x00000000,
1335/*
1336 MOVE DSA2 + 0xff TO SFBR WITH CARRY
1337
1338at 0x00000139 : */ 0x7712ff00,0x00000000,
1339/*
1340 MOVE SFBR TO SCRATCH2
1341
1342at 0x0000013b : */ 0x6a360000,0x00000000,
1343/*
1344 MOVE DSA3 + 0xff TO SFBR WITH CARRY
1345
1346at 0x0000013d : */ 0x7713ff00,0x00000000,
1347/*
1348 MOVE SFBR TO SCRATCH3
1349
1350at 0x0000013f : */ 0x6a370000,0x00000000,
1351/*
1352
1353
1354 MOVE MEMORY 4, addr_scratch, jump_dsa_save + 4
1355
1356at 0x00000141 : */ 0xc0000004,0x00000000,0x00000514,
1357/*
1358
1359jump_dsa_save:
1360 JUMP 0
1361
1362at 0x00000144 : */ 0x80080000,0x00000000,
1363/*
1364
1365munge_restore_pointers:
1366
1367 ; The code at dsa_restore_pointers will RETURN, but we don't care
1368 ; about TEMP here, as it will overwrite it anyway.
1369
1370 MOVE DSA0 + dsa_restore_pointers TO SFBR
1371
1372at 0x00000146 : */ 0x76100000,0x00000000,
1373/*
1374 MOVE SFBR TO SCRATCH0
1375
1376at 0x00000148 : */ 0x6a340000,0x00000000,
1377/*
1378 MOVE DSA1 + 0xff TO SFBR WITH CARRY
1379
1380at 0x0000014a : */ 0x7711ff00,0x00000000,
1381/*
1382 MOVE SFBR TO SCRATCH1
1383
1384at 0x0000014c : */ 0x6a350000,0x00000000,
1385/*
1386 MOVE DSA2 + 0xff TO SFBR WITH CARRY
1387
1388at 0x0000014e : */ 0x7712ff00,0x00000000,
1389/*
1390 MOVE SFBR TO SCRATCH2
1391
1392at 0x00000150 : */ 0x6a360000,0x00000000,
1393/*
1394 MOVE DSA3 + 0xff TO SFBR WITH CARRY
1395
1396at 0x00000152 : */ 0x7713ff00,0x00000000,
1397/*
1398 MOVE SFBR TO SCRATCH3
1399
1400at 0x00000154 : */ 0x6a370000,0x00000000,
1401/*
1402
1403
1404 MOVE MEMORY 4, addr_scratch, jump_dsa_restore + 4
1405
1406at 0x00000156 : */ 0xc0000004,0x00000000,0x00000568,
1407/*
1408
1409jump_dsa_restore:
1410 JUMP 0
1411
1412at 0x00000159 : */ 0x80080000,0x00000000,
1413/*
1414
1415
1416munge_disconnect:
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437 JUMP dsa_schedule
1438
1439at 0x0000015b : */ 0x80080000,0x00000178,
1440/*
1441
1442
1443
1444
1445
1446munge_extended:
1447 CLEAR ACK
1448
1449at 0x0000015d : */ 0x60000040,0x00000000,
1450/*
1451 INT int_err_unexpected_phase, WHEN NOT MSG_IN
1452
1453at 0x0000015f : */ 0x9f030000,0x00000000,
1454/*
1455 MOVE 1, msg_buf + 1, WHEN MSG_IN
1456
1457at 0x00000161 : */ 0x0f000001,0x00000001,
1458/*
1459 JUMP munge_extended_2, IF 0x02
1460
1461at 0x00000163 : */ 0x800c0002,0x000005a4,
1462/*
1463 JUMP munge_extended_3, IF 0x03
1464
1465at 0x00000165 : */ 0x800c0003,0x000005d4,
1466/*
1467 JUMP reject_message
1468
1469at 0x00000167 : */ 0x80080000,0x00000604,
1470/*
1471
1472munge_extended_2:
1473 CLEAR ACK
1474
1475at 0x00000169 : */ 0x60000040,0x00000000,
1476/*
1477 MOVE 1, msg_buf + 2, WHEN MSG_IN
1478
1479at 0x0000016b : */ 0x0f000001,0x00000002,
1480/*
1481 JUMP reject_message, IF NOT 0x02 ; Must be WDTR
1482
1483at 0x0000016d : */ 0x80040002,0x00000604,
1484/*
1485 CLEAR ACK
1486
1487at 0x0000016f : */ 0x60000040,0x00000000,
1488/*
1489 MOVE 1, msg_buf + 3, WHEN MSG_IN
1490
1491at 0x00000171 : */ 0x0f000001,0x00000003,
1492/*
1493 INT int_msg_wdtr
1494
1495at 0x00000173 : */ 0x98080000,0x01000000,
1496/*
1497
1498munge_extended_3:
1499 CLEAR ACK
1500
1501at 0x00000175 : */ 0x60000040,0x00000000,
1502/*
1503 MOVE 1, msg_buf + 2, WHEN MSG_IN
1504
1505at 0x00000177 : */ 0x0f000001,0x00000002,
1506/*
1507 JUMP reject_message, IF NOT 0x01 ; Must be SDTR
1508
1509at 0x00000179 : */ 0x80040001,0x00000604,
1510/*
1511 CLEAR ACK
1512
1513at 0x0000017b : */ 0x60000040,0x00000000,
1514/*
1515 MOVE 2, msg_buf + 3, WHEN MSG_IN
1516
1517at 0x0000017d : */ 0x0f000002,0x00000003,
1518/*
1519 INT int_msg_sdtr
1520
1521at 0x0000017f : */ 0x98080000,0x01010000,
1522/*
1523
1524ENTRY reject_message
1525reject_message:
1526 SET ATN
1527
1528at 0x00000181 : */ 0x58000008,0x00000000,
1529/*
1530 CLEAR ACK
1531
1532at 0x00000183 : */ 0x60000040,0x00000000,
1533/*
1534 MOVE 1, NCR53c7xx_msg_reject, WHEN MSG_OUT
1535
1536at 0x00000185 : */ 0x0e000001,0x00000000,
1537/*
1538 RETURN
1539
1540at 0x00000187 : */ 0x90080000,0x00000000,
1541/*
1542
1543ENTRY accept_message
1544accept_message:
1545 CLEAR ATN
1546
1547at 0x00000189 : */ 0x60000008,0x00000000,
1548/*
1549 CLEAR ACK
1550
1551at 0x0000018b : */ 0x60000040,0x00000000,
1552/*
1553 RETURN
1554
1555at 0x0000018d : */ 0x90080000,0x00000000,
1556/*
1557
1558ENTRY respond_message
1559respond_message:
1560 SET ATN
1561
1562at 0x0000018f : */ 0x58000008,0x00000000,
1563/*
1564 CLEAR ACK
1565
1566at 0x00000191 : */ 0x60000040,0x00000000,
1567/*
1568 MOVE FROM dsa_msgout_other, WHEN MSG_OUT
1569
1570at 0x00000193 : */ 0x1e000000,0x00000068,
1571/*
1572 RETURN
1573
1574at 0x00000195 : */ 0x90080000,0x00000000,
1575/*
1576
1577;
1578; command_complete
1579;
1580; PURPOSE : handle command termination when STATUS IN is detected by reading
1581; a status byte followed by a command termination message.
1582;
1583; Normal termination results in an INTFLY instruction, and
1584; the host system can pick out which command terminated by
1585; examining the MESSAGE and STATUS buffers of all currently
1586; executing commands;
1587;
1588; Abnormal (CHECK_CONDITION) termination results in an
1589; int_err_check_condition interrupt so that a REQUEST SENSE
1590; command can be issued out-of-order so that no other command
1591; clears the contingent allegiance condition.
1592;
1593;
1594; INPUTS : DSA - command
1595;
1596; CALLS : OK
1597;
1598; EXITS : On successful termination, control is passed to schedule.
1599; On abnormal termination, the user will usually modify the
1600; DSA fields and corresponding buffers and return control
1601; to select.
1602;
1603
1604ENTRY command_complete
1605command_complete:
1606 MOVE FROM dsa_status, WHEN STATUS
1607
1608at 0x00000197 : */ 0x1b000000,0x00000060,
1609/*
1610
1611 MOVE SFBR TO SCRATCH0 ; Save status
1612
1613at 0x00000199 : */ 0x6a340000,0x00000000,
1614/*
1615
1616ENTRY command_complete_msgin
1617command_complete_msgin:
1618 MOVE FROM dsa_msgin, WHEN MSG_IN
1619
1620at 0x0000019b : */ 0x1f000000,0x00000058,
1621/*
1622; Indicate that we should be expecting a disconnect
1623
1624
1625
1626 ; Above code cleared the Unexpected Disconnect bit, what do we do?
1627
1628 CLEAR ACK
1629
1630at 0x0000019d : */ 0x60000040,0x00000000,
1631/*
1632
1633 WAIT DISCONNECT
1634
1635at 0x0000019f : */ 0x48000000,0x00000000,
1636/*
1637
1638;
1639; The SCSI specification states that when a UNIT ATTENTION condition
1640; is pending, as indicated by a CHECK CONDITION status message,
1641; the target shall revert to asynchronous transfers. Since
1642; synchronous transfers parameters are maintained on a per INITIATOR/TARGET
1643; basis, and returning control to our scheduler could work on a command
1644; running on another lun on that target using the old parameters, we must
1645; interrupt the host processor to get them changed, or change them ourselves.
1646;
1647; Once SCSI-II tagged queueing is implemented, things will be even more
1648; hairy, since contingent allegiance conditions exist on a per-target/lun
1649; basis, and issuing a new command with a different tag would clear it.
1650; In these cases, we must interrupt the host processor to get a request
1651; added to the HEAD of the queue with the request sense command, or we
1652; must automatically issue the request sense command.
1653
1654
1655
1656
1657
1658
1659
1660 INT int_norm_emulateintfly
1661
1662at 0x000001a1 : */ 0x98080000,0x02060000,
1663/*
1664
1665
1666
1667
1668
1669
1670 ; Time to correct DSA following memory move
1671 MOVE MEMORY 4, saved_dsa, addr_dsa
1672
1673at 0x000001a3 : */ 0xc0000004,0x00000000,0x00000000,
1674/*
1675
1676
1677
1678
1679
1680 JUMP schedule
1681
1682at 0x000001a6 : */ 0x80080000,0x00000000,
1683/*
1684command_failed:
1685 INT int_err_check_condition
1686
1687at 0x000001a8 : */ 0x98080000,0x00030000,
1688/*
1689
1690
1691
1692
1693;
1694; wait_reselect
1695;
1696; PURPOSE : This is essentially the idle routine, where control lands
1697; when there are no new processes to schedule. wait_reselect
1698; waits for reselection, selection, and new commands.
1699;
1700; When a successful reselection occurs, with the aid
1701; of fixed up code in each DSA, wait_reselect walks the
1702; reconnect_dsa_queue, asking each dsa if the target ID
1703; and LUN match its.
1704;
1705; If a match is found, a call is made back to reselected_ok,
1706; which through the miracles of self modifying code, extracts
1707; the found DSA from the reconnect_dsa_queue and then
1708; returns control to the DSAs thread of execution.
1709;
1710; INPUTS : NONE
1711;
1712; CALLS : OK
1713;
1714; MODIFIES : DSA,
1715;
1716; EXITS : On successful reselection, control is returned to the
1717; DSA which called reselected_ok. If the WAIT RESELECT
1718; was interrupted by a new commands arrival signaled by
1719; SIG_P, control is passed to schedule. If the NCR is
1720; selected, the host system is interrupted with an
1721; int_err_selected which is usually responded to by
1722; setting DSP to the target_abort address.
1723
1724ENTRY wait_reselect
1725wait_reselect:
1726
1727
1728
1729
1730
1731
1732 WAIT RESELECT wait_reselect_failed
1733
1734at 0x000001aa : */ 0x50000000,0x00000800,
1735/*
1736
1737reselected:
1738
1739
1740
1741 CLEAR TARGET
1742
1743at 0x000001ac : */ 0x60000200,0x00000000,
1744/*
1745
1746 ; Read all data needed to reestablish the nexus -
1747 MOVE 1, reselected_identify, WHEN MSG_IN
1748
1749at 0x000001ae : */ 0x0f000001,0x00000000,
1750/*
1751 ; We used to CLEAR ACK here.
1752
1753
1754
1755
1756
1757 ; Point DSA at the current head of the disconnected queue.
1758
1759 MOVE MEMORY 4, reconnect_dsa_head, addr_scratch
1760
1761at 0x000001b0 : */ 0xc0000004,0x00000000,0x00000000,
1762/*
1763
1764
1765 MOVE MEMORY 4, addr_scratch, saved_dsa
1766
1767at 0x000001b3 : */ 0xc0000004,0x00000000,0x00000000,
1768/*
1769
1770
1771
1772
1773 ; Fix the update-next pointer so that the reconnect_dsa_head
1774 ; pointer is the one that will be updated if this DSA is a hit
1775 ; and we remove it from the queue.
1776
1777 MOVE MEMORY 4, addr_reconnect_dsa_head, reselected_ok_patch + 8
1778
1779at 0x000001b6 : */ 0xc0000004,0x00000000,0x000007ec,
1780/*
1781
1782 ; Time to correct DSA following memory move
1783 MOVE MEMORY 4, saved_dsa, addr_dsa
1784
1785at 0x000001b9 : */ 0xc0000004,0x00000000,0x00000000,
1786/*
1787
1788
1789ENTRY reselected_check_next
1790reselected_check_next:
1791
1792
1793
1794 ; Check for a NULL pointer.
1795 MOVE DSA0 TO SFBR
1796
1797at 0x000001bc : */ 0x72100000,0x00000000,
1798/*
1799 JUMP reselected_not_end, IF NOT 0
1800
1801at 0x000001be : */ 0x80040000,0x00000738,
1802/*
1803 MOVE DSA1 TO SFBR
1804
1805at 0x000001c0 : */ 0x72110000,0x00000000,
1806/*
1807 JUMP reselected_not_end, IF NOT 0
1808
1809at 0x000001c2 : */ 0x80040000,0x00000738,
1810/*
1811 MOVE DSA2 TO SFBR
1812
1813at 0x000001c4 : */ 0x72120000,0x00000000,
1814/*
1815 JUMP reselected_not_end, IF NOT 0
1816
1817at 0x000001c6 : */ 0x80040000,0x00000738,
1818/*
1819 MOVE DSA3 TO SFBR
1820
1821at 0x000001c8 : */ 0x72130000,0x00000000,
1822/*
1823 JUMP reselected_not_end, IF NOT 0
1824
1825at 0x000001ca : */ 0x80040000,0x00000738,
1826/*
1827 INT int_err_unexpected_reselect
1828
1829at 0x000001cc : */ 0x98080000,0x00020000,
1830/*
1831
1832reselected_not_end:
1833 ;
1834 ; XXX the ALU is only eight bits wide, and the assembler
1835 ; wont do the dirt work for us. As long as dsa_check_reselect
1836 ; is negative, we need to sign extend with 1 bits to the full
1837 ; 32 bit width of the address.
1838 ;
1839 ; A potential work around would be to have a known alignment
1840 ; of the DSA structure such that the base address plus
1841 ; dsa_check_reselect doesn't require carrying from bytes
1842 ; higher than the LSB.
1843 ;
1844
1845 MOVE DSA0 TO SFBR
1846
1847at 0x000001ce : */ 0x72100000,0x00000000,
1848/*
1849 MOVE SFBR + dsa_check_reselect TO SCRATCH0
1850
1851at 0x000001d0 : */ 0x6e340000,0x00000000,
1852/*
1853 MOVE DSA1 TO SFBR
1854
1855at 0x000001d2 : */ 0x72110000,0x00000000,
1856/*
1857 MOVE SFBR + 0xff TO SCRATCH1 WITH CARRY
1858
1859at 0x000001d4 : */ 0x6f35ff00,0x00000000,
1860/*
1861 MOVE DSA2 TO SFBR
1862
1863at 0x000001d6 : */ 0x72120000,0x00000000,
1864/*
1865 MOVE SFBR + 0xff TO SCRATCH2 WITH CARRY
1866
1867at 0x000001d8 : */ 0x6f36ff00,0x00000000,
1868/*
1869 MOVE DSA3 TO SFBR
1870
1871at 0x000001da : */ 0x72130000,0x00000000,
1872/*
1873 MOVE SFBR + 0xff TO SCRATCH3 WITH CARRY
1874
1875at 0x000001dc : */ 0x6f37ff00,0x00000000,
1876/*
1877
1878
1879 MOVE MEMORY 4, addr_scratch, reselected_check + 4
1880
1881at 0x000001de : */ 0xc0000004,0x00000000,0x00000794,
1882/*
1883
1884
1885 ; Time to correct DSA following memory move
1886 MOVE MEMORY 4, saved_dsa, addr_dsa
1887
1888at 0x000001e1 : */ 0xc0000004,0x00000000,0x00000000,
1889/*
1890
1891reselected_check:
1892 JUMP 0
1893
1894at 0x000001e4 : */ 0x80080000,0x00000000,
1895/*
1896
1897
1898;
1899;
1900
1901; We have problems here - the memory move corrupts TEMP and DSA. This
1902; routine is called from DSA code, and patched from many places. Scratch
1903; is probably free when it is called.
1904; We have to:
1905; copy temp to scratch, one byte at a time
1906; write scratch to patch a jump in place of the return
1907; do the move memory
1908; jump to the patched in return address
1909; DSA is corrupt when we get here, and can be left corrupt
1910
1911ENTRY reselected_ok
1912reselected_ok:
1913 MOVE TEMP0 TO SFBR
1914
1915at 0x000001e6 : */ 0x721c0000,0x00000000,
1916/*
1917 MOVE SFBR TO SCRATCH0
1918
1919at 0x000001e8 : */ 0x6a340000,0x00000000,
1920/*
1921 MOVE TEMP1 TO SFBR
1922
1923at 0x000001ea : */ 0x721d0000,0x00000000,
1924/*
1925 MOVE SFBR TO SCRATCH1
1926
1927at 0x000001ec : */ 0x6a350000,0x00000000,
1928/*
1929 MOVE TEMP2 TO SFBR
1930
1931at 0x000001ee : */ 0x721e0000,0x00000000,
1932/*
1933 MOVE SFBR TO SCRATCH2
1934
1935at 0x000001f0 : */ 0x6a360000,0x00000000,
1936/*
1937 MOVE TEMP3 TO SFBR
1938
1939at 0x000001f2 : */ 0x721f0000,0x00000000,
1940/*
1941 MOVE SFBR TO SCRATCH3
1942
1943at 0x000001f4 : */ 0x6a370000,0x00000000,
1944/*
1945 MOVE MEMORY 4, addr_scratch, reselected_ok_jump + 4
1946
1947at 0x000001f6 : */ 0xc0000004,0x00000000,0x000007f4,
1948/*
1949reselected_ok_patch:
1950 MOVE MEMORY 4, 0, 0
1951
1952at 0x000001f9 : */ 0xc0000004,0x00000000,0x00000000,
1953/*
1954reselected_ok_jump:
1955 JUMP 0
1956
1957at 0x000001fc : */ 0x80080000,0x00000000,
1958/*
1959
1960
1961
1962
1963
1964selected:
1965 INT int_err_selected;
1966
1967at 0x000001fe : */ 0x98080000,0x00010000,
1968/*
1969
1970;
1971; A select or reselect failure can be caused by one of two conditions :
1972; 1. SIG_P was set. This will be the case if the user has written
1973; a new value to a previously NULL head of the issue queue.
1974;
1975; 2. The NCR53c810 was selected or reselected by another device.
1976;
1977; 3. The bus was already busy since we were selected or reselected
1978; before starting the command.
1979
1980wait_reselect_failed:
1981
1982
1983
1984; Check selected bit.
1985
1986 ; Must work out how to tell if we are selected....
1987
1988
1989
1990
1991; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
1992 MOVE CTEST2 & 0x40 TO SFBR
1993
1994at 0x00000200 : */ 0x74164000,0x00000000,
1995/*
1996 JUMP schedule, IF 0x40
1997
1998at 0x00000202 : */ 0x800c0040,0x00000000,
1999/*
2000; Check connected bit.
2001; FIXME: this needs to change if we support target mode
2002 MOVE ISTAT & 0x08 TO SFBR
2003
2004at 0x00000204 : */ 0x74210800,0x00000000,
2005/*
2006 JUMP reselected, IF 0x08
2007
2008at 0x00000206 : */ 0x800c0008,0x000006b0,
2009/*
2010; FIXME : Something bogus happened, and we shouldn't fail silently.
2011
2012
2013
2014 INT int_debug_panic
2015
2016at 0x00000208 : */ 0x98080000,0x030b0000,
2017/*
2018
2019
2020
2021select_failed:
2022
2023 ; Disable selection timer
2024 MOVE CTEST7 | 0x10 TO CTEST7
2025
2026at 0x0000020a : */ 0x7a1b1000,0x00000000,
2027/*
2028
2029
2030
2031
2032; Otherwise, mask the selected and reselected bits off SIST0
2033
2034 ; Let's assume we don't get selected for now
2035 MOVE SSTAT0 & 0x10 TO SFBR
2036
2037at 0x0000020c : */ 0x740d1000,0x00000000,
2038/*
2039
2040
2041
2042
2043 JUMP reselected, IF 0x10
2044
2045at 0x0000020e : */ 0x800c0010,0x000006b0,
2046/*
2047; If SIGP is set, the user just gave us another command, and
2048; we should restart or return to the scheduler.
2049; Reading CTEST2 clears the SIG_P bit in the ISTAT register.
2050 MOVE CTEST2 & 0x40 TO SFBR
2051
2052at 0x00000210 : */ 0x74164000,0x00000000,
2053/*
2054 JUMP select, IF 0x40
2055
2056at 0x00000212 : */ 0x800c0040,0x000001f8,
2057/*
2058; Check connected bit.
2059; FIXME: this needs to change if we support target mode
2060; FIXME: is this really necessary?
2061 MOVE ISTAT & 0x08 TO SFBR
2062
2063at 0x00000214 : */ 0x74210800,0x00000000,
2064/*
2065 JUMP reselected, IF 0x08
2066
2067at 0x00000216 : */ 0x800c0008,0x000006b0,
2068/*
2069; FIXME : Something bogus happened, and we shouldn't fail silently.
2070
2071
2072
2073 INT int_debug_panic
2074
2075at 0x00000218 : */ 0x98080000,0x030b0000,
2076/*
2077
2078
2079;
2080; test_1
2081; test_2
2082;
2083; PURPOSE : run some verification tests on the NCR. test_1
2084; copies test_src to test_dest and interrupts the host
2085; processor, testing for cache coherency and interrupt
2086; problems in the processes.
2087;
2088; test_2 runs a command with offsets relative to the
2089; DSA on entry, and is useful for miscellaneous experimentation.
2090;
2091
2092; Verify that interrupts are working correctly and that we don't
2093; have a cache invalidation problem.
2094
2095ABSOLUTE test_src = 0, test_dest = 0
2096ENTRY test_1
2097test_1:
2098 MOVE MEMORY 4, test_src, test_dest
2099
2100at 0x0000021a : */ 0xc0000004,0x00000000,0x00000000,
2101/*
2102 INT int_test_1
2103
2104at 0x0000021d : */ 0x98080000,0x04000000,
2105/*
2106
2107;
2108; Run arbitrary commands, with test code establishing a DSA
2109;
2110
2111ENTRY test_2
2112test_2:
2113 CLEAR TARGET
2114
2115at 0x0000021f : */ 0x60000200,0x00000000,
2116/*
2117
2118 ; Enable selection timer
2119
2120
2121
2122 MOVE CTEST7 & 0xef TO CTEST7
2123
2124at 0x00000221 : */ 0x7c1bef00,0x00000000,
2125/*
2126
2127
2128 SELECT ATN FROM 0, test_2_fail
2129
2130at 0x00000223 : */ 0x43000000,0x000008dc,
2131/*
2132 JUMP test_2_msgout, WHEN MSG_OUT
2133
2134at 0x00000225 : */ 0x860b0000,0x0000089c,
2135/*
2136ENTRY test_2_msgout
2137test_2_msgout:
2138
2139 ; Disable selection timer
2140 MOVE CTEST7 | 0x10 TO CTEST7
2141
2142at 0x00000227 : */ 0x7a1b1000,0x00000000,
2143/*
2144
2145 MOVE FROM 8, WHEN MSG_OUT
2146
2147at 0x00000229 : */ 0x1e000000,0x00000008,
2148/*
2149 MOVE FROM 16, WHEN CMD
2150
2151at 0x0000022b : */ 0x1a000000,0x00000010,
2152/*
2153 MOVE FROM 24, WHEN DATA_IN
2154
2155at 0x0000022d : */ 0x19000000,0x00000018,
2156/*
2157 MOVE FROM 32, WHEN STATUS
2158
2159at 0x0000022f : */ 0x1b000000,0x00000020,
2160/*
2161 MOVE FROM 40, WHEN MSG_IN
2162
2163at 0x00000231 : */ 0x1f000000,0x00000028,
2164/*
2165
2166
2167
2168 CLEAR ACK
2169
2170at 0x00000233 : */ 0x60000040,0x00000000,
2171/*
2172 WAIT DISCONNECT
2173
2174at 0x00000235 : */ 0x48000000,0x00000000,
2175/*
2176test_2_fail:
2177
2178 ; Disable selection timer
2179 MOVE CTEST7 | 0x10 TO CTEST7
2180
2181at 0x00000237 : */ 0x7a1b1000,0x00000000,
2182/*
2183
2184 INT int_test_2
2185
2186at 0x00000239 : */ 0x98080000,0x04010000,
2187/*
2188
2189ENTRY debug_break
2190debug_break:
2191 INT int_debug_break
2192
2193at 0x0000023b : */ 0x98080000,0x03000000,
2194/*
2195
2196;
2197; initiator_abort
2198; target_abort
2199;
2200; PURPOSE : Abort the currently established nexus from with initiator
2201; or target mode.
2202;
2203;
2204
2205ENTRY target_abort
2206target_abort:
2207 SET TARGET
2208
2209at 0x0000023d : */ 0x58000200,0x00000000,
2210/*
2211 DISCONNECT
2212
2213at 0x0000023f : */ 0x48000000,0x00000000,
2214/*
2215 CLEAR TARGET
2216
2217at 0x00000241 : */ 0x60000200,0x00000000,
2218/*
2219 JUMP schedule
2220
2221at 0x00000243 : */ 0x80080000,0x00000000,
2222/*
2223
2224ENTRY initiator_abort
2225initiator_abort:
2226 SET ATN
2227
2228at 0x00000245 : */ 0x58000008,0x00000000,
2229/*
2230;
2231; The SCSI-I specification says that targets may go into MSG out at
2232; their leisure upon receipt of the ATN single. On all versions of the
2233; specification, we can't change phases until REQ transitions true->false,
2234; so we need to sink/source one byte of data to allow the transition.
2235;
2236; For the sake of safety, we'll only source one byte of data in all
2237; cases, but to accommodate the SCSI-I dain bramage, we'll sink an
2238; arbitrary number of bytes.
2239 JUMP spew_cmd, WHEN CMD
2240
2241at 0x00000247 : */ 0x820b0000,0x0000094c,
2242/*
2243 JUMP eat_msgin, WHEN MSG_IN
2244
2245at 0x00000249 : */ 0x870b0000,0x0000095c,
2246/*
2247 JUMP eat_datain, WHEN DATA_IN
2248
2249at 0x0000024b : */ 0x810b0000,0x0000098c,
2250/*
2251 JUMP eat_status, WHEN STATUS
2252
2253at 0x0000024d : */ 0x830b0000,0x00000974,
2254/*
2255 JUMP spew_dataout, WHEN DATA_OUT
2256
2257at 0x0000024f : */ 0x800b0000,0x000009a4,
2258/*
2259 JUMP sated
2260
2261at 0x00000251 : */ 0x80080000,0x000009ac,
2262/*
2263spew_cmd:
2264 MOVE 1, NCR53c7xx_zero, WHEN CMD
2265
2266at 0x00000253 : */ 0x0a000001,0x00000000,
2267/*
2268 JUMP sated
2269
2270at 0x00000255 : */ 0x80080000,0x000009ac,
2271/*
2272eat_msgin:
2273 MOVE 1, NCR53c7xx_sink, WHEN MSG_IN
2274
2275at 0x00000257 : */ 0x0f000001,0x00000000,
2276/*
2277 JUMP eat_msgin, WHEN MSG_IN
2278
2279at 0x00000259 : */ 0x870b0000,0x0000095c,
2280/*
2281 JUMP sated
2282
2283at 0x0000025b : */ 0x80080000,0x000009ac,
2284/*
2285eat_status:
2286 MOVE 1, NCR53c7xx_sink, WHEN STATUS
2287
2288at 0x0000025d : */ 0x0b000001,0x00000000,
2289/*
2290 JUMP eat_status, WHEN STATUS
2291
2292at 0x0000025f : */ 0x830b0000,0x00000974,
2293/*
2294 JUMP sated
2295
2296at 0x00000261 : */ 0x80080000,0x000009ac,
2297/*
2298eat_datain:
2299 MOVE 1, NCR53c7xx_sink, WHEN DATA_IN
2300
2301at 0x00000263 : */ 0x09000001,0x00000000,
2302/*
2303 JUMP eat_datain, WHEN DATA_IN
2304
2305at 0x00000265 : */ 0x810b0000,0x0000098c,
2306/*
2307 JUMP sated
2308
2309at 0x00000267 : */ 0x80080000,0x000009ac,
2310/*
2311spew_dataout:
2312 MOVE 1, NCR53c7xx_zero, WHEN DATA_OUT
2313
2314at 0x00000269 : */ 0x08000001,0x00000000,
2315/*
2316sated:
2317
2318
2319
2320 MOVE 1, NCR53c7xx_msg_abort, WHEN MSG_OUT
2321
2322at 0x0000026b : */ 0x0e000001,0x00000000,
2323/*
2324 WAIT DISCONNECT
2325
2326at 0x0000026d : */ 0x48000000,0x00000000,
2327/*
2328 INT int_norm_aborted
2329
2330at 0x0000026f : */ 0x98080000,0x02040000,
2331/*
2332
2333
2334
2335
2336; Little patched jump, used to overcome problems with TEMP getting
2337; corrupted on memory moves.
2338
2339jump_temp:
2340 JUMP 0
2341
2342at 0x00000271 : */ 0x80080000,0x00000000,
2343};
2344
2345#define A_NCR53c7xx_msg_abort 0x00000000
2346static u32 A_NCR53c7xx_msg_abort_used[] __attribute((unused)) = {
2347 0x0000026c,
2348};
2349
2350#define A_NCR53c7xx_msg_reject 0x00000000
2351static u32 A_NCR53c7xx_msg_reject_used[] __attribute((unused)) = {
2352 0x00000186,
2353};
2354
2355#define A_NCR53c7xx_sink 0x00000000
2356static u32 A_NCR53c7xx_sink_used[] __attribute((unused)) = {
2357 0x00000258,
2358 0x0000025e,
2359 0x00000264,
2360};
2361
2362#define A_NCR53c7xx_zero 0x00000000
2363static u32 A_NCR53c7xx_zero_used[] __attribute((unused)) = {
2364 0x00000254,
2365 0x0000026a,
2366};
2367
2368#define A_NOP_insn 0x00000000
2369static u32 A_NOP_insn_used[] __attribute((unused)) = {
2370 0x00000017,
2371};
2372
2373#define A_addr_dsa 0x00000000
2374static u32 A_addr_dsa_used[] __attribute((unused)) = {
2375 0x0000000f,
2376 0x00000026,
2377 0x00000033,
2378 0x00000040,
2379 0x00000055,
2380 0x00000079,
2381 0x0000008e,
2382 0x000000bc,
2383 0x000000d2,
2384 0x00000130,
2385 0x000001a5,
2386 0x000001bb,
2387 0x000001e3,
2388};
2389
2390#define A_addr_reconnect_dsa_head 0x00000000
2391static u32 A_addr_reconnect_dsa_head_used[] __attribute((unused)) = {
2392 0x000001b7,
2393};
2394
2395#define A_addr_scratch 0x00000000
2396static u32 A_addr_scratch_used[] __attribute((unused)) = {
2397 0x00000002,
2398 0x00000004,
2399 0x00000008,
2400 0x00000020,
2401 0x00000022,
2402 0x00000049,
2403 0x00000060,
2404 0x0000006a,
2405 0x00000071,
2406 0x00000073,
2407 0x000000ab,
2408 0x000000b5,
2409 0x000000c1,
2410 0x000000cb,
2411 0x0000012c,
2412 0x00000142,
2413 0x00000157,
2414 0x000001b2,
2415 0x000001b4,
2416 0x000001df,
2417 0x000001f7,
2418};
2419
2420#define A_addr_temp 0x00000000
2421static u32 A_addr_temp_used[] __attribute((unused)) = {
2422};
2423
2424#define A_dmode_memory_to_memory 0x00000000
2425static u32 A_dmode_memory_to_memory_used[] __attribute((unused)) = {
2426};
2427
2428#define A_dmode_memory_to_ncr 0x00000000
2429static u32 A_dmode_memory_to_ncr_used[] __attribute((unused)) = {
2430};
2431
2432#define A_dmode_ncr_to_memory 0x00000000
2433static u32 A_dmode_ncr_to_memory_used[] __attribute((unused)) = {
2434};
2435
2436#define A_dsa_check_reselect 0x00000000
2437static u32 A_dsa_check_reselect_used[] __attribute((unused)) = {
2438 0x000001d0,
2439};
2440
2441#define A_dsa_cmdout 0x00000048
2442static u32 A_dsa_cmdout_used[] __attribute((unused)) = {
2443 0x0000009a,
2444};
2445
2446#define A_dsa_cmnd 0x00000038
2447static u32 A_dsa_cmnd_used[] __attribute((unused)) = {
2448};
2449
2450#define A_dsa_datain 0x00000054
2451static u32 A_dsa_datain_used[] __attribute((unused)) = {
2452 0x000000c2,
2453};
2454
2455#define A_dsa_dataout 0x00000050
2456static u32 A_dsa_dataout_used[] __attribute((unused)) = {
2457 0x000000ac,
2458};
2459
2460#define A_dsa_end 0x00000070
2461static u32 A_dsa_end_used[] __attribute((unused)) = {
2462};
2463
2464#define A_dsa_fields_start 0x00000000
2465static u32 A_dsa_fields_start_used[] __attribute((unused)) = {
2466};
2467
2468#define A_dsa_msgin 0x00000058
2469static u32 A_dsa_msgin_used[] __attribute((unused)) = {
2470 0x0000019c,
2471};
2472
2473#define A_dsa_msgout 0x00000040
2474static u32 A_dsa_msgout_used[] __attribute((unused)) = {
2475 0x00000089,
2476};
2477
2478#define A_dsa_msgout_other 0x00000068
2479static u32 A_dsa_msgout_other_used[] __attribute((unused)) = {
2480 0x00000194,
2481};
2482
2483#define A_dsa_next 0x00000030
2484static u32 A_dsa_next_used[] __attribute((unused)) = {
2485 0x00000061,
2486};
2487
2488#define A_dsa_restore_pointers 0x00000000
2489static u32 A_dsa_restore_pointers_used[] __attribute((unused)) = {
2490 0x00000146,
2491};
2492
2493#define A_dsa_save_data_pointer 0x00000000
2494static u32 A_dsa_save_data_pointer_used[] __attribute((unused)) = {
2495 0x00000131,
2496};
2497
2498#define A_dsa_select 0x0000003c
2499static u32 A_dsa_select_used[] __attribute((unused)) = {
2500 0x00000082,
2501};
2502
2503#define A_dsa_sscf_710 0x00000000
2504static u32 A_dsa_sscf_710_used[] __attribute((unused)) = {
2505 0x00000007,
2506};
2507
2508#define A_dsa_status 0x00000060
2509static u32 A_dsa_status_used[] __attribute((unused)) = {
2510 0x00000198,
2511};
2512
2513#define A_dsa_temp_addr_array_value 0x00000000
2514static u32 A_dsa_temp_addr_array_value_used[] __attribute((unused)) = {
2515};
2516
2517#define A_dsa_temp_addr_dsa_value 0x00000000
2518static u32 A_dsa_temp_addr_dsa_value_used[] __attribute((unused)) = {
2519 0x00000001,
2520};
2521
2522#define A_dsa_temp_addr_new_value 0x00000000
2523static u32 A_dsa_temp_addr_new_value_used[] __attribute((unused)) = {
2524};
2525
2526#define A_dsa_temp_addr_next 0x00000000
2527static u32 A_dsa_temp_addr_next_used[] __attribute((unused)) = {
2528 0x0000001c,
2529 0x0000004f,
2530};
2531
2532#define A_dsa_temp_addr_residual 0x00000000
2533static u32 A_dsa_temp_addr_residual_used[] __attribute((unused)) = {
2534 0x0000002d,
2535 0x0000003b,
2536};
2537
2538#define A_dsa_temp_addr_saved_pointer 0x00000000
2539static u32 A_dsa_temp_addr_saved_pointer_used[] __attribute((unused)) = {
2540 0x0000002b,
2541 0x00000037,
2542};
2543
2544#define A_dsa_temp_addr_saved_residual 0x00000000
2545static u32 A_dsa_temp_addr_saved_residual_used[] __attribute((unused)) = {
2546 0x0000002e,
2547 0x0000003a,
2548};
2549
2550#define A_dsa_temp_lun 0x00000000
2551static u32 A_dsa_temp_lun_used[] __attribute((unused)) = {
2552 0x0000004c,
2553};
2554
2555#define A_dsa_temp_next 0x00000000
2556static u32 A_dsa_temp_next_used[] __attribute((unused)) = {
2557 0x0000001f,
2558};
2559
2560#define A_dsa_temp_sync 0x00000000
2561static u32 A_dsa_temp_sync_used[] __attribute((unused)) = {
2562 0x00000057,
2563};
2564
2565#define A_dsa_temp_target 0x00000000
2566static u32 A_dsa_temp_target_used[] __attribute((unused)) = {
2567 0x00000045,
2568};
2569
2570#define A_emulfly 0x00000000
2571static u32 A_emulfly_used[] __attribute((unused)) = {
2572};
2573
2574#define A_int_debug_break 0x03000000
2575static u32 A_int_debug_break_used[] __attribute((unused)) = {
2576 0x0000023c,
2577};
2578
2579#define A_int_debug_panic 0x030b0000
2580static u32 A_int_debug_panic_used[] __attribute((unused)) = {
2581 0x00000209,
2582 0x00000219,
2583};
2584
2585#define A_int_err_check_condition 0x00030000
2586static u32 A_int_err_check_condition_used[] __attribute((unused)) = {
2587 0x000001a9,
2588};
2589
2590#define A_int_err_no_phase 0x00040000
2591static u32 A_int_err_no_phase_used[] __attribute((unused)) = {
2592};
2593
2594#define A_int_err_selected 0x00010000
2595static u32 A_int_err_selected_used[] __attribute((unused)) = {
2596 0x000001ff,
2597};
2598
2599#define A_int_err_unexpected_phase 0x00000000
2600static u32 A_int_err_unexpected_phase_used[] __attribute((unused)) = {
2601 0x00000092,
2602 0x00000098,
2603 0x000000a0,
2604 0x000000d6,
2605 0x000000da,
2606 0x000000dc,
2607 0x000000e4,
2608 0x000000e8,
2609 0x000000ea,
2610 0x000000f2,
2611 0x000000f6,
2612 0x000000f8,
2613 0x000000fa,
2614 0x00000160,
2615};
2616
2617#define A_int_err_unexpected_reselect 0x00020000
2618static u32 A_int_err_unexpected_reselect_used[] __attribute((unused)) = {
2619 0x000001cd,
2620};
2621
2622#define A_int_msg_1 0x01020000
2623static u32 A_int_msg_1_used[] __attribute((unused)) = {
2624 0x00000114,
2625 0x00000116,
2626};
2627
2628#define A_int_msg_sdtr 0x01010000
2629static u32 A_int_msg_sdtr_used[] __attribute((unused)) = {
2630 0x00000180,
2631};
2632
2633#define A_int_msg_wdtr 0x01000000
2634static u32 A_int_msg_wdtr_used[] __attribute((unused)) = {
2635 0x00000174,
2636};
2637
2638#define A_int_norm_aborted 0x02040000
2639static u32 A_int_norm_aborted_used[] __attribute((unused)) = {
2640 0x00000270,
2641};
2642
2643#define A_int_norm_command_complete 0x02020000
2644static u32 A_int_norm_command_complete_used[] __attribute((unused)) = {
2645};
2646
2647#define A_int_norm_disconnected 0x02030000
2648static u32 A_int_norm_disconnected_used[] __attribute((unused)) = {
2649};
2650
2651#define A_int_norm_emulateintfly 0x02060000
2652static u32 A_int_norm_emulateintfly_used[] __attribute((unused)) = {
2653 0x000001a2,
2654};
2655
2656#define A_int_norm_reselect_complete 0x02010000
2657static u32 A_int_norm_reselect_complete_used[] __attribute((unused)) = {
2658};
2659
2660#define A_int_norm_reset 0x02050000
2661static u32 A_int_norm_reset_used[] __attribute((unused)) = {
2662};
2663
2664#define A_int_norm_select_complete 0x02000000
2665static u32 A_int_norm_select_complete_used[] __attribute((unused)) = {
2666};
2667
2668#define A_int_test_1 0x04000000
2669static u32 A_int_test_1_used[] __attribute((unused)) = {
2670 0x0000021e,
2671};
2672
2673#define A_int_test_2 0x04010000
2674static u32 A_int_test_2_used[] __attribute((unused)) = {
2675 0x0000023a,
2676};
2677
2678#define A_int_test_3 0x04020000
2679static u32 A_int_test_3_used[] __attribute((unused)) = {
2680};
2681
2682#define A_msg_buf 0x00000000
2683static u32 A_msg_buf_used[] __attribute((unused)) = {
2684 0x00000108,
2685 0x00000162,
2686 0x0000016c,
2687 0x00000172,
2688 0x00000178,
2689 0x0000017e,
2690};
2691
2692#define A_reconnect_dsa_head 0x00000000
2693static u32 A_reconnect_dsa_head_used[] __attribute((unused)) = {
2694 0x0000006d,
2695 0x00000074,
2696 0x000001b1,
2697};
2698
2699#define A_reselected_identify 0x00000000
2700static u32 A_reselected_identify_used[] __attribute((unused)) = {
2701 0x00000048,
2702 0x000001af,
2703};
2704
2705#define A_reselected_tag 0x00000000
2706static u32 A_reselected_tag_used[] __attribute((unused)) = {
2707};
2708
2709#define A_saved_dsa 0x00000000
2710static u32 A_saved_dsa_used[] __attribute((unused)) = {
2711 0x00000005,
2712 0x0000000e,
2713 0x00000023,
2714 0x00000025,
2715 0x00000032,
2716 0x0000003f,
2717 0x00000054,
2718 0x0000005f,
2719 0x00000070,
2720 0x00000078,
2721 0x0000008d,
2722 0x000000aa,
2723 0x000000bb,
2724 0x000000c0,
2725 0x000000d1,
2726 0x0000012f,
2727 0x000001a4,
2728 0x000001b5,
2729 0x000001ba,
2730 0x000001e2,
2731};
2732
2733#define A_schedule 0x00000000
2734static u32 A_schedule_used[] __attribute((unused)) = {
2735 0x0000007d,
2736 0x000001a7,
2737 0x00000203,
2738 0x00000244,
2739};
2740
2741#define A_test_dest 0x00000000
2742static u32 A_test_dest_used[] __attribute((unused)) = {
2743 0x0000021c,
2744};
2745
2746#define A_test_src 0x00000000
2747static u32 A_test_src_used[] __attribute((unused)) = {
2748 0x0000021b,
2749};
2750
2751#define Ent_accept_message 0x00000624
2752#define Ent_cmdout_cmdout 0x00000264
2753#define Ent_command_complete 0x0000065c
2754#define Ent_command_complete_msgin 0x0000066c
2755#define Ent_data_transfer 0x0000026c
2756#define Ent_datain_to_jump 0x00000334
2757#define Ent_debug_break 0x000008ec
2758#define Ent_dsa_code_begin 0x00000000
2759#define Ent_dsa_code_check_reselect 0x0000010c
2760#define Ent_dsa_code_fix_jump 0x00000058
2761#define Ent_dsa_code_restore_pointers 0x000000d8
2762#define Ent_dsa_code_save_data_pointer 0x000000a4
2763#define Ent_dsa_code_template 0x00000000
2764#define Ent_dsa_code_template_end 0x00000178
2765#define Ent_dsa_schedule 0x00000178
2766#define Ent_dsa_zero 0x00000178
2767#define Ent_end_data_transfer 0x000002a4
2768#define Ent_initiator_abort 0x00000914
2769#define Ent_msg_in 0x0000041c
2770#define Ent_msg_in_restart 0x000003fc
2771#define Ent_other_in 0x0000038c
2772#define Ent_other_out 0x00000354
2773#define Ent_other_transfer 0x000003c4
2774#define Ent_reject_message 0x00000604
2775#define Ent_reselected_check_next 0x000006f0
2776#define Ent_reselected_ok 0x00000798
2777#define Ent_respond_message 0x0000063c
2778#define Ent_select 0x000001f8
2779#define Ent_select_msgout 0x00000218
2780#define Ent_target_abort 0x000008f4
2781#define Ent_test_1 0x00000868
2782#define Ent_test_2 0x0000087c
2783#define Ent_test_2_msgout 0x0000089c
2784#define Ent_wait_reselect 0x000006a8
2785static u32 LABELPATCHES[] __attribute((unused)) = {
2786 0x00000011,
2787 0x0000001a,
2788 0x0000001d,
2789 0x00000028,
2790 0x0000002a,
2791 0x00000035,
2792 0x00000038,
2793 0x00000042,
2794 0x00000050,
2795 0x00000052,
2796 0x0000006b,
2797 0x00000083,
2798 0x00000085,
2799 0x00000090,
2800 0x00000094,
2801 0x00000096,
2802 0x0000009c,
2803 0x0000009e,
2804 0x000000a2,
2805 0x000000a4,
2806 0x000000a6,
2807 0x000000a8,
2808 0x000000b6,
2809 0x000000b9,
2810 0x000000cc,
2811 0x000000cf,
2812 0x000000d8,
2813 0x000000de,
2814 0x000000e0,
2815 0x000000e6,
2816 0x000000ec,
2817 0x000000ee,
2818 0x000000f4,
2819 0x000000fc,
2820 0x000000fe,
2821 0x0000010a,
2822 0x0000010c,
2823 0x0000010e,
2824 0x00000110,
2825 0x00000112,
2826 0x00000118,
2827 0x0000011a,
2828 0x0000012d,
2829 0x00000143,
2830 0x00000158,
2831 0x0000015c,
2832 0x00000164,
2833 0x00000166,
2834 0x00000168,
2835 0x0000016e,
2836 0x0000017a,
2837 0x000001ab,
2838 0x000001b8,
2839 0x000001bf,
2840 0x000001c3,
2841 0x000001c7,
2842 0x000001cb,
2843 0x000001e0,
2844 0x000001f8,
2845 0x00000207,
2846 0x0000020f,
2847 0x00000213,
2848 0x00000217,
2849 0x00000224,
2850 0x00000226,
2851 0x00000248,
2852 0x0000024a,
2853 0x0000024c,
2854 0x0000024e,
2855 0x00000250,
2856 0x00000252,
2857 0x00000256,
2858 0x0000025a,
2859 0x0000025c,
2860 0x00000260,
2861 0x00000262,
2862 0x00000266,
2863 0x00000268,
2864};
2865
2866static struct {
2867 u32 offset;
2868 void *address;
2869} EXTERNAL_PATCHES[] __attribute((unused)) = {
2870};
2871
2872static u32 INSTRUCTIONS __attribute((unused)) = 290;
2873static u32 PATCHES __attribute((unused)) = 78;
2874static u32 EXTERNAL_PATCHES_LEN __attribute((unused)) = 0;
diff --git a/drivers/scsi/53c7xx_u.h_shipped b/drivers/scsi/53c7xx_u.h_shipped
deleted file mode 100644
index 7b337174e228..000000000000
--- a/drivers/scsi/53c7xx_u.h_shipped
+++ /dev/null
@@ -1,102 +0,0 @@
1#undef A_NCR53c7xx_msg_abort
2#undef A_NCR53c7xx_msg_reject
3#undef A_NCR53c7xx_sink
4#undef A_NCR53c7xx_zero
5#undef A_NOP_insn
6#undef A_addr_dsa
7#undef A_addr_reconnect_dsa_head
8#undef A_addr_scratch
9#undef A_addr_temp
10#undef A_dmode_memory_to_memory
11#undef A_dmode_memory_to_ncr
12#undef A_dmode_ncr_to_memory
13#undef A_dsa_check_reselect
14#undef A_dsa_cmdout
15#undef A_dsa_cmnd
16#undef A_dsa_datain
17#undef A_dsa_dataout
18#undef A_dsa_end
19#undef A_dsa_fields_start
20#undef A_dsa_msgin
21#undef A_dsa_msgout
22#undef A_dsa_msgout_other
23#undef A_dsa_next
24#undef A_dsa_restore_pointers
25#undef A_dsa_save_data_pointer
26#undef A_dsa_select
27#undef A_dsa_sscf_710
28#undef A_dsa_status
29#undef A_dsa_temp_addr_array_value
30#undef A_dsa_temp_addr_dsa_value
31#undef A_dsa_temp_addr_new_value
32#undef A_dsa_temp_addr_next
33#undef A_dsa_temp_addr_residual
34#undef A_dsa_temp_addr_saved_pointer
35#undef A_dsa_temp_addr_saved_residual
36#undef A_dsa_temp_lun
37#undef A_dsa_temp_next
38#undef A_dsa_temp_sync
39#undef A_dsa_temp_target
40#undef A_emulfly
41#undef A_int_debug_break
42#undef A_int_debug_panic
43#undef A_int_err_check_condition
44#undef A_int_err_no_phase
45#undef A_int_err_selected
46#undef A_int_err_unexpected_phase
47#undef A_int_err_unexpected_reselect
48#undef A_int_msg_1
49#undef A_int_msg_sdtr
50#undef A_int_msg_wdtr
51#undef A_int_norm_aborted
52#undef A_int_norm_command_complete
53#undef A_int_norm_disconnected
54#undef A_int_norm_emulateintfly
55#undef A_int_norm_reselect_complete
56#undef A_int_norm_reset
57#undef A_int_norm_select_complete
58#undef A_int_test_1
59#undef A_int_test_2
60#undef A_int_test_3
61#undef A_msg_buf
62#undef A_reconnect_dsa_head
63#undef A_reselected_identify
64#undef A_reselected_tag
65#undef A_saved_dsa
66#undef A_schedule
67#undef A_test_dest
68#undef A_test_src
69#undef Ent_accept_message
70#undef Ent_cmdout_cmdout
71#undef Ent_command_complete
72#undef Ent_command_complete_msgin
73#undef Ent_data_transfer
74#undef Ent_datain_to_jump
75#undef Ent_debug_break
76#undef Ent_dsa_code_begin
77#undef Ent_dsa_code_check_reselect
78#undef Ent_dsa_code_fix_jump
79#undef Ent_dsa_code_restore_pointers
80#undef Ent_dsa_code_save_data_pointer
81#undef Ent_dsa_code_template
82#undef Ent_dsa_code_template_end
83#undef Ent_dsa_schedule
84#undef Ent_dsa_zero
85#undef Ent_end_data_transfer
86#undef Ent_initiator_abort
87#undef Ent_msg_in
88#undef Ent_msg_in_restart
89#undef Ent_other_in
90#undef Ent_other_out
91#undef Ent_other_transfer
92#undef Ent_reject_message
93#undef Ent_reselected_check_next
94#undef Ent_reselected_ok
95#undef Ent_respond_message
96#undef Ent_select
97#undef Ent_select_msgout
98#undef Ent_target_abort
99#undef Ent_test_1
100#undef Ent_test_2
101#undef Ent_test_2_msgout
102#undef Ent_wait_reselect
diff --git a/drivers/scsi/BusLogic.c b/drivers/scsi/BusLogic.c
index 96f4cab07614..9b206176f717 100644
--- a/drivers/scsi/BusLogic.c
+++ b/drivers/scsi/BusLogic.c
@@ -304,18 +304,10 @@ static struct BusLogic_CCB *BusLogic_AllocateCCB(struct BusLogic_HostAdapter
304static void BusLogic_DeallocateCCB(struct BusLogic_CCB *CCB) 304static void BusLogic_DeallocateCCB(struct BusLogic_CCB *CCB)
305{ 305{
306 struct BusLogic_HostAdapter *HostAdapter = CCB->HostAdapter; 306 struct BusLogic_HostAdapter *HostAdapter = CCB->HostAdapter;
307 struct scsi_cmnd *cmd = CCB->Command;
308 307
309 if (cmd->use_sg != 0) { 308 scsi_dma_unmap(CCB->Command);
310 pci_unmap_sg(HostAdapter->PCI_Device,
311 (struct scatterlist *)cmd->request_buffer,
312 cmd->use_sg, cmd->sc_data_direction);
313 } else if (cmd->request_bufflen != 0) {
314 pci_unmap_single(HostAdapter->PCI_Device, CCB->DataPointer,
315 CCB->DataLength, cmd->sc_data_direction);
316 }
317 pci_unmap_single(HostAdapter->PCI_Device, CCB->SenseDataPointer, 309 pci_unmap_single(HostAdapter->PCI_Device, CCB->SenseDataPointer,
318 CCB->SenseDataLength, PCI_DMA_FROMDEVICE); 310 CCB->SenseDataLength, PCI_DMA_FROMDEVICE);
319 311
320 CCB->Command = NULL; 312 CCB->Command = NULL;
321 CCB->Status = BusLogic_CCB_Free; 313 CCB->Status = BusLogic_CCB_Free;
@@ -2648,7 +2640,8 @@ static void BusLogic_ProcessCompletedCCBs(struct BusLogic_HostAdapter *HostAdapt
2648 */ 2640 */
2649 if (CCB->CDB[0] == INQUIRY && CCB->CDB[1] == 0 && CCB->HostAdapterStatus == BusLogic_CommandCompletedNormally) { 2641 if (CCB->CDB[0] == INQUIRY && CCB->CDB[1] == 0 && CCB->HostAdapterStatus == BusLogic_CommandCompletedNormally) {
2650 struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[CCB->TargetID]; 2642 struct BusLogic_TargetFlags *TargetFlags = &HostAdapter->TargetFlags[CCB->TargetID];
2651 struct SCSI_Inquiry *InquiryResult = (struct SCSI_Inquiry *) Command->request_buffer; 2643 struct SCSI_Inquiry *InquiryResult =
2644 (struct SCSI_Inquiry *) scsi_sglist(Command);
2652 TargetFlags->TargetExists = true; 2645 TargetFlags->TargetExists = true;
2653 TargetFlags->TaggedQueuingSupported = InquiryResult->CmdQue; 2646 TargetFlags->TaggedQueuingSupported = InquiryResult->CmdQue;
2654 TargetFlags->WideTransfersSupported = InquiryResult->WBus16; 2647 TargetFlags->WideTransfersSupported = InquiryResult->WBus16;
@@ -2819,9 +2812,8 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
2819 int CDB_Length = Command->cmd_len; 2812 int CDB_Length = Command->cmd_len;
2820 int TargetID = Command->device->id; 2813 int TargetID = Command->device->id;
2821 int LogicalUnit = Command->device->lun; 2814 int LogicalUnit = Command->device->lun;
2822 void *BufferPointer = Command->request_buffer; 2815 int BufferLength = scsi_bufflen(Command);
2823 int BufferLength = Command->request_bufflen; 2816 int Count;
2824 int SegmentCount = Command->use_sg;
2825 struct BusLogic_CCB *CCB; 2817 struct BusLogic_CCB *CCB;
2826 /* 2818 /*
2827 SCSI REQUEST_SENSE commands will be executed automatically by the Host 2819 SCSI REQUEST_SENSE commands will be executed automatically by the Host
@@ -2851,36 +2843,35 @@ static int BusLogic_QueueCommand(struct scsi_cmnd *Command, void (*CompletionRou
2851 return 0; 2843 return 0;
2852 } 2844 }
2853 } 2845 }
2846
2854 /* 2847 /*
2855 Initialize the fields in the BusLogic Command Control Block (CCB). 2848 Initialize the fields in the BusLogic Command Control Block (CCB).
2856 */ 2849 */
2857 if (SegmentCount == 0 && BufferLength != 0) { 2850 Count = scsi_dma_map(Command);
2858 CCB->Opcode = BusLogic_InitiatorCCB; 2851 BUG_ON(Count < 0);
2859 CCB->DataLength = BufferLength; 2852 if (Count) {
2860 CCB->DataPointer = pci_map_single(HostAdapter->PCI_Device, 2853 struct scatterlist *sg;
2861 BufferPointer, BufferLength, 2854 int i;
2862 Command->sc_data_direction); 2855
2863 } else if (SegmentCount != 0) {
2864 struct scatterlist *ScatterList = (struct scatterlist *) BufferPointer;
2865 int Segment, Count;
2866
2867 Count = pci_map_sg(HostAdapter->PCI_Device, ScatterList, SegmentCount,
2868 Command->sc_data_direction);
2869 CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather; 2856 CCB->Opcode = BusLogic_InitiatorCCB_ScatterGather;
2870 CCB->DataLength = Count * sizeof(struct BusLogic_ScatterGatherSegment); 2857 CCB->DataLength = Count * sizeof(struct BusLogic_ScatterGatherSegment);
2871 if (BusLogic_MultiMasterHostAdapterP(HostAdapter)) 2858 if (BusLogic_MultiMasterHostAdapterP(HostAdapter))
2872 CCB->DataPointer = (unsigned int) CCB->DMA_Handle + ((unsigned long) &CCB->ScatterGatherList - (unsigned long) CCB); 2859 CCB->DataPointer = (unsigned int) CCB->DMA_Handle + ((unsigned long) &CCB->ScatterGatherList - (unsigned long) CCB);
2873 else 2860 else
2874 CCB->DataPointer = Virtual_to_32Bit_Virtual(CCB->ScatterGatherList); 2861 CCB->DataPointer = Virtual_to_32Bit_Virtual(CCB->ScatterGatherList);
2875 for (Segment = 0; Segment < Count; Segment++) { 2862
2876 CCB->ScatterGatherList[Segment].SegmentByteCount = sg_dma_len(ScatterList + Segment); 2863 scsi_for_each_sg(Command, sg, Count, i) {
2877 CCB->ScatterGatherList[Segment].SegmentDataPointer = sg_dma_address(ScatterList + Segment); 2864 CCB->ScatterGatherList[i].SegmentByteCount =
2865 sg_dma_len(sg);
2866 CCB->ScatterGatherList[i].SegmentDataPointer =
2867 sg_dma_address(sg);
2878 } 2868 }
2879 } else { 2869 } else if (!Count) {
2880 CCB->Opcode = BusLogic_InitiatorCCB; 2870 CCB->Opcode = BusLogic_InitiatorCCB;
2881 CCB->DataLength = BufferLength; 2871 CCB->DataLength = BufferLength;
2882 CCB->DataPointer = 0; 2872 CCB->DataPointer = 0;
2883 } 2873 }
2874
2884 switch (CDB[0]) { 2875 switch (CDB[0]) {
2885 case READ_6: 2876 case READ_6:
2886 case READ_10: 2877 case READ_10:
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 572034ceb143..aac9cd9a172f 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -738,7 +738,7 @@ config SCSI_GENERIC_NCR53C400
738 738
739config SCSI_IBMMCA 739config SCSI_IBMMCA
740 tristate "IBMMCA SCSI support" 740 tristate "IBMMCA SCSI support"
741 depends on MCA_LEGACY && SCSI 741 depends on MCA && SCSI
742 ---help--- 742 ---help---
743 This is support for the IBM SCSI adapter found in many of the PS/2 743 This is support for the IBM SCSI adapter found in many of the PS/2
744 series computers. These machines have an MCA bus, so you need to 744 series computers. These machines have an MCA bus, so you need to
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index b1b632791580..cba39679f947 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -37,7 +37,6 @@ obj-$(CONFIG_SCSI_SAS_LIBSAS) += libsas/
37 37
38obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o 38obj-$(CONFIG_ISCSI_TCP) += libiscsi.o iscsi_tcp.o
39obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o 39obj-$(CONFIG_INFINIBAND_ISER) += libiscsi.o
40obj-$(CONFIG_SCSI_AMIGA7XX) += amiga7xx.o 53c7xx.o
41obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o 40obj-$(CONFIG_A3000_SCSI) += a3000.o wd33c93.o
42obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o 41obj-$(CONFIG_A2091_SCSI) += a2091.o wd33c93.o
43obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o 42obj-$(CONFIG_GVP11_SCSI) += gvp11.o wd33c93.o
@@ -53,8 +52,6 @@ obj-$(CONFIG_ATARI_SCSI) += atari_scsi.o
53obj-$(CONFIG_MAC_SCSI) += mac_scsi.o 52obj-$(CONFIG_MAC_SCSI) += mac_scsi.o
54obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o 53obj-$(CONFIG_SCSI_MAC_ESP) += mac_esp.o NCR53C9x.o
55obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o 54obj-$(CONFIG_SUN3_SCSI) += sun3_scsi.o sun3_scsi_vme.o
56obj-$(CONFIG_MVME16x_SCSI) += mvme16x.o 53c7xx.o
57obj-$(CONFIG_BVME6000_SCSI) += bvme6000.o 53c7xx.o
58obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o 55obj-$(CONFIG_SCSI_SIM710) += 53c700.o sim710.o
59obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o 56obj-$(CONFIG_SCSI_ADVANSYS) += advansys.o
60obj-$(CONFIG_SCSI_PSI240I) += psi240i.o 57obj-$(CONFIG_SCSI_PSI240I) += psi240i.o
@@ -168,10 +165,8 @@ NCR_Q720_mod-objs := NCR_Q720.o ncr53c8xx.o
168oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o 165oktagon_esp_mod-objs := oktagon_esp.o oktagon_io.o
169 166
170# Files generated that shall be removed upon make clean 167# Files generated that shall be removed upon make clean
171clean-files := 53c7xx_d.h 53c700_d.h \ 168clean-files := 53c700_d.h 53c700_u.h
172 53c7xx_u.h 53c700_u.h
173 169
174$(obj)/53c7xx.o: $(obj)/53c7xx_d.h $(obj)/53c7xx_u.h
175$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h 170$(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
176 171
177# If you want to play with the firmware, uncomment 172# If you want to play with the firmware, uncomment
@@ -179,11 +174,6 @@ $(obj)/53c700.o $(MODVERDIR)/$(obj)/53c700.ver: $(obj)/53c700_d.h
179 174
180ifdef GENERATE_FIRMWARE 175ifdef GENERATE_FIRMWARE
181 176
182$(obj)/53c7xx_d.h: $(src)/53c7xx.scr $(src)/script_asm.pl
183 $(CPP) -traditional -DCHIP=710 - < $< | grep -v '^#' | $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h)
184
185$(obj)/53c7xx_u.h: $(obj)/53c7xx_d.h
186
187$(obj)/53c700_d.h: $(src)/53c700.scr $(src)/script_asm.pl 177$(obj)/53c700_d.h: $(src)/53c700.scr $(src)/script_asm.pl
188 $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h) < $< 178 $(PERL) -s $(src)/script_asm.pl -ncr7x0_family $@ $(@:_d.h=_u.h) < $<
189 179
diff --git a/drivers/scsi/NCR5380.c b/drivers/scsi/NCR5380.c
index bb3cb3360541..37de6b37b084 100644
--- a/drivers/scsi/NCR5380.c
+++ b/drivers/scsi/NCR5380.c
@@ -347,7 +347,7 @@ static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, i
347 if((r & bit) == val) 347 if((r & bit) == val)
348 return 0; 348 return 0;
349 if(!in_interrupt()) 349 if(!in_interrupt())
350 yield(); 350 cond_resched();
351 else 351 else
352 cpu_relax(); 352 cpu_relax();
353 } 353 }
@@ -357,7 +357,7 @@ static int NCR5380_poll_politely(struct Scsi_Host *instance, int reg, int bit, i
357static struct { 357static struct {
358 unsigned char value; 358 unsigned char value;
359 const char *name; 359 const char *name;
360} phases[] = { 360} phases[] __maybe_unused = {
361 {PHASE_DATAOUT, "DATAOUT"}, 361 {PHASE_DATAOUT, "DATAOUT"},
362 {PHASE_DATAIN, "DATAIN"}, 362 {PHASE_DATAIN, "DATAIN"},
363 {PHASE_CMDOUT, "CMDOUT"}, 363 {PHASE_CMDOUT, "CMDOUT"},
@@ -575,7 +575,8 @@ static irqreturn_t __init probe_intr(int irq, void *dev_id)
575 * Locks: none, irqs must be enabled on entry 575 * Locks: none, irqs must be enabled on entry
576 */ 576 */
577 577
578static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible) 578static int __init __maybe_unused NCR5380_probe_irq(struct Scsi_Host *instance,
579 int possible)
579{ 580{
580 NCR5380_local_declare(); 581 NCR5380_local_declare();
581 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata; 582 struct NCR5380_hostdata *hostdata = (struct NCR5380_hostdata *) instance->hostdata;
@@ -629,7 +630,8 @@ static int __init NCR5380_probe_irq(struct Scsi_Host *instance, int possible)
629 * Locks: none 630 * Locks: none
630 */ 631 */
631 632
632static void __init NCR5380_print_options(struct Scsi_Host *instance) 633static void __init __maybe_unused
634NCR5380_print_options(struct Scsi_Host *instance)
633{ 635{
634 printk(" generic options" 636 printk(" generic options"
635#ifdef AUTOPROBE_IRQ 637#ifdef AUTOPROBE_IRQ
@@ -703,8 +705,8 @@ char *lprint_command(unsigned char *cmd, char *pos, char *buffer, int len);
703static 705static
704char *lprint_opcode(int opcode, char *pos, char *buffer, int length); 706char *lprint_opcode(int opcode, char *pos, char *buffer, int length);
705 707
706static 708static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
707int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start, off_t offset, int length, int inout) 709 char *buffer, char **start, off_t offset, int length, int inout)
708{ 710{
709 char *pos = buffer; 711 char *pos = buffer;
710 struct NCR5380_hostdata *hostdata; 712 struct NCR5380_hostdata *hostdata;
diff --git a/drivers/scsi/NCR5380.h b/drivers/scsi/NCR5380.h
index 713a108c02ef..bccf13f71532 100644
--- a/drivers/scsi/NCR5380.h
+++ b/drivers/scsi/NCR5380.h
@@ -299,7 +299,7 @@ static void NCR5380_information_transfer(struct Scsi_Host *instance);
299static irqreturn_t NCR5380_intr(int irq, void *dev_id); 299static irqreturn_t NCR5380_intr(int irq, void *dev_id);
300#endif 300#endif
301static void NCR5380_main(struct work_struct *work); 301static void NCR5380_main(struct work_struct *work);
302static void NCR5380_print_options(struct Scsi_Host *instance); 302static void __maybe_unused NCR5380_print_options(struct Scsi_Host *instance);
303#ifdef NDEBUG 303#ifdef NDEBUG
304static void NCR5380_print_phase(struct Scsi_Host *instance); 304static void NCR5380_print_phase(struct Scsi_Host *instance);
305static void NCR5380_print(struct Scsi_Host *instance); 305static void NCR5380_print(struct Scsi_Host *instance);
@@ -307,8 +307,8 @@ static void NCR5380_print(struct Scsi_Host *instance);
307static int NCR5380_abort(Scsi_Cmnd * cmd); 307static int NCR5380_abort(Scsi_Cmnd * cmd);
308static int NCR5380_bus_reset(Scsi_Cmnd * cmd); 308static int NCR5380_bus_reset(Scsi_Cmnd * cmd);
309static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *)); 309static int NCR5380_queue_command(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *));
310static int NCR5380_proc_info(struct Scsi_Host *instance, char *buffer, char **start, 310static int __maybe_unused NCR5380_proc_info(struct Scsi_Host *instance,
311off_t offset, int length, int inout); 311 char *buffer, char **start, off_t offset, int length, int inout);
312 312
313static void NCR5380_reselect(struct Scsi_Host *instance); 313static void NCR5380_reselect(struct Scsi_Host *instance);
314static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag); 314static int NCR5380_select(struct Scsi_Host *instance, Scsi_Cmnd * cmd, int tag);
diff --git a/drivers/scsi/NCR53c406a.c b/drivers/scsi/NCR53c406a.c
index 7c0b17f86903..eda8c48f6be7 100644
--- a/drivers/scsi/NCR53c406a.c
+++ b/drivers/scsi/NCR53c406a.c
@@ -698,7 +698,7 @@ static int NCR53c406a_queue(Scsi_Cmnd * SCpnt, void (*done) (Scsi_Cmnd *))
698 int i; 698 int i;
699 699
700 VDEB(printk("NCR53c406a_queue called\n")); 700 VDEB(printk("NCR53c406a_queue called\n"));
701 DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, SCpnt->request_bufflen)); 701 DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->target, SCpnt->lun, scsi_bufflen(SCpnt)));
702 702
703#if 0 703#if 0
704 VDEB(for (i = 0; i < SCpnt->cmd_len; i++) 704 VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
@@ -785,8 +785,8 @@ static void NCR53c406a_intr(void *dev_id)
785 unsigned char status, int_reg; 785 unsigned char status, int_reg;
786#if USE_PIO 786#if USE_PIO
787 unsigned char pio_status; 787 unsigned char pio_status;
788 struct scatterlist *sglist; 788 struct scatterlist *sg;
789 unsigned int sgcount; 789 int i;
790#endif 790#endif
791 791
792 VDEB(printk("NCR53c406a_intr called\n")); 792 VDEB(printk("NCR53c406a_intr called\n"));
@@ -866,22 +866,18 @@ static void NCR53c406a_intr(void *dev_id)
866 current_SC->SCp.phase = data_out; 866 current_SC->SCp.phase = data_out;
867 VDEB(printk("NCR53c406a: Data-Out phase\n")); 867 VDEB(printk("NCR53c406a: Data-Out phase\n"));
868 outb(FLUSH_FIFO, CMD_REG); 868 outb(FLUSH_FIFO, CMD_REG);
869 LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */ 869 LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
870#if USE_DMA /* No s/g support for DMA */ 870#if USE_DMA /* No s/g support for DMA */
871 NCR53c406a_dma_write(current_SC->request_buffer, current_SC->request_bufflen); 871 NCR53c406a_dma_write(scsi_sglist(current_SC),
872 scsdi_bufflen(current_SC));
873
872#endif /* USE_DMA */ 874#endif /* USE_DMA */
873 outb(TRANSFER_INFO | DMA_OP, CMD_REG); 875 outb(TRANSFER_INFO | DMA_OP, CMD_REG);
874#if USE_PIO 876#if USE_PIO
875 if (!current_SC->use_sg) /* Don't use scatter-gather */ 877 scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
876 NCR53c406a_pio_write(current_SC->request_buffer, current_SC->request_bufflen); 878 NCR53c406a_pio_write(page_address(sg->page) + sg->offset,
877 else { /* use scatter-gather */ 879 sg->length);
878 sgcount = current_SC->use_sg; 880 }
879 sglist = current_SC->request_buffer;
880 while (sgcount--) {
881 NCR53c406a_pio_write(page_address(sglist->page) + sglist->offset, sglist->length);
882 sglist++;
883 }
884 }
885 REG0; 881 REG0;
886#endif /* USE_PIO */ 882#endif /* USE_PIO */
887 } 883 }
@@ -893,22 +889,17 @@ static void NCR53c406a_intr(void *dev_id)
893 current_SC->SCp.phase = data_in; 889 current_SC->SCp.phase = data_in;
894 VDEB(printk("NCR53c406a: Data-In phase\n")); 890 VDEB(printk("NCR53c406a: Data-In phase\n"));
895 outb(FLUSH_FIFO, CMD_REG); 891 outb(FLUSH_FIFO, CMD_REG);
896 LOAD_DMA_COUNT(current_SC->request_bufflen); /* Max transfer size */ 892 LOAD_DMA_COUNT(scsi_bufflen(current_SC)); /* Max transfer size */
897#if USE_DMA /* No s/g support for DMA */ 893#if USE_DMA /* No s/g support for DMA */
898 NCR53c406a_dma_read(current_SC->request_buffer, current_SC->request_bufflen); 894 NCR53c406a_dma_read(scsi_sglist(current_SC),
895 scsdi_bufflen(current_SC));
899#endif /* USE_DMA */ 896#endif /* USE_DMA */
900 outb(TRANSFER_INFO | DMA_OP, CMD_REG); 897 outb(TRANSFER_INFO | DMA_OP, CMD_REG);
901#if USE_PIO 898#if USE_PIO
902 if (!current_SC->use_sg) /* Don't use scatter-gather */ 899 scsi_for_each_sg(current_SC, sg, scsi_sg_count(current_SC), i) {
903 NCR53c406a_pio_read(current_SC->request_buffer, current_SC->request_bufflen); 900 NCR53c406a_pio_read(page_address(sg->page) + sg->offset,
904 else { /* Use scatter-gather */ 901 sg->length);
905 sgcount = current_SC->use_sg; 902 }
906 sglist = current_SC->request_buffer;
907 while (sgcount--) {
908 NCR53c406a_pio_read(page_address(sglist->page) + sglist->offset, sglist->length);
909 sglist++;
910 }
911 }
912 REG0; 903 REG0;
913#endif /* USE_PIO */ 904#endif /* USE_PIO */
914 } 905 }
diff --git a/drivers/scsi/a100u2w.c b/drivers/scsi/a100u2w.c
index 7f4241bfb9c4..7cedc722fad9 100644
--- a/drivers/scsi/a100u2w.c
+++ b/drivers/scsi/a100u2w.c
@@ -796,7 +796,7 @@ static void orc_interrupt(
796*****************************************************************************/ 796*****************************************************************************/
797static void inia100BuildSCB(ORC_HCS * pHCB, ORC_SCB * pSCB, struct scsi_cmnd * SCpnt) 797static void inia100BuildSCB(ORC_HCS * pHCB, ORC_SCB * pSCB, struct scsi_cmnd * SCpnt)
798{ /* Create corresponding SCB */ 798{ /* Create corresponding SCB */
799 struct scatterlist *pSrbSG; 799 struct scatterlist *sg;
800 ORC_SG *pSG; /* Pointer to SG list */ 800 ORC_SG *pSG; /* Pointer to SG list */
801 int i, count_sg; 801 int i, count_sg;
802 ESCB *pEScb; 802 ESCB *pEScb;
@@ -813,30 +813,22 @@ static void inia100BuildSCB(ORC_HCS * pHCB, ORC_SCB * pSCB, struct scsi_cmnd * S
813 pSCB->SCB_Reserved1 = 0; 813 pSCB->SCB_Reserved1 = 0;
814 pSCB->SCB_SGLen = 0; 814 pSCB->SCB_SGLen = 0;
815 815
816 if ((pSCB->SCB_XferLen = (U32) SCpnt->request_bufflen)) { 816 pSCB->SCB_XferLen = (U32) scsi_bufflen(SCpnt);
817 pSG = (ORC_SG *) & pEScb->ESCB_SGList[0]; 817 pSG = (ORC_SG *) & pEScb->ESCB_SGList[0];
818 if (SCpnt->use_sg) { 818
819 pSrbSG = (struct scatterlist *) SCpnt->request_buffer; 819 count_sg = scsi_dma_map(SCpnt);
820 count_sg = pci_map_sg(pHCB->pdev, pSrbSG, SCpnt->use_sg, 820 BUG_ON(count_sg < 0);
821 SCpnt->sc_data_direction); 821 if (count_sg) {
822 pSCB->SCB_SGLen = (U32) (count_sg * 8); 822 pSCB->SCB_SGLen = (U32) (count_sg * 8);
823 for (i = 0; i < count_sg; i++, pSG++, pSrbSG++) { 823 scsi_for_each_sg(SCpnt, sg, count_sg, i) {
824 pSG->SG_Ptr = (U32) sg_dma_address(pSrbSG); 824 pSG->SG_Ptr = (U32) sg_dma_address(sg);
825 pSG->SG_Len = (U32) sg_dma_len(pSrbSG); 825 pSG->SG_Len = (U32) sg_dma_len(sg);
826 } 826 pSG++;
827 } else if (SCpnt->request_bufflen != 0) {/* Non SG */
828 pSCB->SCB_SGLen = 0x8;
829 SCpnt->SCp.dma_handle = pci_map_single(pHCB->pdev,
830 SCpnt->request_buffer,
831 SCpnt->request_bufflen,
832 SCpnt->sc_data_direction);
833 pSG->SG_Ptr = (U32) SCpnt->SCp.dma_handle;
834 pSG->SG_Len = (U32) SCpnt->request_bufflen;
835 } else {
836 pSCB->SCB_SGLen = 0;
837 pSG->SG_Ptr = 0;
838 pSG->SG_Len = 0;
839 } 827 }
828 } else {
829 pSCB->SCB_SGLen = 0;
830 pSG->SG_Ptr = 0;
831 pSG->SG_Len = 0;
840 } 832 }
841 pSCB->SCB_SGPAddr = (U32) pSCB->SCB_SensePAddr; 833 pSCB->SCB_SGPAddr = (U32) pSCB->SCB_SensePAddr;
842 pSCB->SCB_HaStat = 0; 834 pSCB->SCB_HaStat = 0;
@@ -995,15 +987,7 @@ static void inia100SCBPost(BYTE * pHcb, BYTE * pScb)
995 } 987 }
996 pSRB->result = pSCB->SCB_TaStat | (pSCB->SCB_HaStat << 16); 988 pSRB->result = pSCB->SCB_TaStat | (pSCB->SCB_HaStat << 16);
997 989
998 if (pSRB->use_sg) { 990 scsi_dma_unmap(pSRB);
999 pci_unmap_sg(pHCB->pdev,
1000 (struct scatterlist *)pSRB->request_buffer,
1001 pSRB->use_sg, pSRB->sc_data_direction);
1002 } else if (pSRB->request_bufflen != 0) {
1003 pci_unmap_single(pHCB->pdev, pSRB->SCp.dma_handle,
1004 pSRB->request_bufflen,
1005 pSRB->sc_data_direction);
1006 }
1007 991
1008 pSRB->scsi_done(pSRB); /* Notify system DONE */ 992 pSRB->scsi_done(pSRB); /* Notify system DONE */
1009 993
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index 8dcfe4ec35c2..47014beef96e 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -825,7 +825,7 @@ static int aac_read_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u3
825 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); 825 readcmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
826 readcmd->count = cpu_to_le32(count<<9); 826 readcmd->count = cpu_to_le32(count<<9);
827 readcmd->cid = cpu_to_le16(scmd_id(cmd)); 827 readcmd->cid = cpu_to_le16(scmd_id(cmd));
828 readcmd->flags = cpu_to_le16(1); 828 readcmd->flags = cpu_to_le16(IO_TYPE_READ);
829 readcmd->bpTotal = 0; 829 readcmd->bpTotal = 0;
830 readcmd->bpComplete = 0; 830 readcmd->bpComplete = 0;
831 831
@@ -904,7 +904,7 @@ static int aac_read_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32
904 (void *) cmd); 904 (void *) cmd);
905} 905}
906 906
907static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) 907static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
908{ 908{
909 u16 fibsize; 909 u16 fibsize;
910 struct aac_raw_io *writecmd; 910 struct aac_raw_io *writecmd;
@@ -914,7 +914,9 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
914 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32)); 914 writecmd->block[1] = cpu_to_le32((u32)((lba&0xffffffff00000000LL)>>32));
915 writecmd->count = cpu_to_le32(count<<9); 915 writecmd->count = cpu_to_le32(count<<9);
916 writecmd->cid = cpu_to_le16(scmd_id(cmd)); 916 writecmd->cid = cpu_to_le16(scmd_id(cmd));
917 writecmd->flags = 0; 917 writecmd->flags = fua ?
918 cpu_to_le16(IO_TYPE_WRITE|IO_SUREWRITE) :
919 cpu_to_le16(IO_TYPE_WRITE);
918 writecmd->bpTotal = 0; 920 writecmd->bpTotal = 0;
919 writecmd->bpComplete = 0; 921 writecmd->bpComplete = 0;
920 922
@@ -933,7 +935,7 @@ static int aac_write_raw_io(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u
933 (void *) cmd); 935 (void *) cmd);
934} 936}
935 937
936static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) 938static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
937{ 939{
938 u16 fibsize; 940 u16 fibsize;
939 struct aac_write64 *writecmd; 941 struct aac_write64 *writecmd;
@@ -964,7 +966,7 @@ static int aac_write_block64(struct fib * fib, struct scsi_cmnd * cmd, u64 lba,
964 (void *) cmd); 966 (void *) cmd);
965} 967}
966 968
967static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count) 969static int aac_write_block(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua)
968{ 970{
969 u16 fibsize; 971 u16 fibsize;
970 struct aac_write *writecmd; 972 struct aac_write *writecmd;
@@ -1498,6 +1500,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1498{ 1500{
1499 u64 lba; 1501 u64 lba;
1500 u32 count; 1502 u32 count;
1503 int fua;
1501 int status; 1504 int status;
1502 struct aac_dev *dev; 1505 struct aac_dev *dev;
1503 struct fib * cmd_fibcontext; 1506 struct fib * cmd_fibcontext;
@@ -1512,6 +1515,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1512 count = scsicmd->cmnd[4]; 1515 count = scsicmd->cmnd[4];
1513 if (count == 0) 1516 if (count == 0)
1514 count = 256; 1517 count = 256;
1518 fua = 0;
1515 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */ 1519 } else if (scsicmd->cmnd[0] == WRITE_16) { /* 16 byte command */
1516 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd))); 1520 dprintk((KERN_DEBUG "aachba: received a write(16) command on id %d.\n", scmd_id(scsicmd)));
1517 1521
@@ -1524,6 +1528,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1524 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 1528 (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1525 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) | 1529 count = (scsicmd->cmnd[10] << 24) | (scsicmd->cmnd[11] << 16) |
1526 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13]; 1530 (scsicmd->cmnd[12] << 8) | scsicmd->cmnd[13];
1531 fua = scsicmd->cmnd[1] & 0x8;
1527 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */ 1532 } else if (scsicmd->cmnd[0] == WRITE_12) { /* 12 byte command */
1528 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd))); 1533 dprintk((KERN_DEBUG "aachba: received a write(12) command on id %d.\n", scmd_id(scsicmd)));
1529 1534
@@ -1531,10 +1536,12 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1531 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1536 | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1532 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16) 1537 count = (scsicmd->cmnd[6] << 24) | (scsicmd->cmnd[7] << 16)
1533 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9]; 1538 | (scsicmd->cmnd[8] << 8) | scsicmd->cmnd[9];
1539 fua = scsicmd->cmnd[1] & 0x8;
1534 } else { 1540 } else {
1535 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd))); 1541 dprintk((KERN_DEBUG "aachba: received a write(10) command on id %d.\n", scmd_id(scsicmd)));
1536 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5]; 1542 lba = ((u64)scsicmd->cmnd[2] << 24) | (scsicmd->cmnd[3] << 16) | (scsicmd->cmnd[4] << 8) | scsicmd->cmnd[5];
1537 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8]; 1543 count = (scsicmd->cmnd[7] << 8) | scsicmd->cmnd[8];
1544 fua = scsicmd->cmnd[1] & 0x8;
1538 } 1545 }
1539 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n", 1546 dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
1540 smp_processor_id(), (unsigned long long)lba, jiffies)); 1547 smp_processor_id(), (unsigned long long)lba, jiffies));
@@ -1549,7 +1556,7 @@ static int aac_write(struct scsi_cmnd * scsicmd)
1549 return 0; 1556 return 0;
1550 } 1557 }
1551 1558
1552 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count); 1559 status = aac_adapter_write(cmd_fibcontext, scsicmd, lba, count, fua);
1553 1560
1554 /* 1561 /*
1555 * Check that the command queued to the controller 1562 * Check that the command queued to the controller
@@ -1886,15 +1893,29 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1886 1893
1887 case MODE_SENSE: 1894 case MODE_SENSE:
1888 { 1895 {
1889 char mode_buf[4]; 1896 char mode_buf[7];
1897 int mode_buf_length = 4;
1890 1898
1891 dprintk((KERN_DEBUG "MODE SENSE command.\n")); 1899 dprintk((KERN_DEBUG "MODE SENSE command.\n"));
1892 mode_buf[0] = 3; /* Mode data length */ 1900 mode_buf[0] = 3; /* Mode data length */
1893 mode_buf[1] = 0; /* Medium type - default */ 1901 mode_buf[1] = 0; /* Medium type - default */
1894 mode_buf[2] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */ 1902 mode_buf[2] = 0; /* Device-specific param,
1903 bit 8: 0/1 = write enabled/protected
1904 bit 4: 0/1 = FUA enabled */
1905 if (dev->raw_io_interface)
1906 mode_buf[2] = 0x10;
1895 mode_buf[3] = 0; /* Block descriptor length */ 1907 mode_buf[3] = 0; /* Block descriptor length */
1896 1908 if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
1897 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf)); 1909 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
1910 mode_buf[0] = 6;
1911 mode_buf[4] = 8;
1912 mode_buf[5] = 1;
1913 mode_buf[6] = 0x04; /* WCE */
1914 mode_buf_length = 7;
1915 if (mode_buf_length > scsicmd->cmnd[4])
1916 mode_buf_length = scsicmd->cmnd[4];
1917 }
1918 aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
1898 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1919 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1899 scsicmd->scsi_done(scsicmd); 1920 scsicmd->scsi_done(scsicmd);
1900 1921
@@ -1902,18 +1923,33 @@ int aac_scsi_cmd(struct scsi_cmnd * scsicmd)
1902 } 1923 }
1903 case MODE_SENSE_10: 1924 case MODE_SENSE_10:
1904 { 1925 {
1905 char mode_buf[8]; 1926 char mode_buf[11];
1927 int mode_buf_length = 8;
1906 1928
1907 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n")); 1929 dprintk((KERN_DEBUG "MODE SENSE 10 byte command.\n"));
1908 mode_buf[0] = 0; /* Mode data length (MSB) */ 1930 mode_buf[0] = 0; /* Mode data length (MSB) */
1909 mode_buf[1] = 6; /* Mode data length (LSB) */ 1931 mode_buf[1] = 6; /* Mode data length (LSB) */
1910 mode_buf[2] = 0; /* Medium type - default */ 1932 mode_buf[2] = 0; /* Medium type - default */
1911 mode_buf[3] = 0; /* Device-specific param, bit 8: 0/1 = write enabled/protected */ 1933 mode_buf[3] = 0; /* Device-specific param,
1934 bit 8: 0/1 = write enabled/protected
1935 bit 4: 0/1 = FUA enabled */
1936 if (dev->raw_io_interface)
1937 mode_buf[3] = 0x10;
1912 mode_buf[4] = 0; /* reserved */ 1938 mode_buf[4] = 0; /* reserved */
1913 mode_buf[5] = 0; /* reserved */ 1939 mode_buf[5] = 0; /* reserved */
1914 mode_buf[6] = 0; /* Block descriptor length (MSB) */ 1940 mode_buf[6] = 0; /* Block descriptor length (MSB) */
1915 mode_buf[7] = 0; /* Block descriptor length (LSB) */ 1941 mode_buf[7] = 0; /* Block descriptor length (LSB) */
1916 aac_internal_transfer(scsicmd, mode_buf, 0, sizeof(mode_buf)); 1942 if (((scsicmd->cmnd[2] & 0x3f) == 8) ||
1943 ((scsicmd->cmnd[2] & 0x3f) == 0x3f)) {
1944 mode_buf[1] = 9;
1945 mode_buf[8] = 8;
1946 mode_buf[9] = 1;
1947 mode_buf[10] = 0x04; /* WCE */
1948 mode_buf_length = 11;
1949 if (mode_buf_length > scsicmd->cmnd[8])
1950 mode_buf_length = scsicmd->cmnd[8];
1951 }
1952 aac_internal_transfer(scsicmd, mode_buf, 0, mode_buf_length);
1917 1953
1918 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD; 1954 scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 | SAM_STAT_GOOD;
1919 scsicmd->scsi_done(scsicmd); 1955 scsicmd->scsi_done(scsicmd);
diff --git a/drivers/scsi/aacraid/aacraid.h b/drivers/scsi/aacraid/aacraid.h
index c81edf36913f..fdbedb17d03b 100644
--- a/drivers/scsi/aacraid/aacraid.h
+++ b/drivers/scsi/aacraid/aacraid.h
@@ -464,12 +464,12 @@ struct adapter_ops
464 int (*adapter_restart)(struct aac_dev *dev, int bled); 464 int (*adapter_restart)(struct aac_dev *dev, int bled);
465 /* Transport operations */ 465 /* Transport operations */
466 int (*adapter_ioremap)(struct aac_dev * dev, u32 size); 466 int (*adapter_ioremap)(struct aac_dev * dev, u32 size);
467 irqreturn_t (*adapter_intr)(int irq, void *dev_id); 467 irq_handler_t adapter_intr;
468 /* Packet operations */ 468 /* Packet operations */
469 int (*adapter_deliver)(struct fib * fib); 469 int (*adapter_deliver)(struct fib * fib);
470 int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba); 470 int (*adapter_bounds)(struct aac_dev * dev, struct scsi_cmnd * cmd, u64 lba);
471 int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count); 471 int (*adapter_read)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count);
472 int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count); 472 int (*adapter_write)(struct fib * fib, struct scsi_cmnd * cmd, u64 lba, u32 count, int fua);
473 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd); 473 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
474 /* Administrative operations */ 474 /* Administrative operations */
475 int (*adapter_comm)(struct aac_dev * dev, int comm); 475 int (*adapter_comm)(struct aac_dev * dev, int comm);
@@ -1054,8 +1054,8 @@ struct aac_dev
1054#define aac_adapter_read(fib,cmd,lba,count) \ 1054#define aac_adapter_read(fib,cmd,lba,count) \
1055 ((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count) 1055 ((fib)->dev)->a_ops.adapter_read(fib,cmd,lba,count)
1056 1056
1057#define aac_adapter_write(fib,cmd,lba,count) \ 1057#define aac_adapter_write(fib,cmd,lba,count,fua) \
1058 ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count) 1058 ((fib)->dev)->a_ops.adapter_write(fib,cmd,lba,count,fua)
1059 1059
1060#define aac_adapter_scsi(fib,cmd) \ 1060#define aac_adapter_scsi(fib,cmd) \
1061 ((fib)->dev)->a_ops.adapter_scsi(fib,cmd) 1061 ((fib)->dev)->a_ops.adapter_scsi(fib,cmd)
@@ -1213,6 +1213,9 @@ struct aac_write64
1213 __le32 block; 1213 __le32 block;
1214 __le16 pad; 1214 __le16 pad;
1215 __le16 flags; 1215 __le16 flags;
1216#define IO_TYPE_WRITE 0x00000000
1217#define IO_TYPE_READ 0x00000001
1218#define IO_SUREWRITE 0x00000008
1216 struct sgmap64 sg; // Must be last in struct because it is variable 1219 struct sgmap64 sg; // Must be last in struct because it is variable
1217}; 1220};
1218struct aac_write_reply 1221struct aac_write_reply
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 350ea7feb61d..a270a3f00647 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -403,10 +403,6 @@ static int aac_biosparm(struct scsi_device *sdev, struct block_device *bdev,
403 403
404static int aac_slave_configure(struct scsi_device *sdev) 404static int aac_slave_configure(struct scsi_device *sdev)
405{ 405{
406 if (sdev_channel(sdev) == CONTAINER_CHANNEL) {
407 sdev->skip_ms_page_8 = 1;
408 sdev->skip_ms_page_3f = 1;
409 }
410 if ((sdev->type == TYPE_DISK) && 406 if ((sdev->type == TYPE_DISK) &&
411 (sdev_channel(sdev) != CONTAINER_CHANNEL)) { 407 (sdev_channel(sdev) != CONTAINER_CHANNEL)) {
412 if (expose_physicals == 0) 408 if (expose_physicals == 0)
diff --git a/drivers/scsi/advansys.c b/drivers/scsi/advansys.c
index 9b3303b64113..2b6689709e53 100644
--- a/drivers/scsi/advansys.c
+++ b/drivers/scsi/advansys.c
@@ -798,7 +798,6 @@
798#include <scsi/scsi_tcq.h> 798#include <scsi/scsi_tcq.h>
799#include <scsi/scsi.h> 799#include <scsi/scsi.h>
800#include <scsi/scsi_host.h> 800#include <scsi/scsi_host.h>
801#include "advansys.h"
802#ifdef CONFIG_PCI 801#ifdef CONFIG_PCI
803#include <linux/pci.h> 802#include <linux/pci.h>
804#endif /* CONFIG_PCI */ 803#endif /* CONFIG_PCI */
@@ -2014,7 +2013,7 @@ STATIC int AscSgListToQueue(int);
2014STATIC void AscEnableIsaDma(uchar); 2013STATIC void AscEnableIsaDma(uchar);
2015#endif /* CONFIG_ISA */ 2014#endif /* CONFIG_ISA */
2016STATIC ASC_DCNT AscGetMaxDmaCount(ushort); 2015STATIC ASC_DCNT AscGetMaxDmaCount(ushort);
2017 2016static const char *advansys_info(struct Scsi_Host *shp);
2018 2017
2019/* 2018/*
2020 * --- Adv Library Constants and Macros 2019 * --- Adv Library Constants and Macros
@@ -3970,10 +3969,6 @@ STATIC ushort asc_bus[ASC_NUM_BUS] __initdata = {
3970 ASC_IS_PCI, 3969 ASC_IS_PCI,
3971}; 3970};
3972 3971
3973/*
3974 * Used with the LILO 'advansys' option to eliminate or
3975 * limit I/O port probing at boot time, cf. advansys_setup().
3976 */
3977STATIC int asc_iopflag = ASC_FALSE; 3972STATIC int asc_iopflag = ASC_FALSE;
3978STATIC int asc_ioport[ASC_NUM_IOPORT_PROBE] = { 0, 0, 0, 0 }; 3973STATIC int asc_ioport[ASC_NUM_IOPORT_PROBE] = { 0, 0, 0, 0 };
3979 3974
@@ -4055,10 +4050,6 @@ STATIC void asc_prt_hex(char *f, uchar *, int);
4055#endif /* ADVANSYS_DEBUG */ 4050#endif /* ADVANSYS_DEBUG */
4056 4051
4057 4052
4058/*
4059 * --- Linux 'struct scsi_host_template' and advansys_setup() Functions
4060 */
4061
4062#ifdef CONFIG_PROC_FS 4053#ifdef CONFIG_PROC_FS
4063/* 4054/*
4064 * advansys_proc_info() - /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)] 4055 * advansys_proc_info() - /proc/scsi/advansys/[0-(ASC_NUM_BOARD_SUPPORTED-1)]
@@ -4080,7 +4071,7 @@ STATIC void asc_prt_hex(char *f, uchar *, int);
4080 * if 'prtbuf' is too small it will not be overwritten. Instead the 4071 * if 'prtbuf' is too small it will not be overwritten. Instead the
4081 * user just won't get all the available statistics. 4072 * user just won't get all the available statistics.
4082 */ 4073 */
4083int 4074static int
4084advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start, 4075advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
4085 off_t offset, int length, int inout) 4076 off_t offset, int length, int inout)
4086{ 4077{
@@ -4296,7 +4287,7 @@ advansys_proc_info(struct Scsi_Host *shost, char *buffer, char **start,
4296 * it must not call SCSI mid-level functions including scsi_malloc() 4287 * it must not call SCSI mid-level functions including scsi_malloc()
4297 * and scsi_free(). 4288 * and scsi_free().
4298 */ 4289 */
4299int __init 4290static int __init
4300advansys_detect(struct scsi_host_template *tpnt) 4291advansys_detect(struct scsi_host_template *tpnt)
4301{ 4292{
4302 static int detect_called = ASC_FALSE; 4293 static int detect_called = ASC_FALSE;
@@ -5428,7 +5419,7 @@ advansys_detect(struct scsi_host_template *tpnt)
5428 * 5419 *
5429 * Release resources allocated for a single AdvanSys adapter. 5420 * Release resources allocated for a single AdvanSys adapter.
5430 */ 5421 */
5431int 5422static int
5432advansys_release(struct Scsi_Host *shp) 5423advansys_release(struct Scsi_Host *shp)
5433{ 5424{
5434 asc_board_t *boardp; 5425 asc_board_t *boardp;
@@ -5475,7 +5466,7 @@ advansys_release(struct Scsi_Host *shp)
5475 * Note: The information line should not exceed ASC_INFO_SIZE bytes, 5466 * Note: The information line should not exceed ASC_INFO_SIZE bytes,
5476 * otherwise the static 'info' array will be overrun. 5467 * otherwise the static 'info' array will be overrun.
5477 */ 5468 */
5478const char * 5469static const char *
5479advansys_info(struct Scsi_Host *shp) 5470advansys_info(struct Scsi_Host *shp)
5480{ 5471{
5481 static char info[ASC_INFO_SIZE]; 5472 static char info[ASC_INFO_SIZE];
@@ -5568,7 +5559,7 @@ advansys_info(struct Scsi_Host *shp)
5568 * This function always returns 0. Command return status is saved 5559 * This function always returns 0. Command return status is saved
5569 * in the 'scp' result field. 5560 * in the 'scp' result field.
5570 */ 5561 */
5571int 5562static int
5572advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *)) 5563advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
5573{ 5564{
5574 struct Scsi_Host *shp; 5565 struct Scsi_Host *shp;
@@ -5656,7 +5647,7 @@ advansys_queuecommand(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
5656 * sleeping is allowed and no locking other than for host structures is 5647 * sleeping is allowed and no locking other than for host structures is
5657 * required. Returns SUCCESS or FAILED. 5648 * required. Returns SUCCESS or FAILED.
5658 */ 5649 */
5659int 5650static int
5660advansys_reset(struct scsi_cmnd *scp) 5651advansys_reset(struct scsi_cmnd *scp)
5661{ 5652{
5662 struct Scsi_Host *shp; 5653 struct Scsi_Host *shp;
@@ -5841,7 +5832,7 @@ advansys_reset(struct scsi_cmnd *scp)
5841 * ip[1]: sectors 5832 * ip[1]: sectors
5842 * ip[2]: cylinders 5833 * ip[2]: cylinders
5843 */ 5834 */
5844int 5835static int
5845advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev, 5836advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
5846 sector_t capacity, int ip[]) 5837 sector_t capacity, int ip[])
5847{ 5838{
@@ -5875,82 +5866,6 @@ advansys_biosparam(struct scsi_device *sdev, struct block_device *bdev,
5875} 5866}
5876 5867
5877/* 5868/*
5878 * advansys_setup()
5879 *
5880 * This function is called from init/main.c at boot time.
5881 * It it passed LILO parameters that can be set from the
5882 * LILO command line or in /etc/lilo.conf.
5883 *
5884 * It is used by the AdvanSys driver to either disable I/O
5885 * port scanning or to limit scanning to 1 - 4 I/O ports.
5886 * Regardless of the option setting EISA and PCI boards
5887 * will still be searched for and detected. This option
5888 * only affects searching for ISA and VL boards.
5889 *
5890 * If ADVANSYS_DEBUG is defined the driver debug level may
5891 * be set using the 5th (ASC_NUM_IOPORT_PROBE + 1) I/O Port.
5892 *
5893 * Examples:
5894 * 1. Eliminate I/O port scanning:
5895 * boot: linux advansys=
5896 * or
5897 * boot: linux advansys=0x0
5898 * 2. Limit I/O port scanning to one I/O port:
5899 * boot: linux advansys=0x110
5900 * 3. Limit I/O port scanning to four I/O ports:
5901 * boot: linux advansys=0x110,0x210,0x230,0x330
5902 * 4. If ADVANSYS_DEBUG, limit I/O port scanning to four I/O ports and
5903 * set the driver debug level to 2.
5904 * boot: linux advansys=0x110,0x210,0x230,0x330,0xdeb2
5905 *
5906 * ints[0] - number of arguments
5907 * ints[1] - first argument
5908 * ints[2] - second argument
5909 * ...
5910 */
5911void __init
5912advansys_setup(char *str, int *ints)
5913{
5914 int i;
5915
5916 if (asc_iopflag == ASC_TRUE) {
5917 printk("AdvanSys SCSI: 'advansys' LILO option may appear only once\n");
5918 return;
5919 }
5920
5921 asc_iopflag = ASC_TRUE;
5922
5923 if (ints[0] > ASC_NUM_IOPORT_PROBE) {
5924#ifdef ADVANSYS_DEBUG
5925 if ((ints[0] == ASC_NUM_IOPORT_PROBE + 1) &&
5926 (ints[ASC_NUM_IOPORT_PROBE + 1] >> 4 == 0xdeb)) {
5927 asc_dbglvl = ints[ASC_NUM_IOPORT_PROBE + 1] & 0xf;
5928 } else {
5929#endif /* ADVANSYS_DEBUG */
5930 printk("AdvanSys SCSI: only %d I/O ports accepted\n",
5931 ASC_NUM_IOPORT_PROBE);
5932#ifdef ADVANSYS_DEBUG
5933 }
5934#endif /* ADVANSYS_DEBUG */
5935 }
5936
5937#ifdef ADVANSYS_DEBUG
5938 ASC_DBG1(1, "advansys_setup: ints[0] %d\n", ints[0]);
5939 for (i = 1; i < ints[0]; i++) {
5940 ASC_DBG2(1, " ints[%d] 0x%x", i, ints[i]);
5941 }
5942 ASC_DBG(1, "\n");
5943#endif /* ADVANSYS_DEBUG */
5944
5945 for (i = 1; i <= ints[0] && i <= ASC_NUM_IOPORT_PROBE; i++) {
5946 asc_ioport[i-1] = ints[i];
5947 ASC_DBG2(1, "advansys_setup: asc_ioport[%d] 0x%x\n",
5948 i - 1, asc_ioport[i-1]);
5949 }
5950}
5951
5952
5953/*
5954 * --- Loadable Driver Support 5869 * --- Loadable Driver Support
5955 */ 5870 */
5956 5871
diff --git a/drivers/scsi/advansys.h b/drivers/scsi/advansys.h
deleted file mode 100644
index 8ee7fb16a725..000000000000
--- a/drivers/scsi/advansys.h
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * advansys.h - Linux Host Driver for AdvanSys SCSI Adapters
3 *
4 * Copyright (c) 1995-2000 Advanced System Products, Inc.
5 * Copyright (c) 2000-2001 ConnectCom Solutions, Inc.
6 * All Rights Reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that redistributions of source
10 * code retain the above copyright notice and this comment without
11 * modification.
12 *
13 * As of March 8, 2000 Advanced System Products, Inc. (AdvanSys)
14 * changed its name to ConnectCom Solutions, Inc.
15 *
16 */
17
18#ifndef _ADVANSYS_H
19#define _ADVANSYS_H
20
21/*
22 * struct scsi_host_template function prototypes.
23 */
24int advansys_detect(struct scsi_host_template *);
25int advansys_release(struct Scsi_Host *);
26const char *advansys_info(struct Scsi_Host *);
27int advansys_queuecommand(struct scsi_cmnd *, void (* done)(struct scsi_cmnd *));
28int advansys_reset(struct scsi_cmnd *);
29int advansys_biosparam(struct scsi_device *, struct block_device *,
30 sector_t, int[]);
31static int advansys_slave_configure(struct scsi_device *);
32
33/* init/main.c setup function */
34void advansys_setup(char *, int *);
35
36#endif /* _ADVANSYS_H */
diff --git a/drivers/scsi/aha152x.c b/drivers/scsi/aha152x.c
index 4b4d1233ce8a..85f2394ffc3e 100644
--- a/drivers/scsi/aha152x.c
+++ b/drivers/scsi/aha152x.c
@@ -240,6 +240,7 @@
240#include <linux/io.h> 240#include <linux/io.h>
241#include <linux/blkdev.h> 241#include <linux/blkdev.h>
242#include <asm/system.h> 242#include <asm/system.h>
243#include <linux/completion.h>
243#include <linux/errno.h> 244#include <linux/errno.h>
244#include <linux/string.h> 245#include <linux/string.h>
245#include <linux/wait.h> 246#include <linux/wait.h>
@@ -253,7 +254,6 @@
253#include <linux/spinlock.h> 254#include <linux/spinlock.h>
254#include <linux/workqueue.h> 255#include <linux/workqueue.h>
255#include <linux/list.h> 256#include <linux/list.h>
256#include <asm/semaphore.h>
257#include <scsi/scsicam.h> 257#include <scsi/scsicam.h>
258 258
259#include "scsi.h" 259#include "scsi.h"
@@ -551,7 +551,7 @@ struct aha152x_hostdata {
551 */ 551 */
552struct aha152x_scdata { 552struct aha152x_scdata {
553 Scsi_Cmnd *next; /* next sc in queue */ 553 Scsi_Cmnd *next; /* next sc in queue */
554 struct semaphore *sem; /* semaphore to block on */ 554 struct completion *done;/* semaphore to block on */
555 unsigned char cmd_len; 555 unsigned char cmd_len;
556 unsigned char cmnd[MAX_COMMAND_SIZE]; 556 unsigned char cmnd[MAX_COMMAND_SIZE];
557 unsigned short use_sg; 557 unsigned short use_sg;
@@ -608,7 +608,7 @@ struct aha152x_scdata {
608 608
609#define SCDATA(SCpnt) ((struct aha152x_scdata *) (SCpnt)->host_scribble) 609#define SCDATA(SCpnt) ((struct aha152x_scdata *) (SCpnt)->host_scribble)
610#define SCNEXT(SCpnt) SCDATA(SCpnt)->next 610#define SCNEXT(SCpnt) SCDATA(SCpnt)->next
611#define SCSEM(SCpnt) SCDATA(SCpnt)->sem 611#define SCSEM(SCpnt) SCDATA(SCpnt)->done
612 612
613#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset)) 613#define SG_ADDRESS(buffer) ((char *) (page_address((buffer)->page)+(buffer)->offset))
614 614
@@ -969,7 +969,8 @@ static int setup_expected_interrupts(struct Scsi_Host *shpnt)
969/* 969/*
970 * Queue a command and setup interrupts for a free bus. 970 * Queue a command and setup interrupts for a free bus.
971 */ 971 */
972static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int phase, void (*done)(Scsi_Cmnd *)) 972static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct completion *complete,
973 int phase, void (*done)(Scsi_Cmnd *))
973{ 974{
974 struct Scsi_Host *shpnt = SCpnt->device->host; 975 struct Scsi_Host *shpnt = SCpnt->device->host;
975 unsigned long flags; 976 unsigned long flags;
@@ -1013,7 +1014,7 @@ static int aha152x_internal_queue(Scsi_Cmnd *SCpnt, struct semaphore *sem, int p
1013 } 1014 }
1014 1015
1015 SCNEXT(SCpnt) = NULL; 1016 SCNEXT(SCpnt) = NULL;
1016 SCSEM(SCpnt) = sem; 1017 SCSEM(SCpnt) = complete;
1017 1018
1018 /* setup scratch area 1019 /* setup scratch area
1019 SCp.ptr : buffer pointer 1020 SCp.ptr : buffer pointer
@@ -1084,9 +1085,9 @@ static void reset_done(Scsi_Cmnd *SCpnt)
1084 DPRINTK(debug_eh, INFO_LEAD "reset_done called\n", CMDINFO(SCpnt)); 1085 DPRINTK(debug_eh, INFO_LEAD "reset_done called\n", CMDINFO(SCpnt));
1085#endif 1086#endif
1086 if(SCSEM(SCpnt)) { 1087 if(SCSEM(SCpnt)) {
1087 up(SCSEM(SCpnt)); 1088 complete(SCSEM(SCpnt));
1088 } else { 1089 } else {
1089 printk(KERN_ERR "aha152x: reset_done w/o semaphore\n"); 1090 printk(KERN_ERR "aha152x: reset_done w/o completion\n");
1090 } 1091 }
1091} 1092}
1092 1093
@@ -1139,21 +1140,6 @@ static int aha152x_abort(Scsi_Cmnd *SCpnt)
1139 return FAILED; 1140 return FAILED;
1140} 1141}
1141 1142
1142static void timer_expired(unsigned long p)
1143{
1144 Scsi_Cmnd *SCp = (Scsi_Cmnd *)p;
1145 struct semaphore *sem = SCSEM(SCp);
1146 struct Scsi_Host *shpnt = SCp->device->host;
1147 unsigned long flags;
1148
1149 /* remove command from issue queue */
1150 DO_LOCK(flags);
1151 remove_SC(&ISSUE_SC, SCp);
1152 DO_UNLOCK(flags);
1153
1154 up(sem);
1155}
1156
1157/* 1143/*
1158 * Reset a device 1144 * Reset a device
1159 * 1145 *
@@ -1161,14 +1147,14 @@ static void timer_expired(unsigned long p)
1161static int aha152x_device_reset(Scsi_Cmnd * SCpnt) 1147static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1162{ 1148{
1163 struct Scsi_Host *shpnt = SCpnt->device->host; 1149 struct Scsi_Host *shpnt = SCpnt->device->host;
1164 DECLARE_MUTEX_LOCKED(sem); 1150 DECLARE_COMPLETION(done);
1165 struct timer_list timer;
1166 int ret, issued, disconnected; 1151 int ret, issued, disconnected;
1167 unsigned char old_cmd_len = SCpnt->cmd_len; 1152 unsigned char old_cmd_len = SCpnt->cmd_len;
1168 unsigned short old_use_sg = SCpnt->use_sg; 1153 unsigned short old_use_sg = SCpnt->use_sg;
1169 void *old_buffer = SCpnt->request_buffer; 1154 void *old_buffer = SCpnt->request_buffer;
1170 unsigned old_bufflen = SCpnt->request_bufflen; 1155 unsigned old_bufflen = SCpnt->request_bufflen;
1171 unsigned long flags; 1156 unsigned long flags;
1157 unsigned long timeleft;
1172 1158
1173#if defined(AHA152X_DEBUG) 1159#if defined(AHA152X_DEBUG)
1174 if(HOSTDATA(shpnt)->debug & debug_eh) { 1160 if(HOSTDATA(shpnt)->debug & debug_eh) {
@@ -1192,15 +1178,15 @@ static int aha152x_device_reset(Scsi_Cmnd * SCpnt)
1192 SCpnt->request_buffer = NULL; 1178 SCpnt->request_buffer = NULL;
1193 SCpnt->request_bufflen = 0; 1179 SCpnt->request_bufflen = 0;
1194 1180
1195 init_timer(&timer); 1181 aha152x_internal_queue(SCpnt, &done, resetting, reset_done);
1196 timer.data = (unsigned long) SCpnt;
1197 timer.expires = jiffies + 100*HZ; /* 10s */
1198 timer.function = (void (*)(unsigned long)) timer_expired;
1199 1182
1200 aha152x_internal_queue(SCpnt, &sem, resetting, reset_done); 1183 timeleft = wait_for_completion_timeout(&done, 100*HZ);
1201 add_timer(&timer); 1184 if (!timeleft) {
1202 down(&sem); 1185 /* remove command from issue queue */
1203 del_timer(&timer); 1186 DO_LOCK(flags);
1187 remove_SC(&ISSUE_SC, SCpnt);
1188 DO_UNLOCK(flags);
1189 }
1204 1190
1205 SCpnt->cmd_len = old_cmd_len; 1191 SCpnt->cmd_len = old_cmd_len;
1206 SCpnt->use_sg = old_use_sg; 1192 SCpnt->use_sg = old_use_sg;
diff --git a/drivers/scsi/aha1740.c b/drivers/scsi/aha1740.c
index d7af9c63a04d..e4a4f3a965d9 100644
--- a/drivers/scsi/aha1740.c
+++ b/drivers/scsi/aha1740.c
@@ -271,20 +271,8 @@ static irqreturn_t aha1740_intr_handle(int irq, void *dev_id)
271 continue; 271 continue;
272 } 272 }
273 sgptr = (struct aha1740_sg *) SCtmp->host_scribble; 273 sgptr = (struct aha1740_sg *) SCtmp->host_scribble;
274 if (SCtmp->use_sg) { 274 scsi_dma_unmap(SCtmp);
275 /* We used scatter-gather. 275
276 Do the unmapping dance. */
277 dma_unmap_sg (&edev->dev,
278 (struct scatterlist *) SCtmp->request_buffer,
279 SCtmp->use_sg,
280 SCtmp->sc_data_direction);
281 } else {
282 dma_unmap_single (&edev->dev,
283 sgptr->buf_dma_addr,
284 SCtmp->request_bufflen,
285 DMA_BIDIRECTIONAL);
286 }
287
288 /* Free the sg block */ 276 /* Free the sg block */
289 dma_free_coherent (&edev->dev, 277 dma_free_coherent (&edev->dev,
290 sizeof (struct aha1740_sg), 278 sizeof (struct aha1740_sg),
@@ -349,11 +337,9 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
349 unchar target = scmd_id(SCpnt); 337 unchar target = scmd_id(SCpnt);
350 struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host); 338 struct aha1740_hostdata *host = HOSTDATA(SCpnt->device->host);
351 unsigned long flags; 339 unsigned long flags;
352 void *buff = SCpnt->request_buffer;
353 int bufflen = SCpnt->request_bufflen;
354 dma_addr_t sg_dma; 340 dma_addr_t sg_dma;
355 struct aha1740_sg *sgptr; 341 struct aha1740_sg *sgptr;
356 int ecbno; 342 int ecbno, nseg;
357 DEB(int i); 343 DEB(int i);
358 344
359 if(*cmd == REQUEST_SENSE) { 345 if(*cmd == REQUEST_SENSE) {
@@ -423,24 +409,23 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
423 } 409 }
424 sgptr = (struct aha1740_sg *) SCpnt->host_scribble; 410 sgptr = (struct aha1740_sg *) SCpnt->host_scribble;
425 sgptr->sg_dma_addr = sg_dma; 411 sgptr->sg_dma_addr = sg_dma;
426 412
427 if (SCpnt->use_sg) { 413 nseg = scsi_dma_map(SCpnt);
428 struct scatterlist * sgpnt; 414 BUG_ON(nseg < 0);
415 if (nseg) {
416 struct scatterlist *sg;
429 struct aha1740_chain * cptr; 417 struct aha1740_chain * cptr;
430 int i, count; 418 int i;
431 DEB(unsigned char * ptr); 419 DEB(unsigned char * ptr);
432 420
433 host->ecb[ecbno].sg = 1; /* SCSI Initiator Command 421 host->ecb[ecbno].sg = 1; /* SCSI Initiator Command
434 * w/scatter-gather*/ 422 * w/scatter-gather*/
435 sgpnt = (struct scatterlist *) SCpnt->request_buffer;
436 cptr = sgptr->sg_chain; 423 cptr = sgptr->sg_chain;
437 count = dma_map_sg (&host->edev->dev, sgpnt, SCpnt->use_sg, 424 scsi_for_each_sg(SCpnt, sg, nseg, i) {
438 SCpnt->sc_data_direction); 425 cptr[i].datalen = sg_dma_len (sg);
439 for(i=0; i < count; i++) { 426 cptr[i].dataptr = sg_dma_address (sg);
440 cptr[i].datalen = sg_dma_len (sgpnt + i);
441 cptr[i].dataptr = sg_dma_address (sgpnt + i);
442 } 427 }
443 host->ecb[ecbno].datalen = count*sizeof(struct aha1740_chain); 428 host->ecb[ecbno].datalen = nseg * sizeof(struct aha1740_chain);
444 host->ecb[ecbno].dataptr = sg_dma; 429 host->ecb[ecbno].dataptr = sg_dma;
445#ifdef DEBUG 430#ifdef DEBUG
446 printk("cptr %x: ",cptr); 431 printk("cptr %x: ",cptr);
@@ -448,11 +433,8 @@ static int aha1740_queuecommand(Scsi_Cmnd * SCpnt, void (*done)(Scsi_Cmnd *))
448 for(i=0;i<24;i++) printk("%02x ", ptr[i]); 433 for(i=0;i<24;i++) printk("%02x ", ptr[i]);
449#endif 434#endif
450 } else { 435 } else {
451 host->ecb[ecbno].datalen = bufflen; 436 host->ecb[ecbno].datalen = 0;
452 sgptr->buf_dma_addr = dma_map_single (&host->edev->dev, 437 host->ecb[ecbno].dataptr = 0;
453 buff, bufflen,
454 DMA_BIDIRECTIONAL);
455 host->ecb[ecbno].dataptr = sgptr->buf_dma_addr;
456 } 438 }
457 host->ecb[ecbno].lun = SCpnt->device->lun; 439 host->ecb[ecbno].lun = SCpnt->device->lun;
458 host->ecb[ecbno].ses = 1; /* Suppress underrun errors */ 440 host->ecb[ecbno].ses = 1; /* Suppress underrun errors */
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.c b/drivers/scsi/aic7xxx/aic79xx_osm.c
index 6054881f21f1..286ab83116f9 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.c
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.c
@@ -376,21 +376,10 @@ static __inline void
376ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb) 376ahd_linux_unmap_scb(struct ahd_softc *ahd, struct scb *scb)
377{ 377{
378 struct scsi_cmnd *cmd; 378 struct scsi_cmnd *cmd;
379 int direction;
380 379
381 cmd = scb->io_ctx; 380 cmd = scb->io_ctx;
382 direction = cmd->sc_data_direction;
383 ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE); 381 ahd_sync_sglist(ahd, scb, BUS_DMASYNC_POSTWRITE);
384 if (cmd->use_sg != 0) { 382 scsi_dma_unmap(cmd);
385 struct scatterlist *sg;
386
387 sg = (struct scatterlist *)cmd->request_buffer;
388 pci_unmap_sg(ahd->dev_softc, sg, cmd->use_sg, direction);
389 } else if (cmd->request_bufflen != 0) {
390 pci_unmap_single(ahd->dev_softc,
391 scb->platform_data->buf_busaddr,
392 cmd->request_bufflen, direction);
393 }
394} 383}
395 384
396/******************************** Macros **************************************/ 385/******************************** Macros **************************************/
@@ -1422,6 +1411,7 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
1422 u_int col_idx; 1411 u_int col_idx;
1423 uint16_t mask; 1412 uint16_t mask;
1424 unsigned long flags; 1413 unsigned long flags;
1414 int nseg;
1425 1415
1426 ahd_lock(ahd, &flags); 1416 ahd_lock(ahd, &flags);
1427 1417
@@ -1494,18 +1484,17 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
1494 ahd_set_residual(scb, 0); 1484 ahd_set_residual(scb, 0);
1495 ahd_set_sense_residual(scb, 0); 1485 ahd_set_sense_residual(scb, 0);
1496 scb->sg_count = 0; 1486 scb->sg_count = 0;
1497 if (cmd->use_sg != 0) { 1487
1498 void *sg; 1488 nseg = scsi_dma_map(cmd);
1499 struct scatterlist *cur_seg; 1489 BUG_ON(nseg < 0);
1500 u_int nseg; 1490 if (nseg > 0) {
1501 int dir; 1491 void *sg = scb->sg_list;
1502 1492 struct scatterlist *cur_seg;
1503 cur_seg = (struct scatterlist *)cmd->request_buffer; 1493 int i;
1504 dir = cmd->sc_data_direction; 1494
1505 nseg = pci_map_sg(ahd->dev_softc, cur_seg,
1506 cmd->use_sg, dir);
1507 scb->platform_data->xfer_len = 0; 1495 scb->platform_data->xfer_len = 0;
1508 for (sg = scb->sg_list; nseg > 0; nseg--, cur_seg++) { 1496
1497 scsi_for_each_sg(cmd, cur_seg, nseg, i) {
1509 dma_addr_t addr; 1498 dma_addr_t addr;
1510 bus_size_t len; 1499 bus_size_t len;
1511 1500
@@ -1513,22 +1502,8 @@ ahd_linux_run_command(struct ahd_softc *ahd, struct ahd_linux_device *dev,
1513 len = sg_dma_len(cur_seg); 1502 len = sg_dma_len(cur_seg);
1514 scb->platform_data->xfer_len += len; 1503 scb->platform_data->xfer_len += len;
1515 sg = ahd_sg_setup(ahd, scb, sg, addr, len, 1504 sg = ahd_sg_setup(ahd, scb, sg, addr, len,
1516 /*last*/nseg == 1); 1505 i == (nseg - 1));
1517 } 1506 }
1518 } else if (cmd->request_bufflen != 0) {
1519 void *sg;
1520 dma_addr_t addr;
1521 int dir;
1522
1523 sg = scb->sg_list;
1524 dir = cmd->sc_data_direction;
1525 addr = pci_map_single(ahd->dev_softc,
1526 cmd->request_buffer,
1527 cmd->request_bufflen, dir);
1528 scb->platform_data->xfer_len = cmd->request_bufflen;
1529 scb->platform_data->buf_busaddr = addr;
1530 sg = ahd_sg_setup(ahd, scb, sg, addr,
1531 cmd->request_bufflen, /*last*/TRUE);
1532 } 1507 }
1533 1508
1534 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links); 1509 LIST_INSERT_HEAD(&ahd->pending_scbs, scb, pending_links);
diff --git a/drivers/scsi/aic7xxx/aic79xx_osm.h b/drivers/scsi/aic7xxx/aic79xx_osm.h
index ad9761b237dc..853998be1474 100644
--- a/drivers/scsi/aic7xxx/aic79xx_osm.h
+++ b/drivers/scsi/aic7xxx/aic79xx_osm.h
@@ -781,7 +781,7 @@ int ahd_get_transfer_dir(struct scb *scb)
781static __inline 781static __inline
782void ahd_set_residual(struct scb *scb, u_long resid) 782void ahd_set_residual(struct scb *scb, u_long resid)
783{ 783{
784 scb->io_ctx->resid = resid; 784 scsi_set_resid(scb->io_ctx, resid);
785} 785}
786 786
787static __inline 787static __inline
@@ -793,7 +793,7 @@ void ahd_set_sense_residual(struct scb *scb, u_long resid)
793static __inline 793static __inline
794u_long ahd_get_residual(struct scb *scb) 794u_long ahd_get_residual(struct scb *scb)
795{ 795{
796 return (scb->io_ctx->resid); 796 return scsi_get_resid(scb->io_ctx);
797} 797}
798 798
799static __inline 799static __inline
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.c b/drivers/scsi/aic7xxx/aic7xxx_osm.c
index 660f26e23a38..1803ab6fc21c 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.c
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.c
@@ -402,18 +402,8 @@ ahc_linux_unmap_scb(struct ahc_softc *ahc, struct scb *scb)
402 402
403 cmd = scb->io_ctx; 403 cmd = scb->io_ctx;
404 ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE); 404 ahc_sync_sglist(ahc, scb, BUS_DMASYNC_POSTWRITE);
405 if (cmd->use_sg != 0) { 405
406 struct scatterlist *sg; 406 scsi_dma_unmap(cmd);
407
408 sg = (struct scatterlist *)cmd->request_buffer;
409 pci_unmap_sg(ahc->dev_softc, sg, cmd->use_sg,
410 cmd->sc_data_direction);
411 } else if (cmd->request_bufflen != 0) {
412 pci_unmap_single(ahc->dev_softc,
413 scb->platform_data->buf_busaddr,
414 cmd->request_bufflen,
415 cmd->sc_data_direction);
416 }
417} 407}
418 408
419static __inline int 409static __inline int
@@ -1381,6 +1371,7 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1381 struct ahc_tmode_tstate *tstate; 1371 struct ahc_tmode_tstate *tstate;
1382 uint16_t mask; 1372 uint16_t mask;
1383 struct scb_tailq *untagged_q = NULL; 1373 struct scb_tailq *untagged_q = NULL;
1374 int nseg;
1384 1375
1385 /* 1376 /*
1386 * Schedule us to run later. The only reason we are not 1377 * Schedule us to run later. The only reason we are not
@@ -1472,23 +1463,21 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1472 ahc_set_residual(scb, 0); 1463 ahc_set_residual(scb, 0);
1473 ahc_set_sense_residual(scb, 0); 1464 ahc_set_sense_residual(scb, 0);
1474 scb->sg_count = 0; 1465 scb->sg_count = 0;
1475 if (cmd->use_sg != 0) { 1466
1467 nseg = scsi_dma_map(cmd);
1468 BUG_ON(nseg < 0);
1469 if (nseg > 0) {
1476 struct ahc_dma_seg *sg; 1470 struct ahc_dma_seg *sg;
1477 struct scatterlist *cur_seg; 1471 struct scatterlist *cur_seg;
1478 struct scatterlist *end_seg; 1472 int i;
1479 int nseg;
1480 1473
1481 cur_seg = (struct scatterlist *)cmd->request_buffer;
1482 nseg = pci_map_sg(ahc->dev_softc, cur_seg, cmd->use_sg,
1483 cmd->sc_data_direction);
1484 end_seg = cur_seg + nseg;
1485 /* Copy the segments into the SG list. */ 1474 /* Copy the segments into the SG list. */
1486 sg = scb->sg_list; 1475 sg = scb->sg_list;
1487 /* 1476 /*
1488 * The sg_count may be larger than nseg if 1477 * The sg_count may be larger than nseg if
1489 * a transfer crosses a 32bit page. 1478 * a transfer crosses a 32bit page.
1490 */ 1479 */
1491 while (cur_seg < end_seg) { 1480 scsi_for_each_sg(cmd, cur_seg, nseg, i) {
1492 dma_addr_t addr; 1481 dma_addr_t addr;
1493 bus_size_t len; 1482 bus_size_t len;
1494 int consumed; 1483 int consumed;
@@ -1499,7 +1488,6 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1499 sg, addr, len); 1488 sg, addr, len);
1500 sg += consumed; 1489 sg += consumed;
1501 scb->sg_count += consumed; 1490 scb->sg_count += consumed;
1502 cur_seg++;
1503 } 1491 }
1504 sg--; 1492 sg--;
1505 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG); 1493 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
@@ -1516,33 +1504,6 @@ ahc_linux_run_command(struct ahc_softc *ahc, struct ahc_linux_device *dev,
1516 */ 1504 */
1517 scb->hscb->dataptr = scb->sg_list->addr; 1505 scb->hscb->dataptr = scb->sg_list->addr;
1518 scb->hscb->datacnt = scb->sg_list->len; 1506 scb->hscb->datacnt = scb->sg_list->len;
1519 } else if (cmd->request_bufflen != 0) {
1520 struct ahc_dma_seg *sg;
1521 dma_addr_t addr;
1522
1523 sg = scb->sg_list;
1524 addr = pci_map_single(ahc->dev_softc,
1525 cmd->request_buffer,
1526 cmd->request_bufflen,
1527 cmd->sc_data_direction);
1528 scb->platform_data->buf_busaddr = addr;
1529 scb->sg_count = ahc_linux_map_seg(ahc, scb,
1530 sg, addr,
1531 cmd->request_bufflen);
1532 sg->len |= ahc_htole32(AHC_DMA_LAST_SEG);
1533
1534 /*
1535 * Reset the sg list pointer.
1536 */
1537 scb->hscb->sgptr =
1538 ahc_htole32(scb->sg_list_phys | SG_FULL_RESID);
1539
1540 /*
1541 * Copy the first SG into the "current"
1542 * data pointer area.
1543 */
1544 scb->hscb->dataptr = sg->addr;
1545 scb->hscb->datacnt = sg->len;
1546 } else { 1507 } else {
1547 scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL); 1508 scb->hscb->sgptr = ahc_htole32(SG_LIST_NULL);
1548 scb->hscb->dataptr = 0; 1509 scb->hscb->dataptr = 0;
diff --git a/drivers/scsi/aic7xxx/aic7xxx_osm.h b/drivers/scsi/aic7xxx/aic7xxx_osm.h
index 8fee7edc6eb3..b48dab447bde 100644
--- a/drivers/scsi/aic7xxx/aic7xxx_osm.h
+++ b/drivers/scsi/aic7xxx/aic7xxx_osm.h
@@ -751,7 +751,7 @@ int ahc_get_transfer_dir(struct scb *scb)
751static __inline 751static __inline
752void ahc_set_residual(struct scb *scb, u_long resid) 752void ahc_set_residual(struct scb *scb, u_long resid)
753{ 753{
754 scb->io_ctx->resid = resid; 754 scsi_set_resid(scb->io_ctx, resid);
755} 755}
756 756
757static __inline 757static __inline
@@ -763,7 +763,7 @@ void ahc_set_sense_residual(struct scb *scb, u_long resid)
763static __inline 763static __inline
764u_long ahc_get_residual(struct scb *scb) 764u_long ahc_get_residual(struct scb *scb)
765{ 765{
766 return (scb->io_ctx->resid); 766 return scsi_get_resid(scb->io_ctx);
767} 767}
768 768
769static __inline 769static __inline
diff --git a/drivers/scsi/aic7xxx_old.c b/drivers/scsi/aic7xxx_old.c
index a988d5abf702..f5e3c6b27c70 100644
--- a/drivers/scsi/aic7xxx_old.c
+++ b/drivers/scsi/aic7xxx_old.c
@@ -2690,17 +2690,8 @@ aic7xxx_done(struct aic7xxx_host *p, struct aic7xxx_scb *scb)
2690 struct aic7xxx_scb *scbp; 2690 struct aic7xxx_scb *scbp;
2691 unsigned char queue_depth; 2691 unsigned char queue_depth;
2692 2692
2693 if (cmd->use_sg > 1) 2693 scsi_dma_unmap(cmd);
2694 {
2695 struct scatterlist *sg;
2696 2694
2697 sg = (struct scatterlist *)cmd->request_buffer;
2698 pci_unmap_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction);
2699 }
2700 else if (cmd->request_bufflen)
2701 pci_unmap_single(p->pdev, aic7xxx_mapping(cmd),
2702 cmd->request_bufflen,
2703 cmd->sc_data_direction);
2704 if (scb->flags & SCB_SENSE) 2695 if (scb->flags & SCB_SENSE)
2705 { 2696 {
2706 pci_unmap_single(p->pdev, 2697 pci_unmap_single(p->pdev,
@@ -3869,7 +3860,7 @@ aic7xxx_calculate_residual (struct aic7xxx_host *p, struct aic7xxx_scb *scb)
3869 * the mid layer didn't check residual data counts to see if the 3860 * the mid layer didn't check residual data counts to see if the
3870 * command needs retried. 3861 * command needs retried.
3871 */ 3862 */
3872 cmd->resid = scb->sg_length - actual; 3863 scsi_set_resid(cmd, scb->sg_length - actual);
3873 aic7xxx_status(cmd) = hscb->target_status; 3864 aic7xxx_status(cmd) = hscb->target_status;
3874 } 3865 }
3875 } 3866 }
@@ -10137,6 +10128,7 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10137 struct scsi_device *sdptr = cmd->device; 10128 struct scsi_device *sdptr = cmd->device;
10138 unsigned char tindex = TARGET_INDEX(cmd); 10129 unsigned char tindex = TARGET_INDEX(cmd);
10139 struct request *req = cmd->request; 10130 struct request *req = cmd->request;
10131 int use_sg;
10140 10132
10141 mask = (0x01 << tindex); 10133 mask = (0x01 << tindex);
10142 hscb = scb->hscb; 10134 hscb = scb->hscb;
@@ -10209,8 +10201,10 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10209 memcpy(scb->cmnd, cmd->cmnd, cmd->cmd_len); 10201 memcpy(scb->cmnd, cmd->cmnd, cmd->cmd_len);
10210 hscb->SCSI_cmd_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, scb->cmnd)); 10202 hscb->SCSI_cmd_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, scb->cmnd));
10211 10203
10212 if (cmd->use_sg) 10204 use_sg = scsi_dma_map(cmd);
10213 { 10205 BUG_ON(use_sg < 0);
10206
10207 if (use_sg) {
10214 struct scatterlist *sg; /* Must be mid-level SCSI code scatterlist */ 10208 struct scatterlist *sg; /* Must be mid-level SCSI code scatterlist */
10215 10209
10216 /* 10210 /*
@@ -10219,11 +10213,11 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10219 * differences and the kernel SG list uses virtual addresses where 10213 * differences and the kernel SG list uses virtual addresses where
10220 * we need physical addresses. 10214 * we need physical addresses.
10221 */ 10215 */
10222 int i, use_sg; 10216 int i;
10223 10217
10224 sg = (struct scatterlist *)cmd->request_buffer;
10225 scb->sg_length = 0; 10218 scb->sg_length = 0;
10226 use_sg = pci_map_sg(p->pdev, sg, cmd->use_sg, cmd->sc_data_direction); 10219
10220
10227 /* 10221 /*
10228 * Copy the segments into the SG array. NOTE!!! - We used to 10222 * Copy the segments into the SG array. NOTE!!! - We used to
10229 * have the first entry both in the data_pointer area and the first 10223 * have the first entry both in the data_pointer area and the first
@@ -10231,10 +10225,9 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10231 * entry in both places, but now we download the address of 10225 * entry in both places, but now we download the address of
10232 * scb->sg_list[1] instead of 0 to the sg pointer in the hscb. 10226 * scb->sg_list[1] instead of 0 to the sg pointer in the hscb.
10233 */ 10227 */
10234 for (i = 0; i < use_sg; i++) 10228 scsi_for_each_sg(cmd, sg, use_sg, i) {
10235 { 10229 unsigned int len = sg_dma_len(sg);
10236 unsigned int len = sg_dma_len(sg+i); 10230 scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg));
10237 scb->sg_list[i].address = cpu_to_le32(sg_dma_address(sg+i));
10238 scb->sg_list[i].length = cpu_to_le32(len); 10231 scb->sg_list[i].length = cpu_to_le32(len);
10239 scb->sg_length += len; 10232 scb->sg_length += len;
10240 } 10233 }
@@ -10244,33 +10237,13 @@ static void aic7xxx_buildscb(struct aic7xxx_host *p, struct scsi_cmnd *cmd,
10244 scb->sg_count = i; 10237 scb->sg_count = i;
10245 hscb->SG_segment_count = i; 10238 hscb->SG_segment_count = i;
10246 hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[1])); 10239 hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[1]));
10247 } 10240 } else {
10248 else
10249 {
10250 if (cmd->request_bufflen)
10251 {
10252 unsigned int address = pci_map_single(p->pdev, cmd->request_buffer,
10253 cmd->request_bufflen,
10254 cmd->sc_data_direction);
10255 aic7xxx_mapping(cmd) = address;
10256 scb->sg_list[0].address = cpu_to_le32(address);
10257 scb->sg_list[0].length = cpu_to_le32(cmd->request_bufflen);
10258 scb->sg_count = 1;
10259 scb->sg_length = cmd->request_bufflen;
10260 hscb->SG_segment_count = 1;
10261 hscb->SG_list_pointer = cpu_to_le32(SCB_DMA_ADDR(scb, &scb->sg_list[0]));
10262 hscb->data_count = scb->sg_list[0].length;
10263 hscb->data_pointer = scb->sg_list[0].address;
10264 }
10265 else
10266 {
10267 scb->sg_count = 0; 10241 scb->sg_count = 0;
10268 scb->sg_length = 0; 10242 scb->sg_length = 0;
10269 hscb->SG_segment_count = 0; 10243 hscb->SG_segment_count = 0;
10270 hscb->SG_list_pointer = 0; 10244 hscb->SG_list_pointer = 0;
10271 hscb->data_count = 0; 10245 hscb->data_count = 0;
10272 hscb->data_pointer = 0; 10246 hscb->data_pointer = 0;
10273 }
10274 } 10247 }
10275} 10248}
10276 10249
diff --git a/drivers/scsi/amiga7xx.c b/drivers/scsi/amiga7xx.c
deleted file mode 100644
index d5d3c4d5a253..000000000000
--- a/drivers/scsi/amiga7xx.c
+++ /dev/null
@@ -1,138 +0,0 @@
1/*
2 * Detection routine for the NCR53c710 based Amiga SCSI Controllers for Linux.
3 * Amiga MacroSystemUS WarpEngine SCSI controller.
4 * Amiga Technologies A4000T SCSI controller.
5 * Amiga Technologies/DKB A4091 SCSI controller.
6 *
7 * Written 1997 by Alan Hourihane <alanh@fairlite.demon.co.uk>
8 * plus modifications of the 53c7xx.c driver to support the Amiga.
9 */
10#include <linux/types.h>
11#include <linux/mm.h>
12#include <linux/blkdev.h>
13#include <linux/zorro.h>
14#include <linux/stat.h>
15
16#include <asm/setup.h>
17#include <asm/page.h>
18#include <asm/pgtable.h>
19#include <asm/amigaints.h>
20#include <asm/amigahw.h>
21#include <asm/dma.h>
22#include <asm/irq.h>
23
24#include "scsi.h"
25#include <scsi/scsi_host.h>
26#include "53c7xx.h"
27#include "amiga7xx.h"
28
29
30static int amiga7xx_register_one(struct scsi_host_template *tpnt,
31 unsigned long address)
32{
33 long long options;
34 int clock;
35
36 if (!request_mem_region(address, 0x1000, "ncr53c710"))
37 return 0;
38
39 address = (unsigned long)z_ioremap(address, 0x1000);
40 options = OPTION_MEMORY_MAPPED | OPTION_DEBUG_TEST1 | OPTION_INTFLY |
41 OPTION_SYNCHRONOUS | OPTION_ALWAYS_SYNCHRONOUS |
42 OPTION_DISCONNECT;
43 clock = 50000000; /* 50 MHz SCSI Clock */
44 ncr53c7xx_init(tpnt, 0, 710, address, 0, IRQ_AMIGA_PORTS, DMA_NONE,
45 options, clock);
46 return 1;
47}
48
49
50#ifdef CONFIG_ZORRO
51
52static struct {
53 zorro_id id;
54 unsigned long offset;
55 int absolute; /* offset is absolute address */
56} amiga7xx_table[] = {
57 { .id = ZORRO_PROD_PHASE5_BLIZZARD_603E_PLUS, .offset = 0xf40000,
58 .absolute = 1 },
59 { .id = ZORRO_PROD_MACROSYSTEMS_WARP_ENGINE_40xx, .offset = 0x40000 },
60 { .id = ZORRO_PROD_CBM_A4091_1, .offset = 0x800000 },
61 { .id = ZORRO_PROD_CBM_A4091_2, .offset = 0x800000 },
62 { .id = ZORRO_PROD_GVP_GFORCE_040_060, .offset = 0x40000 },
63 { 0 }
64};
65
66static int __init amiga7xx_zorro_detect(struct scsi_host_template *tpnt)
67{
68 int num = 0, i;
69 struct zorro_dev *z = NULL;
70 unsigned long address;
71
72 while ((z = zorro_find_device(ZORRO_WILDCARD, z))) {
73 for (i = 0; amiga7xx_table[i].id; i++)
74 if (z->id == amiga7xx_table[i].id)
75 break;
76 if (!amiga7xx_table[i].id)
77 continue;
78 if (amiga7xx_table[i].absolute)
79 address = amiga7xx_table[i].offset;
80 else
81 address = z->resource.start + amiga7xx_table[i].offset;
82 num += amiga7xx_register_one(tpnt, address);
83 }
84 return num;
85}
86
87#endif /* CONFIG_ZORRO */
88
89
90int __init amiga7xx_detect(struct scsi_host_template *tpnt)
91{
92 static unsigned char called = 0;
93 int num = 0;
94
95 if (called || !MACH_IS_AMIGA)
96 return 0;
97
98 tpnt->proc_name = "Amiga7xx";
99
100 if (AMIGAHW_PRESENT(A4000_SCSI))
101 num += amiga7xx_register_one(tpnt, 0xdd0040);
102
103#ifdef CONFIG_ZORRO
104 num += amiga7xx_zorro_detect(tpnt);
105#endif
106
107 called = 1;
108 return num;
109}
110
111static int amiga7xx_release(struct Scsi_Host *shost)
112{
113 if (shost->irq)
114 free_irq(shost->irq, NULL);
115 if (shost->dma_channel != 0xff)
116 free_dma(shost->dma_channel);
117 if (shost->io_port && shost->n_io_port)
118 release_region(shost->io_port, shost->n_io_port);
119 scsi_unregister(shost);
120 return 0;
121}
122
123static struct scsi_host_template driver_template = {
124 .name = "Amiga NCR53c710 SCSI",
125 .detect = amiga7xx_detect,
126 .release = amiga7xx_release,
127 .queuecommand = NCR53c7xx_queue_command,
128 .abort = NCR53c7xx_abort,
129 .reset = NCR53c7xx_reset,
130 .can_queue = 24,
131 .this_id = 7,
132 .sg_tablesize = 63,
133 .cmd_per_lun = 3,
134 .use_clustering = DISABLE_CLUSTERING
135};
136
137
138#include "scsi_module.c"
diff --git a/drivers/scsi/amiga7xx.h b/drivers/scsi/amiga7xx.h
deleted file mode 100644
index 7cd63a996886..000000000000
--- a/drivers/scsi/amiga7xx.h
+++ /dev/null
@@ -1,23 +0,0 @@
1#ifndef AMIGA7XX_H
2
3#include <linux/types.h>
4
5int amiga7xx_detect(struct scsi_host_template *);
6const char *NCR53c7x0_info(void);
7int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
8int NCR53c7xx_abort(Scsi_Cmnd *);
9int NCR53c7x0_release (struct Scsi_Host *);
10int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
11void NCR53c7x0_intr(int irq, void *dev_id);
12
13#ifndef CMD_PER_LUN
14#define CMD_PER_LUN 3
15#endif
16
17#ifndef CAN_QUEUE
18#define CAN_QUEUE 24
19#endif
20
21#include <scsi/scsicam.h>
22
23#endif /* AMIGA7XX_H */
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 8b46158cc045..672df79d7e39 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -369,19 +369,9 @@ static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
369 369
370static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) 370static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
371{ 371{
372 struct AdapterControlBlock *acb = ccb->acb;
373 struct scsi_cmnd *pcmd = ccb->pcmd; 372 struct scsi_cmnd *pcmd = ccb->pcmd;
374 373
375 if (pcmd->use_sg != 0) { 374 scsi_dma_unmap(pcmd);
376 struct scatterlist *sl;
377
378 sl = (struct scatterlist *)pcmd->request_buffer;
379 pci_unmap_sg(acb->pdev, sl, pcmd->use_sg, pcmd->sc_data_direction);
380 }
381 else if (pcmd->request_bufflen != 0)
382 pci_unmap_single(acb->pdev,
383 pcmd->SCp.dma_handle,
384 pcmd->request_bufflen, pcmd->sc_data_direction);
385} 375}
386 376
387static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag) 377static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
@@ -551,6 +541,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
551 int8_t *psge = (int8_t *)&arcmsr_cdb->u; 541 int8_t *psge = (int8_t *)&arcmsr_cdb->u;
552 uint32_t address_lo, address_hi; 542 uint32_t address_lo, address_hi;
553 int arccdbsize = 0x30; 543 int arccdbsize = 0x30;
544 int nseg;
554 545
555 ccb->pcmd = pcmd; 546 ccb->pcmd = pcmd;
556 memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB)); 547 memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
@@ -561,20 +552,20 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
561 arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len; 552 arcmsr_cdb->CdbLength = (uint8_t)pcmd->cmd_len;
562 arcmsr_cdb->Context = (unsigned long)arcmsr_cdb; 553 arcmsr_cdb->Context = (unsigned long)arcmsr_cdb;
563 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len); 554 memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
564 if (pcmd->use_sg) { 555
565 int length, sgcount, i, cdb_sgcount = 0; 556 nseg = scsi_dma_map(pcmd);
566 struct scatterlist *sl; 557 BUG_ON(nseg < 0);
567 558
568 /* Get Scatter Gather List from scsiport. */ 559 if (nseg) {
569 sl = (struct scatterlist *) pcmd->request_buffer; 560 int length, i, cdb_sgcount = 0;
570 sgcount = pci_map_sg(acb->pdev, sl, pcmd->use_sg, 561 struct scatterlist *sg;
571 pcmd->sc_data_direction); 562
572 /* map stor port SG list to our iop SG List. */ 563 /* map stor port SG list to our iop SG List. */
573 for (i = 0; i < sgcount; i++) { 564 scsi_for_each_sg(pcmd, sg, nseg, i) {
574 /* Get the physical address of the current data pointer */ 565 /* Get the physical address of the current data pointer */
575 length = cpu_to_le32(sg_dma_len(sl)); 566 length = cpu_to_le32(sg_dma_len(sg));
576 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sl))); 567 address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
577 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sl))); 568 address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
578 if (address_hi == 0) { 569 if (address_hi == 0) {
579 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge; 570 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
580 571
@@ -591,32 +582,12 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
591 psge += sizeof (struct SG64ENTRY); 582 psge += sizeof (struct SG64ENTRY);
592 arccdbsize += sizeof (struct SG64ENTRY); 583 arccdbsize += sizeof (struct SG64ENTRY);
593 } 584 }
594 sl++;
595 cdb_sgcount++; 585 cdb_sgcount++;
596 } 586 }
597 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount; 587 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
598 arcmsr_cdb->DataLength = pcmd->request_bufflen; 588 arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
599 if ( arccdbsize > 256) 589 if ( arccdbsize > 256)
600 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE; 590 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
601 } else if (pcmd->request_bufflen) {
602 dma_addr_t dma_addr;
603 dma_addr = pci_map_single(acb->pdev, pcmd->request_buffer,
604 pcmd->request_bufflen, pcmd->sc_data_direction);
605 pcmd->SCp.dma_handle = dma_addr;
606 address_lo = cpu_to_le32(dma_addr_lo32(dma_addr));
607 address_hi = cpu_to_le32(dma_addr_hi32(dma_addr));
608 if (address_hi == 0) {
609 struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;
610 pdma_sg->address = address_lo;
611 pdma_sg->length = pcmd->request_bufflen;
612 } else {
613 struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;
614 pdma_sg->addresshigh = address_hi;
615 pdma_sg->address = address_lo;
616 pdma_sg->length = pcmd->request_bufflen|IS_SG64_ADDR;
617 }
618 arcmsr_cdb->sgcount = 1;
619 arcmsr_cdb->DataLength = pcmd->request_bufflen;
620 } 591 }
621 if (pcmd->sc_data_direction == DMA_TO_DEVICE ) { 592 if (pcmd->sc_data_direction == DMA_TO_DEVICE ) {
622 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE; 593 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
@@ -848,24 +819,21 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_
848 struct CMD_MESSAGE_FIELD *pcmdmessagefld; 819 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
849 int retvalue = 0, transfer_len = 0; 820 int retvalue = 0, transfer_len = 0;
850 char *buffer; 821 char *buffer;
822 struct scatterlist *sg;
851 uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 | 823 uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
852 (uint32_t ) cmd->cmnd[6] << 16 | 824 (uint32_t ) cmd->cmnd[6] << 16 |
853 (uint32_t ) cmd->cmnd[7] << 8 | 825 (uint32_t ) cmd->cmnd[7] << 8 |
854 (uint32_t ) cmd->cmnd[8]; 826 (uint32_t ) cmd->cmnd[8];
855 /* 4 bytes: Areca io control code */ 827 /* 4 bytes: Areca io control code */
856 if (cmd->use_sg) {
857 struct scatterlist *sg = (struct scatterlist *)cmd->request_buffer;
858 828
859 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 829 sg = scsi_sglist(cmd);
860 if (cmd->use_sg > 1) { 830 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
861 retvalue = ARCMSR_MESSAGE_FAIL; 831 if (scsi_sg_count(cmd) > 1) {
862 goto message_out; 832 retvalue = ARCMSR_MESSAGE_FAIL;
863 } 833 goto message_out;
864 transfer_len += sg->length;
865 } else {
866 buffer = cmd->request_buffer;
867 transfer_len = cmd->request_bufflen;
868 } 834 }
835 transfer_len += sg->length;
836
869 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) { 837 if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
870 retvalue = ARCMSR_MESSAGE_FAIL; 838 retvalue = ARCMSR_MESSAGE_FAIL;
871 goto message_out; 839 goto message_out;
@@ -1057,12 +1025,9 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_
1057 retvalue = ARCMSR_MESSAGE_FAIL; 1025 retvalue = ARCMSR_MESSAGE_FAIL;
1058 } 1026 }
1059 message_out: 1027 message_out:
1060 if (cmd->use_sg) { 1028 sg = scsi_sglist(cmd);
1061 struct scatterlist *sg; 1029 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1062 1030
1063 sg = (struct scatterlist *) cmd->request_buffer;
1064 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1065 }
1066 return retvalue; 1031 return retvalue;
1067} 1032}
1068 1033
@@ -1085,6 +1050,7 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1085 case INQUIRY: { 1050 case INQUIRY: {
1086 unsigned char inqdata[36]; 1051 unsigned char inqdata[36];
1087 char *buffer; 1052 char *buffer;
1053 struct scatterlist *sg;
1088 1054
1089 if (cmd->device->lun) { 1055 if (cmd->device->lun) {
1090 cmd->result = (DID_TIME_OUT << 16); 1056 cmd->result = (DID_TIME_OUT << 16);
@@ -1104,21 +1070,14 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
1104 strncpy(&inqdata[16], "RAID controller ", 16); 1070 strncpy(&inqdata[16], "RAID controller ", 16);
1105 /* Product Identification */ 1071 /* Product Identification */
1106 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 1072 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
1107 if (cmd->use_sg) {
1108 struct scatterlist *sg;
1109 1073
1110 sg = (struct scatterlist *) cmd->request_buffer; 1074 sg = scsi_sglist(cmd);
1111 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset; 1075 buffer = kmap_atomic(sg->page, KM_IRQ0) + sg->offset;
1112 } else { 1076
1113 buffer = cmd->request_buffer;
1114 }
1115 memcpy(buffer, inqdata, sizeof(inqdata)); 1077 memcpy(buffer, inqdata, sizeof(inqdata));
1116 if (cmd->use_sg) { 1078 sg = scsi_sglist(cmd);
1117 struct scatterlist *sg; 1079 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1118 1080
1119 sg = (struct scatterlist *) cmd->request_buffer;
1120 kunmap_atomic(buffer - sg->offset, KM_IRQ0);
1121 }
1122 cmd->scsi_done(cmd); 1081 cmd->scsi_done(cmd);
1123 } 1082 }
1124 break; 1083 break;
diff --git a/drivers/scsi/bvme6000.c b/drivers/scsi/bvme6000.c
deleted file mode 100644
index 599b400a3c43..000000000000
--- a/drivers/scsi/bvme6000.c
+++ /dev/null
@@ -1,76 +0,0 @@
1/*
2 * Detection routine for the NCR53c710 based BVME6000 SCSI Controllers for Linux.
3 *
4 * Based on work by Alan Hourihane
5 */
6#include <linux/types.h>
7#include <linux/mm.h>
8#include <linux/blkdev.h>
9#include <linux/zorro.h>
10
11#include <asm/setup.h>
12#include <asm/page.h>
13#include <asm/pgtable.h>
14#include <asm/bvme6000hw.h>
15#include <asm/irq.h>
16
17#include "scsi.h"
18#include <scsi/scsi_host.h>
19#include "53c7xx.h"
20#include "bvme6000.h"
21
22#include<linux/stat.h>
23
24
25int bvme6000_scsi_detect(struct scsi_host_template *tpnt)
26{
27 static unsigned char called = 0;
28 int clock;
29 long long options;
30
31 if (called)
32 return 0;
33 if (!MACH_IS_BVME6000)
34 return 0;
35
36 tpnt->proc_name = "BVME6000";
37
38 options = OPTION_MEMORY_MAPPED|OPTION_DEBUG_TEST1|OPTION_INTFLY|OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS|OPTION_DISCONNECT;
39
40 clock = 40000000; /* 66MHz SCSI Clock */
41
42 ncr53c7xx_init(tpnt, 0, 710, (unsigned long)BVME_NCR53C710_BASE,
43 0, BVME_IRQ_SCSI, DMA_NONE,
44 options, clock);
45 called = 1;
46 return 1;
47}
48
49static int bvme6000_scsi_release(struct Scsi_Host *shost)
50{
51 if (shost->irq)
52 free_irq(shost->irq, NULL);
53 if (shost->dma_channel != 0xff)
54 free_dma(shost->dma_channel);
55 if (shost->io_port && shost->n_io_port)
56 release_region(shost->io_port, shost->n_io_port);
57 scsi_unregister(shost);
58 return 0;
59}
60
61static struct scsi_host_template driver_template = {
62 .name = "BVME6000 NCR53c710 SCSI",
63 .detect = bvme6000_scsi_detect,
64 .release = bvme6000_scsi_release,
65 .queuecommand = NCR53c7xx_queue_command,
66 .abort = NCR53c7xx_abort,
67 .reset = NCR53c7xx_reset,
68 .can_queue = 24,
69 .this_id = 7,
70 .sg_tablesize = 63,
71 .cmd_per_lun = 3,
72 .use_clustering = DISABLE_CLUSTERING
73};
74
75
76#include "scsi_module.c"
diff --git a/drivers/scsi/bvme6000.h b/drivers/scsi/bvme6000.h
deleted file mode 100644
index ea3e4b2b9220..000000000000
--- a/drivers/scsi/bvme6000.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef BVME6000_SCSI_H
2#define BVME6000_SCSI_H
3
4#include <linux/types.h>
5
6int bvme6000_scsi_detect(struct scsi_host_template *);
7const char *NCR53c7x0_info(void);
8int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
9int NCR53c7xx_abort(Scsi_Cmnd *);
10int NCR53c7x0_release (struct Scsi_Host *);
11int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
12void NCR53c7x0_intr(int irq, void *dev_id);
13
14#ifndef CMD_PER_LUN
15#define CMD_PER_LUN 3
16#endif
17
18#ifndef CAN_QUEUE
19#define CAN_QUEUE 24
20#endif
21
22#include <scsi/scsicam.h>
23
24#endif /* BVME6000_SCSI_H */
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 2d38025861a5..a83e9f150b97 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -1609,8 +1609,9 @@ static int eata2x_detect(struct scsi_host_template *tpnt)
1609 1609
1610static void map_dma(unsigned int i, struct hostdata *ha) 1610static void map_dma(unsigned int i, struct hostdata *ha)
1611{ 1611{
1612 unsigned int k, count, pci_dir; 1612 unsigned int k, pci_dir;
1613 struct scatterlist *sgpnt; 1613 int count;
1614 struct scatterlist *sg;
1614 struct mscp *cpp; 1615 struct mscp *cpp;
1615 struct scsi_cmnd *SCpnt; 1616 struct scsi_cmnd *SCpnt;
1616 1617
@@ -1625,38 +1626,19 @@ static void map_dma(unsigned int i, struct hostdata *ha)
1625 1626
1626 cpp->sense_len = sizeof SCpnt->sense_buffer; 1627 cpp->sense_len = sizeof SCpnt->sense_buffer;
1627 1628
1628 if (!SCpnt->use_sg) { 1629 count = scsi_dma_map(SCpnt);
1629 1630 BUG_ON(count < 0);
1630 /* If we get here with PCI_DMA_NONE, pci_map_single triggers a BUG() */ 1631 scsi_for_each_sg(SCpnt, sg, count, k) {
1631 if (!SCpnt->request_bufflen) 1632 cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
1632 pci_dir = PCI_DMA_BIDIRECTIONAL; 1633 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
1633
1634 if (SCpnt->request_buffer)
1635 cpp->data_address = H2DEV(pci_map_single(ha->pdev,
1636 SCpnt->
1637 request_buffer,
1638 SCpnt->
1639 request_bufflen,
1640 pci_dir));
1641
1642 cpp->data_len = H2DEV(SCpnt->request_bufflen);
1643 return;
1644 }
1645
1646 sgpnt = (struct scatterlist *)SCpnt->request_buffer;
1647 count = pci_map_sg(ha->pdev, sgpnt, SCpnt->use_sg, pci_dir);
1648
1649 for (k = 0; k < count; k++) {
1650 cpp->sglist[k].address = H2DEV(sg_dma_address(&sgpnt[k]));
1651 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(&sgpnt[k]));
1652 } 1634 }
1653 1635
1654 cpp->sg = 1; 1636 cpp->sg = 1;
1655 cpp->data_address = H2DEV(pci_map_single(ha->pdev, cpp->sglist, 1637 cpp->data_address = H2DEV(pci_map_single(ha->pdev, cpp->sglist,
1656 SCpnt->use_sg * 1638 scsi_sg_count(SCpnt) *
1657 sizeof(struct sg_list), 1639 sizeof(struct sg_list),
1658 pci_dir)); 1640 pci_dir));
1659 cpp->data_len = H2DEV((SCpnt->use_sg * sizeof(struct sg_list))); 1641 cpp->data_len = H2DEV((scsi_sg_count(SCpnt) * sizeof(struct sg_list)));
1660} 1642}
1661 1643
1662static void unmap_dma(unsigned int i, struct hostdata *ha) 1644static void unmap_dma(unsigned int i, struct hostdata *ha)
@@ -1673,9 +1655,7 @@ static void unmap_dma(unsigned int i, struct hostdata *ha)
1673 pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr), 1655 pci_unmap_single(ha->pdev, DEV2H(cpp->sense_addr),
1674 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE); 1656 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
1675 1657
1676 if (SCpnt->use_sg) 1658 scsi_dma_unmap(SCpnt);
1677 pci_unmap_sg(ha->pdev, SCpnt->request_buffer, SCpnt->use_sg,
1678 pci_dir);
1679 1659
1680 if (!DEV2H(cpp->data_len)) 1660 if (!DEV2H(cpp->data_len))
1681 pci_dir = PCI_DMA_BIDIRECTIONAL; 1661 pci_dir = PCI_DMA_BIDIRECTIONAL;
@@ -1700,9 +1680,9 @@ static void sync_dma(unsigned int i, struct hostdata *ha)
1700 DEV2H(cpp->sense_len), 1680 DEV2H(cpp->sense_len),
1701 PCI_DMA_FROMDEVICE); 1681 PCI_DMA_FROMDEVICE);
1702 1682
1703 if (SCpnt->use_sg) 1683 if (scsi_sg_count(SCpnt))
1704 pci_dma_sync_sg_for_cpu(ha->pdev, SCpnt->request_buffer, 1684 pci_dma_sync_sg_for_cpu(ha->pdev, scsi_sglist(SCpnt),
1705 SCpnt->use_sg, pci_dir); 1685 scsi_sg_count(SCpnt), pci_dir);
1706 1686
1707 if (!DEV2H(cpp->data_len)) 1687 if (!DEV2H(cpp->data_len))
1708 pci_dir = PCI_DMA_BIDIRECTIONAL; 1688 pci_dir = PCI_DMA_BIDIRECTIONAL;
diff --git a/drivers/scsi/fdomain.c b/drivers/scsi/fdomain.c
index 5d4ea6f77953..36169d597e98 100644
--- a/drivers/scsi/fdomain.c
+++ b/drivers/scsi/fdomain.c
@@ -410,6 +410,8 @@ static irqreturn_t do_fdomain_16x0_intr( int irq, void *dev_id );
410static char * fdomain = NULL; 410static char * fdomain = NULL;
411module_param(fdomain, charp, 0); 411module_param(fdomain, charp, 0);
412 412
413#ifndef PCMCIA
414
413static unsigned long addresses[] = { 415static unsigned long addresses[] = {
414 0xc8000, 416 0xc8000,
415 0xca000, 417 0xca000,
@@ -426,6 +428,8 @@ static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
426 428
427static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 }; 429static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
428 430
431#endif /* !PCMCIA */
432
429/* 433/*
430 434
431 READ THIS BEFORE YOU ADD A SIGNATURE! 435 READ THIS BEFORE YOU ADD A SIGNATURE!
@@ -458,6 +462,8 @@ static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
458 462
459*/ 463*/
460 464
465#ifndef PCMCIA
466
461static struct signature { 467static struct signature {
462 const char *signature; 468 const char *signature;
463 int sig_offset; 469 int sig_offset;
@@ -503,6 +509,8 @@ static struct signature {
503 509
504#define SIGNATURE_COUNT ARRAY_SIZE(signatures) 510#define SIGNATURE_COUNT ARRAY_SIZE(signatures)
505 511
512#endif /* !PCMCIA */
513
506static void print_banner( struct Scsi_Host *shpnt ) 514static void print_banner( struct Scsi_Host *shpnt )
507{ 515{
508 if (!shpnt) return; /* This won't ever happen */ 516 if (!shpnt) return; /* This won't ever happen */
@@ -633,6 +641,8 @@ static int fdomain_test_loopback( void )
633 return 0; 641 return 0;
634} 642}
635 643
644#ifndef PCMCIA
645
636/* fdomain_get_irq assumes that we have a valid MCA ID for a 646/* fdomain_get_irq assumes that we have a valid MCA ID for a
637 TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the 647 TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the
638 bios_base matches these ports. If someone was unlucky enough to have 648 bios_base matches these ports. If someone was unlucky enough to have
@@ -667,7 +677,6 @@ static int fdomain_get_irq( int base )
667 677
668static int fdomain_isa_detect( int *irq, int *iobase ) 678static int fdomain_isa_detect( int *irq, int *iobase )
669{ 679{
670#ifndef PCMCIA
671 int i, j; 680 int i, j;
672 int base = 0xdeadbeef; 681 int base = 0xdeadbeef;
673 int flag = 0; 682 int flag = 0;
@@ -786,11 +795,22 @@ found:
786 *iobase = base; 795 *iobase = base;
787 796
788 return 1; /* success */ 797 return 1; /* success */
789#else
790 return 0;
791#endif
792} 798}
793 799
800#else /* PCMCIA */
801
802static int fdomain_isa_detect( int *irq, int *iobase )
803{
804 if (irq)
805 *irq = 0;
806 if (iobase)
807 *iobase = 0;
808 return 0;
809}
810
811#endif /* !PCMCIA */
812
813
794/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int* 814/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
795 iobase) This function gets the Interrupt Level and I/O base address from 815 iobase) This function gets the Interrupt Level and I/O base address from
796 the PCI configuration registers. */ 816 the PCI configuration registers. */
@@ -1345,16 +1365,15 @@ static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
1345 1365
1346#if ERRORS_ONLY 1366#if ERRORS_ONLY
1347 if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) { 1367 if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
1348 if ((unsigned char)(*((char *)current_SC->request_buffer+2)) & 0x0f) { 1368 char *buf = scsi_sglist(current_SC);
1369 if ((unsigned char)(*(buf + 2)) & 0x0f) {
1349 unsigned char key; 1370 unsigned char key;
1350 unsigned char code; 1371 unsigned char code;
1351 unsigned char qualifier; 1372 unsigned char qualifier;
1352 1373
1353 key = (unsigned char)(*((char *)current_SC->request_buffer + 2)) 1374 key = (unsigned char)(*(buf + 2)) & 0x0f;
1354 & 0x0f; 1375 code = (unsigned char)(*(buf + 12));
1355 code = (unsigned char)(*((char *)current_SC->request_buffer + 12)); 1376 qualifier = (unsigned char)(*(buf + 13));
1356 qualifier = (unsigned char)(*((char *)current_SC->request_buffer
1357 + 13));
1358 1377
1359 if (key != UNIT_ATTENTION 1378 if (key != UNIT_ATTENTION
1360 && !(key == NOT_READY 1379 && !(key == NOT_READY
@@ -1405,8 +1424,8 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
1405 printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n", 1424 printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
1406 SCpnt->target, 1425 SCpnt->target,
1407 *(unsigned char *)SCpnt->cmnd, 1426 *(unsigned char *)SCpnt->cmnd,
1408 SCpnt->use_sg, 1427 scsi_sg_count(SCpnt),
1409 SCpnt->request_bufflen ); 1428 scsi_bufflen(SCpnt));
1410#endif 1429#endif
1411 1430
1412 fdomain_make_bus_idle(); 1431 fdomain_make_bus_idle();
@@ -1416,20 +1435,19 @@ static int fdomain_16x0_queue(struct scsi_cmnd *SCpnt,
1416 1435
1417 /* Initialize static data */ 1436 /* Initialize static data */
1418 1437
1419 if (current_SC->use_sg) { 1438 if (scsi_sg_count(current_SC)) {
1420 current_SC->SCp.buffer = 1439 current_SC->SCp.buffer = scsi_sglist(current_SC);
1421 (struct scatterlist *)current_SC->request_buffer; 1440 current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page)
1422 current_SC->SCp.ptr = page_address(current_SC->SCp.buffer->page) + current_SC->SCp.buffer->offset; 1441 + current_SC->SCp.buffer->offset;
1423 current_SC->SCp.this_residual = current_SC->SCp.buffer->length; 1442 current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
1424 current_SC->SCp.buffers_residual = current_SC->use_sg - 1; 1443 current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
1425 } else { 1444 } else {
1426 current_SC->SCp.ptr = (char *)current_SC->request_buffer; 1445 current_SC->SCp.ptr = 0;
1427 current_SC->SCp.this_residual = current_SC->request_bufflen; 1446 current_SC->SCp.this_residual = 0;
1428 current_SC->SCp.buffer = NULL; 1447 current_SC->SCp.buffer = NULL;
1429 current_SC->SCp.buffers_residual = 0; 1448 current_SC->SCp.buffers_residual = 0;
1430 } 1449 }
1431 1450
1432
1433 current_SC->SCp.Status = 0; 1451 current_SC->SCp.Status = 0;
1434 current_SC->SCp.Message = 0; 1452 current_SC->SCp.Message = 0;
1435 current_SC->SCp.have_data_in = 0; 1453 current_SC->SCp.have_data_in = 0;
@@ -1472,8 +1490,8 @@ static void print_info(struct scsi_cmnd *SCpnt)
1472 SCpnt->SCp.phase, 1490 SCpnt->SCp.phase,
1473 SCpnt->device->id, 1491 SCpnt->device->id,
1474 *(unsigned char *)SCpnt->cmnd, 1492 *(unsigned char *)SCpnt->cmnd,
1475 SCpnt->use_sg, 1493 scsi_sg_count(SCpnt),
1476 SCpnt->request_bufflen ); 1494 scsi_bufflen(SCpnt));
1477 printk( "sent_command = %d, have_data_in = %d, timeout = %d\n", 1495 printk( "sent_command = %d, have_data_in = %d, timeout = %d\n",
1478 SCpnt->SCp.sent_command, 1496 SCpnt->SCp.sent_command,
1479 SCpnt->SCp.have_data_in, 1497 SCpnt->SCp.have_data_in,
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 60446b88f721..d0b95ce0ba00 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -876,7 +876,7 @@ static int __init gdth_search_pci(gdth_pci_str *pcistr)
876/* Vortex only makes RAID controllers. 876/* Vortex only makes RAID controllers.
877 * We do not really want to specify all 550 ids here, so wildcard match. 877 * We do not really want to specify all 550 ids here, so wildcard match.
878 */ 878 */
879static struct pci_device_id gdthtable[] __attribute_used__ = { 879static struct pci_device_id gdthtable[] __maybe_unused = {
880 {PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID}, 880 {PCI_VENDOR_ID_VORTEX,PCI_ANY_ID,PCI_ANY_ID, PCI_ANY_ID},
881 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID}, 881 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC,PCI_ANY_ID,PCI_ANY_ID},
882 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID}, 882 {PCI_VENDOR_ID_INTEL,PCI_DEVICE_ID_INTEL_SRC_XSCALE,PCI_ANY_ID,PCI_ANY_ID},
@@ -1955,7 +1955,7 @@ static int __init gdth_search_drives(int hanum)
1955 for (j = 0; j < 12; ++j) 1955 for (j = 0; j < 12; ++j)
1956 rtc[j] = CMOS_READ(j); 1956 rtc[j] = CMOS_READ(j);
1957 } while (rtc[0] != CMOS_READ(0)); 1957 } while (rtc[0] != CMOS_READ(0));
1958 spin_lock_irqrestore(&rtc_lock, flags); 1958 spin_unlock_irqrestore(&rtc_lock, flags);
1959 TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0], 1959 TRACE2(("gdth_search_drives(): RTC: %x/%x/%x\n",*(ulong32 *)&rtc[0],
1960 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8])); 1960 *(ulong32 *)&rtc[4], *(ulong32 *)&rtc[8]));
1961 /* 3. send to controller firmware */ 1961 /* 3. send to controller firmware */
diff --git a/drivers/scsi/ibmmca.c b/drivers/scsi/ibmmca.c
index 0e57fb6964d5..4275d1b04ced 100644
--- a/drivers/scsi/ibmmca.c
+++ b/drivers/scsi/ibmmca.c
@@ -31,14 +31,21 @@
31#include <linux/mca.h> 31#include <linux/mca.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <linux/init.h> 33#include <linux/init.h>
34#include <linux/mca-legacy.h>
35 34
36#include <asm/system.h> 35#include <asm/system.h>
37#include <asm/io.h> 36#include <asm/io.h>
38 37
39#include "scsi.h" 38#include "scsi.h"
40#include <scsi/scsi_host.h> 39#include <scsi/scsi_host.h>
41#include "ibmmca.h" 40
41/* Common forward declarations for all Linux-versions: */
42static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
43static int ibmmca_abort (Scsi_Cmnd *);
44static int ibmmca_host_reset (Scsi_Cmnd *);
45static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
46static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout);
47
48
42 49
43/* current version of this driver-source: */ 50/* current version of this driver-source: */
44#define IBMMCA_SCSI_DRIVER_VERSION "4.0b-ac" 51#define IBMMCA_SCSI_DRIVER_VERSION "4.0b-ac"
@@ -65,11 +72,11 @@
65#define IM_DEBUG_CMD_DEVICE TYPE_TAPE 72#define IM_DEBUG_CMD_DEVICE TYPE_TAPE
66 73
67/* relative addresses of hardware registers on a subsystem */ 74/* relative addresses of hardware registers on a subsystem */
68#define IM_CMD_REG(hi) (hosts[(hi)]->io_port) /*Command Interface, (4 bytes long) */ 75#define IM_CMD_REG(h) ((h)->io_port) /*Command Interface, (4 bytes long) */
69#define IM_ATTN_REG(hi) (hosts[(hi)]->io_port+4) /*Attention (1 byte) */ 76#define IM_ATTN_REG(h) ((h)->io_port+4) /*Attention (1 byte) */
70#define IM_CTR_REG(hi) (hosts[(hi)]->io_port+5) /*Basic Control (1 byte) */ 77#define IM_CTR_REG(h) ((h)->io_port+5) /*Basic Control (1 byte) */
71#define IM_INTR_REG(hi) (hosts[(hi)]->io_port+6) /*Interrupt Status (1 byte, r/o) */ 78#define IM_INTR_REG(h) ((h)->io_port+6) /*Interrupt Status (1 byte, r/o) */
72#define IM_STAT_REG(hi) (hosts[(hi)]->io_port+7) /*Basic Status (1 byte, read only) */ 79#define IM_STAT_REG(h) ((h)->io_port+7) /*Basic Status (1 byte, read only) */
73 80
74/* basic I/O-port of first adapter */ 81/* basic I/O-port of first adapter */
75#define IM_IO_PORT 0x3540 82#define IM_IO_PORT 0x3540
@@ -266,30 +273,36 @@ static int global_adapter_speed = 0; /* full speed by default */
266 if ((display_mode & LED_ACTIVITY)||(!display_mode)) \ 273 if ((display_mode & LED_ACTIVITY)||(!display_mode)) \
267 outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); } 274 outb(inb(PS2_SYS_CTR) & 0x3f, PS2_SYS_CTR); }
268 275
269/*list of supported subsystems */
270struct subsys_list_struct {
271 unsigned short mca_id;
272 char *description;
273};
274
275/* types of different supported hardware that goes to hostdata special */ 276/* types of different supported hardware that goes to hostdata special */
276#define IBM_SCSI2_FW 0 277#define IBM_SCSI2_FW 0
277#define IBM_7568_WCACHE 1 278#define IBM_7568_WCACHE 1
278#define IBM_EXP_UNIT 2 279#define IBM_EXP_UNIT 2
279#define IBM_SCSI_WCACHE 3 280#define IBM_SCSI_WCACHE 3
280#define IBM_SCSI 4 281#define IBM_SCSI 4
282#define IBM_INTEGSCSI 5
281 283
282/* other special flags for hostdata structure */ 284/* other special flags for hostdata structure */
283#define FORCED_DETECTION 100 285#define FORCED_DETECTION 100
284#define INTEGRATED_SCSI 101 286#define INTEGRATED_SCSI 101
285 287
286/* List of possible IBM-SCSI-adapters */ 288/* List of possible IBM-SCSI-adapters */
287static struct subsys_list_struct subsys_list[] = { 289static short ibmmca_id_table[] = {
288 {0x8efc, "IBM SCSI-2 F/W Adapter"}, /* special = 0 */ 290 0x8efc,
289 {0x8efd, "IBM 7568 Industrial Computer SCSI Adapter w/Cache"}, /* special = 1 */ 291 0x8efd,
290 {0x8ef8, "IBM Expansion Unit SCSI Controller"}, /* special = 2 */ 292 0x8ef8,
291 {0x8eff, "IBM SCSI Adapter w/Cache"}, /* special = 3 */ 293 0x8eff,
292 {0x8efe, "IBM SCSI Adapter"}, /* special = 4 */ 294 0x8efe,
295 /* No entry for integrated SCSI, that's part of the register */
296 0
297};
298
299static const char *ibmmca_description[] = {
300 "IBM SCSI-2 F/W Adapter", /* special = 0 */
301 "IBM 7568 Industrial Computer SCSI Adapter w/Cache", /* special = 1 */
302 "IBM Expansion Unit SCSI Controller", /* special = 2 */
303 "IBM SCSI Adapter w/Cache", /* special = 3 */
304 "IBM SCSI Adapter", /* special = 4 */
305 "IBM Integrated SCSI Controller", /* special = 5 */
293}; 306};
294 307
295/* Max number of logical devices (can be up from 0 to 14). 15 is the address 308/* Max number of logical devices (can be up from 0 to 14). 15 is the address
@@ -375,30 +388,30 @@ struct ibmmca_hostdata {
375}; 388};
376 389
377/* macros to access host data structure */ 390/* macros to access host data structure */
378#define subsystem_pun(hi) (hosts[(hi)]->this_id) 391#define subsystem_pun(h) ((h)->this_id)
379#define subsystem_maxid(hi) (hosts[(hi)]->max_id) 392#define subsystem_maxid(h) ((h)->max_id)
380#define ld(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_ld) 393#define ld(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_ld)
381#define get_ldn(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_get_ldn) 394#define get_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_ldn)
382#define get_scsi(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_get_scsi) 395#define get_scsi(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_get_scsi)
383#define local_checking_phase_flag(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_local_checking_phase_flag) 396#define local_checking_phase_flag(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_local_checking_phase_flag)
384#define got_interrupt(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_got_interrupt) 397#define got_interrupt(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_got_interrupt)
385#define stat_result(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_stat_result) 398#define stat_result(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_stat_result)
386#define reset_status(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_reset_status) 399#define reset_status(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_reset_status)
387#define last_scsi_command(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_command) 400#define last_scsi_command(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_command)
388#define last_scsi_type(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_type) 401#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
389#define last_scsi_blockcount(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_blockcount) 402#define last_scsi_blockcount(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_blockcount)
390#define last_scsi_logical_block(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_logical_block) 403#define last_scsi_logical_block(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_logical_block)
391#define last_scsi_type(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_last_scsi_type) 404#define last_scsi_type(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_last_scsi_type)
392#define next_ldn(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_next_ldn) 405#define next_ldn(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_next_ldn)
393#define IBM_DS(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_IBM_DS) 406#define IBM_DS(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_IBM_DS)
394#define special(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_special) 407#define special(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_special)
395#define subsystem_connector_size(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_connector_size) 408#define subsystem_connector_size(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_connector_size)
396#define adapter_speed(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_adapter_speed) 409#define adapter_speed(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_adapter_speed)
397#define pos2(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[2]) 410#define pos2(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[2])
398#define pos3(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[3]) 411#define pos3(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[3])
399#define pos4(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[4]) 412#define pos4(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[4])
400#define pos5(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[5]) 413#define pos5(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[5])
401#define pos6(hi) (((struct ibmmca_hostdata *) hosts[(hi)]->hostdata)->_pos[6]) 414#define pos6(h) (((struct ibmmca_hostdata *) (h)->hostdata)->_pos[6])
402 415
403/* Define a arbitrary number as subsystem-marker-type. This number is, as 416/* Define a arbitrary number as subsystem-marker-type. This number is, as
404 described in the ANSI-SCSI-standard, not occupied by other device-types. */ 417 described in the ANSI-SCSI-standard, not occupied by other device-types. */
@@ -459,11 +472,6 @@ MODULE_LICENSE("GPL");
459/*counter of concurrent disk read/writes, to turn on/off disk led */ 472/*counter of concurrent disk read/writes, to turn on/off disk led */
460static int disk_rw_in_progress = 0; 473static int disk_rw_in_progress = 0;
461 474
462/* host information */
463static int found = 0;
464static struct Scsi_Host *hosts[IM_MAX_HOSTS + 1] = {
465 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL
466};
467static unsigned int pos[8]; /* whole pos register-line for diagnosis */ 475static unsigned int pos[8]; /* whole pos register-line for diagnosis */
468/* Taking into account the additions, made by ZP Gu. 476/* Taking into account the additions, made by ZP Gu.
469 * This selects now the preset value from the configfile and 477 * This selects now the preset value from the configfile and
@@ -474,70 +482,68 @@ static char ibm_ansi_order = 1;
474static char ibm_ansi_order = 0; 482static char ibm_ansi_order = 0;
475#endif 483#endif
476 484
477static void issue_cmd(int, unsigned long, unsigned char); 485static void issue_cmd(struct Scsi_Host *, unsigned long, unsigned char);
478static void internal_done(Scsi_Cmnd * cmd); 486static void internal_done(Scsi_Cmnd * cmd);
479static void check_devices(int, int); 487static void check_devices(struct Scsi_Host *, int);
480static int immediate_assign(int, unsigned int, unsigned int, unsigned int, unsigned int); 488static int immediate_assign(struct Scsi_Host *, unsigned int, unsigned int, unsigned int, unsigned int);
481static int immediate_feature(int, unsigned int, unsigned int); 489static int immediate_feature(struct Scsi_Host *, unsigned int, unsigned int);
482#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET 490#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
483static int immediate_reset(int, unsigned int); 491static int immediate_reset(struct Scsi_Host *, unsigned int);
484#endif 492#endif
485static int device_inquiry(int, int); 493static int device_inquiry(struct Scsi_Host *, int);
486static int read_capacity(int, int); 494static int read_capacity(struct Scsi_Host *, int);
487static int get_pos_info(int); 495static int get_pos_info(struct Scsi_Host *);
488static char *ti_p(int); 496static char *ti_p(int);
489static char *ti_l(int); 497static char *ti_l(int);
490static char *ibmrate(unsigned int, int); 498static char *ibmrate(unsigned int, int);
491static int probe_display(int); 499static int probe_display(int);
492static int probe_bus_mode(int); 500static int probe_bus_mode(struct Scsi_Host *);
493static int device_exists(int, int, int *, int *); 501static int device_exists(struct Scsi_Host *, int, int *, int *);
494static struct Scsi_Host *ibmmca_register(struct scsi_host_template *, int, int, int, char *);
495static int option_setup(char *); 502static int option_setup(char *);
496/* local functions needed for proc_info */ 503/* local functions needed for proc_info */
497static int ldn_access_load(int, int); 504static int ldn_access_load(struct Scsi_Host *, int);
498static int ldn_access_total_read_write(int); 505static int ldn_access_total_read_write(struct Scsi_Host *);
499 506
500static irqreturn_t interrupt_handler(int irq, void *dev_id) 507static irqreturn_t interrupt_handler(int irq, void *dev_id)
501{ 508{
502 int host_index, ihost_index;
503 unsigned int intr_reg; 509 unsigned int intr_reg;
504 unsigned int cmd_result; 510 unsigned int cmd_result;
505 unsigned int ldn; 511 unsigned int ldn;
512 unsigned long flags;
506 Scsi_Cmnd *cmd; 513 Scsi_Cmnd *cmd;
507 int lastSCSI; 514 int lastSCSI;
508 struct Scsi_Host *dev = dev_id; 515 struct device *dev = dev_id;
516 struct Scsi_Host *shpnt = dev_get_drvdata(dev);
509 517
510 spin_lock(dev->host_lock); 518 spin_lock_irqsave(shpnt->host_lock, flags);
511 /* search for one adapter-response on shared interrupt */ 519
512 for (host_index = 0; hosts[host_index] && !(inb(IM_STAT_REG(host_index)) & IM_INTR_REQUEST); host_index++); 520 if(!(inb(IM_STAT_REG(shpnt)) & IM_INTR_REQUEST)) {
513 /* return if some other device on this IRQ caused the interrupt */ 521 spin_unlock_irqrestore(shpnt->host_lock, flags);
514 if (!hosts[host_index]) {
515 spin_unlock(dev->host_lock);
516 return IRQ_NONE; 522 return IRQ_NONE;
517 } 523 }
518 524
519 /* the reset-function already did all the job, even ints got 525 /* the reset-function already did all the job, even ints got
520 renabled on the subsystem, so just return */ 526 renabled on the subsystem, so just return */
521 if ((reset_status(host_index) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(host_index) == IM_RESET_FINISHED_OK_NO_INT)) { 527 if ((reset_status(shpnt) == IM_RESET_NOT_IN_PROGRESS_NO_INT) || (reset_status(shpnt) == IM_RESET_FINISHED_OK_NO_INT)) {
522 reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS; 528 reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
523 spin_unlock(dev->host_lock); 529 spin_unlock_irqrestore(shpnt->host_lock, flags);
524 return IRQ_HANDLED; 530 return IRQ_HANDLED;
525 } 531 }
526 532
527 /*must wait for attention reg not busy, then send EOI to subsystem */ 533 /*must wait for attention reg not busy, then send EOI to subsystem */
528 while (1) { 534 while (1) {
529 if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY)) 535 if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
530 break; 536 break;
531 cpu_relax(); 537 cpu_relax();
532 } 538 }
533 ihost_index = host_index; 539
534 /*get command result and logical device */ 540 /*get command result and logical device */
535 intr_reg = (unsigned char) (inb(IM_INTR_REG(ihost_index))); 541 intr_reg = (unsigned char) (inb(IM_INTR_REG(shpnt)));
536 cmd_result = intr_reg & 0xf0; 542 cmd_result = intr_reg & 0xf0;
537 ldn = intr_reg & 0x0f; 543 ldn = intr_reg & 0x0f;
538 /* get the last_scsi_command here */ 544 /* get the last_scsi_command here */
539 lastSCSI = last_scsi_command(ihost_index)[ldn]; 545 lastSCSI = last_scsi_command(shpnt)[ldn];
540 outb(IM_EOI | ldn, IM_ATTN_REG(ihost_index)); 546 outb(IM_EOI | ldn, IM_ATTN_REG(shpnt));
541 547
542 /*these should never happen (hw fails, or a local programming bug) */ 548 /*these should never happen (hw fails, or a local programming bug) */
543 if (!global_command_error_excuse) { 549 if (!global_command_error_excuse) {
@@ -547,38 +553,38 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
547 case IM_SOFTWARE_SEQUENCING_ERROR: 553 case IM_SOFTWARE_SEQUENCING_ERROR:
548 case IM_CMD_ERROR: 554 case IM_CMD_ERROR:
549 printk(KERN_ERR "IBM MCA SCSI: Fatal Subsystem ERROR!\n"); 555 printk(KERN_ERR "IBM MCA SCSI: Fatal Subsystem ERROR!\n");
550 printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(ihost_index)[ldn].scb.enable); 556 printk(KERN_ERR " Last cmd=0x%x, ena=%x, len=", lastSCSI, ld(shpnt)[ldn].scb.enable);
551 if (ld(ihost_index)[ldn].cmd) 557 if (ld(shpnt)[ldn].cmd)
552 printk("%ld/%ld,", (long) (ld(ihost_index)[ldn].cmd->request_bufflen), (long) (ld(ihost_index)[ldn].scb.sys_buf_length)); 558 printk("%ld/%ld,", (long) (scsi_bufflen(ld(shpnt)[ldn].cmd)), (long) (ld(shpnt)[ldn].scb.sys_buf_length));
553 else 559 else
554 printk("none,"); 560 printk("none,");
555 if (ld(ihost_index)[ldn].cmd) 561 if (ld(shpnt)[ldn].cmd)
556 printk("Blocksize=%d", ld(ihost_index)[ldn].scb.u2.blk.length); 562 printk("Blocksize=%d", ld(shpnt)[ldn].scb.u2.blk.length);
557 else 563 else
558 printk("Blocksize=none"); 564 printk("Blocksize=none");
559 printk(", host=0x%x, ldn=0x%x\n", ihost_index, ldn); 565 printk(", host=%p, ldn=0x%x\n", shpnt, ldn);
560 if (ld(ihost_index)[ldn].cmd) { 566 if (ld(shpnt)[ldn].cmd) {
561 printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(ihost_index)[ldn], ld(ihost_index)[ldn].scb.u2.blk.count); 567 printk(KERN_ERR "Blockcount=%d/%d\n", last_scsi_blockcount(shpnt)[ldn], ld(shpnt)[ldn].scb.u2.blk.count);
562 printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(ihost_index)[ldn], ld(ihost_index)[ldn].scb.u1.log_blk_adr); 568 printk(KERN_ERR "Logical block=%lx/%lx\n", last_scsi_logical_block(shpnt)[ldn], ld(shpnt)[ldn].scb.u1.log_blk_adr);
563 } 569 }
564 printk(KERN_ERR "Reason given: %s\n", (cmd_result == IM_ADAPTER_HW_FAILURE) ? "HARDWARE FAILURE" : (cmd_result == IM_SOFTWARE_SEQUENCING_ERROR) ? "SOFTWARE SEQUENCING ERROR" : (cmd_result == IM_CMD_ERROR) ? "COMMAND ERROR" : "UNKNOWN"); 570 printk(KERN_ERR "Reason given: %s\n", (cmd_result == IM_ADAPTER_HW_FAILURE) ? "HARDWARE FAILURE" : (cmd_result == IM_SOFTWARE_SEQUENCING_ERROR) ? "SOFTWARE SEQUENCING ERROR" : (cmd_result == IM_CMD_ERROR) ? "COMMAND ERROR" : "UNKNOWN");
565 /* if errors appear, enter this section to give detailed info */ 571 /* if errors appear, enter this section to give detailed info */
566 printk(KERN_ERR "IBM MCA SCSI: Subsystem Error-Status follows:\n"); 572 printk(KERN_ERR "IBM MCA SCSI: Subsystem Error-Status follows:\n");
567 printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(ihost_index)[ldn]); 573 printk(KERN_ERR " Command Type................: %x\n", last_scsi_type(shpnt)[ldn]);
568 printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(ihost_index))); 574 printk(KERN_ERR " Attention Register..........: %x\n", inb(IM_ATTN_REG(shpnt)));
569 printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(ihost_index))); 575 printk(KERN_ERR " Basic Control Register......: %x\n", inb(IM_CTR_REG(shpnt)));
570 printk(KERN_ERR " Interrupt Status Register...: %x\n", intr_reg); 576 printk(KERN_ERR " Interrupt Status Register...: %x\n", intr_reg);
571 printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(ihost_index))); 577 printk(KERN_ERR " Basic Status Register.......: %x\n", inb(IM_STAT_REG(shpnt)));
572 if ((last_scsi_type(ihost_index)[ldn] == IM_SCB) || (last_scsi_type(ihost_index)[ldn] == IM_LONG_SCB)) { 578 if ((last_scsi_type(shpnt)[ldn] == IM_SCB) || (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB)) {
573 printk(KERN_ERR " SCB-Command.................: %x\n", ld(ihost_index)[ldn].scb.command); 579 printk(KERN_ERR " SCB-Command.................: %x\n", ld(shpnt)[ldn].scb.command);
574 printk(KERN_ERR " SCB-Enable..................: %x\n", ld(ihost_index)[ldn].scb.enable); 580 printk(KERN_ERR " SCB-Enable..................: %x\n", ld(shpnt)[ldn].scb.enable);
575 printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(ihost_index)[ldn].scb.u1.log_blk_adr); 581 printk(KERN_ERR " SCB-logical block address...: %lx\n", ld(shpnt)[ldn].scb.u1.log_blk_adr);
576 printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(ihost_index)[ldn].scb.sys_buf_adr); 582 printk(KERN_ERR " SCB-system buffer address...: %lx\n", ld(shpnt)[ldn].scb.sys_buf_adr);
577 printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(ihost_index)[ldn].scb.sys_buf_length); 583 printk(KERN_ERR " SCB-system buffer length....: %lx\n", ld(shpnt)[ldn].scb.sys_buf_length);
578 printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(ihost_index)[ldn].scb.tsb_adr); 584 printk(KERN_ERR " SCB-tsb address.............: %lx\n", ld(shpnt)[ldn].scb.tsb_adr);
579 printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(ihost_index)[ldn].scb.scb_chain_adr); 585 printk(KERN_ERR " SCB-Chain address...........: %lx\n", ld(shpnt)[ldn].scb.scb_chain_adr);
580 printk(KERN_ERR " SCB-block count.............: %x\n", ld(ihost_index)[ldn].scb.u2.blk.count); 586 printk(KERN_ERR " SCB-block count.............: %x\n", ld(shpnt)[ldn].scb.u2.blk.count);
581 printk(KERN_ERR " SCB-block length............: %x\n", ld(ihost_index)[ldn].scb.u2.blk.length); 587 printk(KERN_ERR " SCB-block length............: %x\n", ld(shpnt)[ldn].scb.u2.blk.length);
582 } 588 }
583 printk(KERN_ERR " Send this report to the maintainer.\n"); 589 printk(KERN_ERR " Send this report to the maintainer.\n");
584 panic("IBM MCA SCSI: Fatal error message from the subsystem (0x%X,0x%X)!\n", lastSCSI, cmd_result); 590 panic("IBM MCA SCSI: Fatal error message from the subsystem (0x%X,0x%X)!\n", lastSCSI, cmd_result);
@@ -600,72 +606,73 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
600 } 606 }
601 } 607 }
602 /* if no panic appeared, increase the interrupt-counter */ 608 /* if no panic appeared, increase the interrupt-counter */
603 IBM_DS(ihost_index).total_interrupts++; 609 IBM_DS(shpnt).total_interrupts++;
604 /*only for local checking phase */ 610 /*only for local checking phase */
605 if (local_checking_phase_flag(ihost_index)) { 611 if (local_checking_phase_flag(shpnt)) {
606 stat_result(ihost_index) = cmd_result; 612 stat_result(shpnt) = cmd_result;
607 got_interrupt(ihost_index) = 1; 613 got_interrupt(shpnt) = 1;
608 reset_status(ihost_index) = IM_RESET_FINISHED_OK; 614 reset_status(shpnt) = IM_RESET_FINISHED_OK;
609 last_scsi_command(ihost_index)[ldn] = NO_SCSI; 615 last_scsi_command(shpnt)[ldn] = NO_SCSI;
610 spin_unlock(dev->host_lock); 616 spin_unlock_irqrestore(shpnt->host_lock, flags);
611 return IRQ_HANDLED; 617 return IRQ_HANDLED;
612 } 618 }
613 /* handling of commands coming from upper level of scsi driver */ 619 /* handling of commands coming from upper level of scsi driver */
614 if (last_scsi_type(ihost_index)[ldn] == IM_IMM_CMD) { 620 if (last_scsi_type(shpnt)[ldn] == IM_IMM_CMD) {
615 /* verify ldn, and may handle rare reset immediate command */ 621 /* verify ldn, and may handle rare reset immediate command */
616 if ((reset_status(ihost_index) == IM_RESET_IN_PROGRESS) && (last_scsi_command(ihost_index)[ldn] == IM_RESET_IMM_CMD)) { 622 if ((reset_status(shpnt) == IM_RESET_IN_PROGRESS) && (last_scsi_command(shpnt)[ldn] == IM_RESET_IMM_CMD)) {
617 if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) { 623 if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
618 disk_rw_in_progress = 0; 624 disk_rw_in_progress = 0;
619 PS2_DISK_LED_OFF(); 625 PS2_DISK_LED_OFF();
620 reset_status(ihost_index) = IM_RESET_FINISHED_FAIL; 626 reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
621 } else { 627 } else {
622 /*reset disk led counter, turn off disk led */ 628 /*reset disk led counter, turn off disk led */
623 disk_rw_in_progress = 0; 629 disk_rw_in_progress = 0;
624 PS2_DISK_LED_OFF(); 630 PS2_DISK_LED_OFF();
625 reset_status(ihost_index) = IM_RESET_FINISHED_OK; 631 reset_status(shpnt) = IM_RESET_FINISHED_OK;
626 } 632 }
627 stat_result(ihost_index) = cmd_result; 633 stat_result(shpnt) = cmd_result;
628 last_scsi_command(ihost_index)[ldn] = NO_SCSI; 634 last_scsi_command(shpnt)[ldn] = NO_SCSI;
629 last_scsi_type(ihost_index)[ldn] = 0; 635 last_scsi_type(shpnt)[ldn] = 0;
630 spin_unlock(dev->host_lock); 636 spin_unlock_irqrestore(shpnt->host_lock, flags);
631 return IRQ_HANDLED; 637 return IRQ_HANDLED;
632 } else if (last_scsi_command(ihost_index)[ldn] == IM_ABORT_IMM_CMD) { 638 } else if (last_scsi_command(shpnt)[ldn] == IM_ABORT_IMM_CMD) {
633 /* react on SCSI abort command */ 639 /* react on SCSI abort command */
634#ifdef IM_DEBUG_PROBE 640#ifdef IM_DEBUG_PROBE
635 printk("IBM MCA SCSI: Interrupt from SCSI-abort.\n"); 641 printk("IBM MCA SCSI: Interrupt from SCSI-abort.\n");
636#endif 642#endif
637 disk_rw_in_progress = 0; 643 disk_rw_in_progress = 0;
638 PS2_DISK_LED_OFF(); 644 PS2_DISK_LED_OFF();
639 cmd = ld(ihost_index)[ldn].cmd; 645 cmd = ld(shpnt)[ldn].cmd;
640 ld(ihost_index)[ldn].cmd = NULL; 646 ld(shpnt)[ldn].cmd = NULL;
641 if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) 647 if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE)
642 cmd->result = DID_NO_CONNECT << 16; 648 cmd->result = DID_NO_CONNECT << 16;
643 else 649 else
644 cmd->result = DID_ABORT << 16; 650 cmd->result = DID_ABORT << 16;
645 stat_result(ihost_index) = cmd_result; 651 stat_result(shpnt) = cmd_result;
646 last_scsi_command(ihost_index)[ldn] = NO_SCSI; 652 last_scsi_command(shpnt)[ldn] = NO_SCSI;
647 last_scsi_type(ihost_index)[ldn] = 0; 653 last_scsi_type(shpnt)[ldn] = 0;
648 if (cmd->scsi_done) 654 if (cmd->scsi_done)
649 (cmd->scsi_done) (cmd); /* should be the internal_done */ 655 (cmd->scsi_done) (cmd); /* should be the internal_done */
650 spin_unlock(dev->host_lock); 656 spin_unlock_irqrestore(shpnt->host_lock, flags);
651 return IRQ_HANDLED; 657 return IRQ_HANDLED;
652 } else { 658 } else {
653 disk_rw_in_progress = 0; 659 disk_rw_in_progress = 0;
654 PS2_DISK_LED_OFF(); 660 PS2_DISK_LED_OFF();
655 reset_status(ihost_index) = IM_RESET_FINISHED_OK; 661 reset_status(shpnt) = IM_RESET_FINISHED_OK;
656 stat_result(ihost_index) = cmd_result; 662 stat_result(shpnt) = cmd_result;
657 last_scsi_command(ihost_index)[ldn] = NO_SCSI; 663 last_scsi_command(shpnt)[ldn] = NO_SCSI;
658 spin_unlock(dev->host_lock); 664 spin_unlock_irqrestore(shpnt->host_lock, flags);
659 return IRQ_HANDLED; 665 return IRQ_HANDLED;
660 } 666 }
661 } 667 }
662 last_scsi_command(ihost_index)[ldn] = NO_SCSI; 668 last_scsi_command(shpnt)[ldn] = NO_SCSI;
663 last_scsi_type(ihost_index)[ldn] = 0; 669 last_scsi_type(shpnt)[ldn] = 0;
664 cmd = ld(ihost_index)[ldn].cmd; 670 cmd = ld(shpnt)[ldn].cmd;
665 ld(ihost_index)[ldn].cmd = NULL; 671 ld(shpnt)[ldn].cmd = NULL;
666#ifdef IM_DEBUG_TIMEOUT 672#ifdef IM_DEBUG_TIMEOUT
667 if (cmd) { 673 if (cmd) {
668 if ((cmd->target == TIMEOUT_PUN) && (cmd->device->lun == TIMEOUT_LUN)) { 674 if ((cmd->target == TIMEOUT_PUN) && (cmd->device->lun == TIMEOUT_LUN)) {
675 spin_unlock_irqsave(shpnt->host_lock, flags);
669 printk("IBM MCA SCSI: Ignoring interrupt from pun=%x, lun=%x.\n", cmd->target, cmd->device->lun); 676 printk("IBM MCA SCSI: Ignoring interrupt from pun=%x, lun=%x.\n", cmd->target, cmd->device->lun);
670 return IRQ_HANDLED; 677 return IRQ_HANDLED;
671 } 678 }
@@ -674,15 +681,15 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
674 /*if no command structure, just return, else clear cmd */ 681 /*if no command structure, just return, else clear cmd */
675 if (!cmd) 682 if (!cmd)
676 { 683 {
677 spin_unlock(dev->host_lock); 684 spin_unlock_irqrestore(shpnt->host_lock, flags);
678 return IRQ_HANDLED; 685 return IRQ_HANDLED;
679 } 686 }
680 687
681#ifdef IM_DEBUG_INT 688#ifdef IM_DEBUG_INT
682 printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(ihost_index)[ldn].tsb.dev_status, ld(ihost_index)[ldn].tsb.cmd_status, ld(ihost_index)[ldn].tsb.dev_error, ld(ihost_index)[ldn].tsb.cmd_error); 689 printk("cmd=%02x ireg=%02x ds=%02x cs=%02x de=%02x ce=%02x\n", cmd->cmnd[0], intr_reg, ld(shpnt)[ldn].tsb.dev_status, ld(shpnt)[ldn].tsb.cmd_status, ld(shpnt)[ldn].tsb.dev_error, ld(shpnt)[ldn].tsb.cmd_error);
683#endif 690#endif
684 /*if this is end of media read/write, may turn off PS/2 disk led */ 691 /*if this is end of media read/write, may turn off PS/2 disk led */
685 if ((ld(ihost_index)[ldn].device_type != TYPE_NO_LUN) && (ld(ihost_index)[ldn].device_type != TYPE_NO_DEVICE)) { 692 if ((ld(shpnt)[ldn].device_type != TYPE_NO_LUN) && (ld(shpnt)[ldn].device_type != TYPE_NO_DEVICE)) {
686 /* only access this, if there was a valid device addressed */ 693 /* only access this, if there was a valid device addressed */
687 if (--disk_rw_in_progress == 0) 694 if (--disk_rw_in_progress == 0)
688 PS2_DISK_LED_OFF(); 695 PS2_DISK_LED_OFF();
@@ -693,8 +700,8 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
693 * adapters do not support CMD_TERMINATED, TASK_SET_FULL and 700 * adapters do not support CMD_TERMINATED, TASK_SET_FULL and
694 * ACA_ACTIVE as returning statusbyte information. (ML) */ 701 * ACA_ACTIVE as returning statusbyte information. (ML) */
695 if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) { 702 if (cmd_result == IM_CMD_COMPLETED_WITH_FAILURE) {
696 cmd->result = (unsigned char) (ld(ihost_index)[ldn].tsb.dev_status & 0x1e); 703 cmd->result = (unsigned char) (ld(shpnt)[ldn].tsb.dev_status & 0x1e);
697 IBM_DS(ihost_index).total_errors++; 704 IBM_DS(shpnt).total_errors++;
698 } else 705 } else
699 cmd->result = 0; 706 cmd->result = 0;
700 /* write device status into cmd->result, and call done function */ 707 /* write device status into cmd->result, and call done function */
@@ -705,24 +712,25 @@ static irqreturn_t interrupt_handler(int irq, void *dev_id)
705 cmd->result |= DID_OK << 16; 712 cmd->result |= DID_OK << 16;
706 if (cmd->scsi_done) 713 if (cmd->scsi_done)
707 (cmd->scsi_done) (cmd); 714 (cmd->scsi_done) (cmd);
708 spin_unlock(dev->host_lock); 715 spin_unlock_irqrestore(shpnt->host_lock, flags);
709 return IRQ_HANDLED; 716 return IRQ_HANDLED;
710} 717}
711 718
712static void issue_cmd(int host_index, unsigned long cmd_reg, unsigned char attn_reg) 719static void issue_cmd(struct Scsi_Host *shpnt, unsigned long cmd_reg,
720 unsigned char attn_reg)
713{ 721{
714 unsigned long flags; 722 unsigned long flags;
715 /* must wait for attention reg not busy */ 723 /* must wait for attention reg not busy */
716 while (1) { 724 while (1) {
717 spin_lock_irqsave(hosts[host_index]->host_lock, flags); 725 spin_lock_irqsave(shpnt->host_lock, flags);
718 if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY)) 726 if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
719 break; 727 break;
720 spin_unlock_irqrestore(hosts[host_index]->host_lock, flags); 728 spin_unlock_irqrestore(shpnt->host_lock, flags);
721 } 729 }
722 /* write registers and enable system interrupts */ 730 /* write registers and enable system interrupts */
723 outl(cmd_reg, IM_CMD_REG(host_index)); 731 outl(cmd_reg, IM_CMD_REG(shpnt));
724 outb(attn_reg, IM_ATTN_REG(host_index)); 732 outb(attn_reg, IM_ATTN_REG(shpnt));
725 spin_unlock_irqrestore(hosts[host_index]->host_lock, flags); 733 spin_unlock_irqrestore(shpnt->host_lock, flags);
726} 734}
727 735
728static void internal_done(Scsi_Cmnd * cmd) 736static void internal_done(Scsi_Cmnd * cmd)
@@ -732,34 +740,34 @@ static void internal_done(Scsi_Cmnd * cmd)
732} 740}
733 741
734/* SCSI-SCB-command for device_inquiry */ 742/* SCSI-SCB-command for device_inquiry */
735static int device_inquiry(int host_index, int ldn) 743static int device_inquiry(struct Scsi_Host *shpnt, int ldn)
736{ 744{
737 int retr; 745 int retr;
738 struct im_scb *scb; 746 struct im_scb *scb;
739 struct im_tsb *tsb; 747 struct im_tsb *tsb;
740 unsigned char *buf; 748 unsigned char *buf;
741 749
742 scb = &(ld(host_index)[ldn].scb); 750 scb = &(ld(shpnt)[ldn].scb);
743 tsb = &(ld(host_index)[ldn].tsb); 751 tsb = &(ld(shpnt)[ldn].tsb);
744 buf = (unsigned char *) (&(ld(host_index)[ldn].buf)); 752 buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
745 ld(host_index)[ldn].tsb.dev_status = 0; /* prepare statusblock */ 753 ld(shpnt)[ldn].tsb.dev_status = 0; /* prepare statusblock */
746 for (retr = 0; retr < 3; retr++) { 754 for (retr = 0; retr < 3; retr++) {
747 /* fill scb with inquiry command */ 755 /* fill scb with inquiry command */
748 scb->command = IM_DEVICE_INQUIRY_CMD | IM_NO_DISCONNECT; 756 scb->command = IM_DEVICE_INQUIRY_CMD | IM_NO_DISCONNECT;
749 scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_RETRY_ENABLE | IM_BYPASS_BUFFER; 757 scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
750 last_scsi_command(host_index)[ldn] = IM_DEVICE_INQUIRY_CMD; 758 last_scsi_command(shpnt)[ldn] = IM_DEVICE_INQUIRY_CMD;
751 last_scsi_type(host_index)[ldn] = IM_SCB; 759 last_scsi_type(shpnt)[ldn] = IM_SCB;
752 scb->sys_buf_adr = isa_virt_to_bus(buf); 760 scb->sys_buf_adr = isa_virt_to_bus(buf);
753 scb->sys_buf_length = 255; /* maximum bufferlength gives max info */ 761 scb->sys_buf_length = 255; /* maximum bufferlength gives max info */
754 scb->tsb_adr = isa_virt_to_bus(tsb); 762 scb->tsb_adr = isa_virt_to_bus(tsb);
755 /* issue scb to passed ldn, and busy wait for interrupt */ 763 /* issue scb to passed ldn, and busy wait for interrupt */
756 got_interrupt(host_index) = 0; 764 got_interrupt(shpnt) = 0;
757 issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn); 765 issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
758 while (!got_interrupt(host_index)) 766 while (!got_interrupt(shpnt))
759 barrier(); 767 barrier();
760 768
761 /*if command successful, break */ 769 /*if command successful, break */
762 if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES)) 770 if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
763 return 1; 771 return 1;
764 } 772 }
765 /*if all three retries failed, return "no device at this ldn" */ 773 /*if all three retries failed, return "no device at this ldn" */
@@ -769,34 +777,34 @@ static int device_inquiry(int host_index, int ldn)
769 return 1; 777 return 1;
770} 778}
771 779
772static int read_capacity(int host_index, int ldn) 780static int read_capacity(struct Scsi_Host *shpnt, int ldn)
773{ 781{
774 int retr; 782 int retr;
775 struct im_scb *scb; 783 struct im_scb *scb;
776 struct im_tsb *tsb; 784 struct im_tsb *tsb;
777 unsigned char *buf; 785 unsigned char *buf;
778 786
779 scb = &(ld(host_index)[ldn].scb); 787 scb = &(ld(shpnt)[ldn].scb);
780 tsb = &(ld(host_index)[ldn].tsb); 788 tsb = &(ld(shpnt)[ldn].tsb);
781 buf = (unsigned char *) (&(ld(host_index)[ldn].buf)); 789 buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
782 ld(host_index)[ldn].tsb.dev_status = 0; 790 ld(shpnt)[ldn].tsb.dev_status = 0;
783 for (retr = 0; retr < 3; retr++) { 791 for (retr = 0; retr < 3; retr++) {
784 /*fill scb with read capacity command */ 792 /*fill scb with read capacity command */
785 scb->command = IM_READ_CAPACITY_CMD; 793 scb->command = IM_READ_CAPACITY_CMD;
786 scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_RETRY_ENABLE | IM_BYPASS_BUFFER; 794 scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_READ_CONTROL | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
787 last_scsi_command(host_index)[ldn] = IM_READ_CAPACITY_CMD; 795 last_scsi_command(shpnt)[ldn] = IM_READ_CAPACITY_CMD;
788 last_scsi_type(host_index)[ldn] = IM_SCB; 796 last_scsi_type(shpnt)[ldn] = IM_SCB;
789 scb->sys_buf_adr = isa_virt_to_bus(buf); 797 scb->sys_buf_adr = isa_virt_to_bus(buf);
790 scb->sys_buf_length = 8; 798 scb->sys_buf_length = 8;
791 scb->tsb_adr = isa_virt_to_bus(tsb); 799 scb->tsb_adr = isa_virt_to_bus(tsb);
792 /*issue scb to passed ldn, and busy wait for interrupt */ 800 /*issue scb to passed ldn, and busy wait for interrupt */
793 got_interrupt(host_index) = 0; 801 got_interrupt(shpnt) = 0;
794 issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn); 802 issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
795 while (!got_interrupt(host_index)) 803 while (!got_interrupt(shpnt))
796 barrier(); 804 barrier();
797 805
798 /*if got capacity, get block length and return one device found */ 806 /*if got capacity, get block length and return one device found */
799 if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES)) 807 if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
800 return 1; 808 return 1;
801 } 809 }
802 /*if all three retries failed, return "no device at this ldn" */ 810 /*if all three retries failed, return "no device at this ldn" */
@@ -806,39 +814,39 @@ static int read_capacity(int host_index, int ldn)
806 return 1; 814 return 1;
807} 815}
808 816
809static int get_pos_info(int host_index) 817static int get_pos_info(struct Scsi_Host *shpnt)
810{ 818{
811 int retr; 819 int retr;
812 struct im_scb *scb; 820 struct im_scb *scb;
813 struct im_tsb *tsb; 821 struct im_tsb *tsb;
814 unsigned char *buf; 822 unsigned char *buf;
815 823
816 scb = &(ld(host_index)[MAX_LOG_DEV].scb); 824 scb = &(ld(shpnt)[MAX_LOG_DEV].scb);
817 tsb = &(ld(host_index)[MAX_LOG_DEV].tsb); 825 tsb = &(ld(shpnt)[MAX_LOG_DEV].tsb);
818 buf = (unsigned char *) (&(ld(host_index)[MAX_LOG_DEV].buf)); 826 buf = (unsigned char *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
819 ld(host_index)[MAX_LOG_DEV].tsb.dev_status = 0; 827 ld(shpnt)[MAX_LOG_DEV].tsb.dev_status = 0;
820 for (retr = 0; retr < 3; retr++) { 828 for (retr = 0; retr < 3; retr++) {
821 /*fill scb with get_pos_info command */ 829 /*fill scb with get_pos_info command */
822 scb->command = IM_GET_POS_INFO_CMD; 830 scb->command = IM_GET_POS_INFO_CMD;
823 scb->enable = IM_READ_CONTROL | IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE | IM_BYPASS_BUFFER; 831 scb->enable = IM_READ_CONTROL | IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE | IM_BYPASS_BUFFER;
824 last_scsi_command(host_index)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD; 832 last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_GET_POS_INFO_CMD;
825 last_scsi_type(host_index)[MAX_LOG_DEV] = IM_SCB; 833 last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_SCB;
826 scb->sys_buf_adr = isa_virt_to_bus(buf); 834 scb->sys_buf_adr = isa_virt_to_bus(buf);
827 if (special(host_index) == IBM_SCSI2_FW) 835 if (special(shpnt) == IBM_SCSI2_FW)
828 scb->sys_buf_length = 256; /* get all info from F/W adapter */ 836 scb->sys_buf_length = 256; /* get all info from F/W adapter */
829 else 837 else
830 scb->sys_buf_length = 18; /* get exactly 18 bytes for other SCSI */ 838 scb->sys_buf_length = 18; /* get exactly 18 bytes for other SCSI */
831 scb->tsb_adr = isa_virt_to_bus(tsb); 839 scb->tsb_adr = isa_virt_to_bus(tsb);
832 /*issue scb to ldn=15, and busy wait for interrupt */ 840 /*issue scb to ldn=15, and busy wait for interrupt */
833 got_interrupt(host_index) = 0; 841 got_interrupt(shpnt) = 0;
834 issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV); 842 issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | MAX_LOG_DEV);
835 843
836 /* FIXME: timeout */ 844 /* FIXME: timeout */
837 while (!got_interrupt(host_index)) 845 while (!got_interrupt(shpnt))
838 barrier(); 846 barrier();
839 847
840 /*if got POS-stuff, get block length and return one device found */ 848 /*if got POS-stuff, get block length and return one device found */
841 if ((stat_result(host_index) == IM_SCB_CMD_COMPLETED) || (stat_result(host_index) == IM_SCB_CMD_COMPLETED_WITH_RETRIES)) 849 if ((stat_result(shpnt) == IM_SCB_CMD_COMPLETED) || (stat_result(shpnt) == IM_SCB_CMD_COMPLETED_WITH_RETRIES))
842 return 1; 850 return 1;
843 } 851 }
844 /* if all three retries failed, return "no device at this ldn" */ 852 /* if all three retries failed, return "no device at this ldn" */
@@ -851,14 +859,16 @@ static int get_pos_info(int host_index)
851/* SCSI-immediate-command for assign. This functions maps/unmaps specific 859/* SCSI-immediate-command for assign. This functions maps/unmaps specific
852 ldn-numbers on SCSI (PUN,LUN). It is needed for presetting of the 860 ldn-numbers on SCSI (PUN,LUN). It is needed for presetting of the
853 subsystem and for dynamical remapping od ldns. */ 861 subsystem and for dynamical remapping od ldns. */
854static int immediate_assign(int host_index, unsigned int pun, unsigned int lun, unsigned int ldn, unsigned int operation) 862static int immediate_assign(struct Scsi_Host *shpnt, unsigned int pun,
863 unsigned int lun, unsigned int ldn,
864 unsigned int operation)
855{ 865{
856 int retr; 866 int retr;
857 unsigned long imm_cmd; 867 unsigned long imm_cmd;
858 868
859 for (retr = 0; retr < 3; retr++) { 869 for (retr = 0; retr < 3; retr++) {
860 /* select mutation level of the SCSI-adapter */ 870 /* select mutation level of the SCSI-adapter */
861 switch (special(host_index)) { 871 switch (special(shpnt)) {
862 case IBM_SCSI2_FW: 872 case IBM_SCSI2_FW:
863 imm_cmd = (unsigned long) (IM_ASSIGN_IMM_CMD); 873 imm_cmd = (unsigned long) (IM_ASSIGN_IMM_CMD);
864 imm_cmd |= (unsigned long) ((lun & 7) << 24); 874 imm_cmd |= (unsigned long) ((lun & 7) << 24);
@@ -867,7 +877,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
867 imm_cmd |= (unsigned long) ((ldn & 15) << 16); 877 imm_cmd |= (unsigned long) ((ldn & 15) << 16);
868 break; 878 break;
869 default: 879 default:
870 imm_cmd = inl(IM_CMD_REG(host_index)); 880 imm_cmd = inl(IM_CMD_REG(shpnt));
871 imm_cmd &= (unsigned long) (0xF8000000); /* keep reserved bits */ 881 imm_cmd &= (unsigned long) (0xF8000000); /* keep reserved bits */
872 imm_cmd |= (unsigned long) (IM_ASSIGN_IMM_CMD); 882 imm_cmd |= (unsigned long) (IM_ASSIGN_IMM_CMD);
873 imm_cmd |= (unsigned long) ((lun & 7) << 24); 883 imm_cmd |= (unsigned long) ((lun & 7) << 24);
@@ -876,15 +886,15 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
876 imm_cmd |= (unsigned long) ((ldn & 15) << 16); 886 imm_cmd |= (unsigned long) ((ldn & 15) << 16);
877 break; 887 break;
878 } 888 }
879 last_scsi_command(host_index)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD; 889 last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_ASSIGN_IMM_CMD;
880 last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD; 890 last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
881 got_interrupt(host_index) = 0; 891 got_interrupt(shpnt) = 0;
882 issue_cmd(host_index, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV); 892 issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
883 while (!got_interrupt(host_index)) 893 while (!got_interrupt(shpnt))
884 barrier(); 894 barrier();
885 895
886 /*if command successful, break */ 896 /*if command successful, break */
887 if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED) 897 if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
888 return 1; 898 return 1;
889 } 899 }
890 if (retr >= 3) 900 if (retr >= 3)
@@ -893,7 +903,7 @@ static int immediate_assign(int host_index, unsigned int pun, unsigned int lun,
893 return 1; 903 return 1;
894} 904}
895 905
896static int immediate_feature(int host_index, unsigned int speed, unsigned int timeout) 906static int immediate_feature(struct Scsi_Host *shpnt, unsigned int speed, unsigned int timeout)
897{ 907{
898 int retr; 908 int retr;
899 unsigned long imm_cmd; 909 unsigned long imm_cmd;
@@ -903,16 +913,16 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
903 imm_cmd = IM_FEATURE_CTR_IMM_CMD; 913 imm_cmd = IM_FEATURE_CTR_IMM_CMD;
904 imm_cmd |= (unsigned long) ((speed & 0x7) << 29); 914 imm_cmd |= (unsigned long) ((speed & 0x7) << 29);
905 imm_cmd |= (unsigned long) ((timeout & 0x1fff) << 16); 915 imm_cmd |= (unsigned long) ((timeout & 0x1fff) << 16);
906 last_scsi_command(host_index)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD; 916 last_scsi_command(shpnt)[MAX_LOG_DEV] = IM_FEATURE_CTR_IMM_CMD;
907 last_scsi_type(host_index)[MAX_LOG_DEV] = IM_IMM_CMD; 917 last_scsi_type(shpnt)[MAX_LOG_DEV] = IM_IMM_CMD;
908 got_interrupt(host_index) = 0; 918 got_interrupt(shpnt) = 0;
909 /* we need to run into command errors in order to probe for the 919 /* we need to run into command errors in order to probe for the
910 * right speed! */ 920 * right speed! */
911 global_command_error_excuse = 1; 921 global_command_error_excuse = 1;
912 issue_cmd(host_index, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV); 922 issue_cmd(shpnt, (unsigned long) (imm_cmd), IM_IMM_CMD | MAX_LOG_DEV);
913 923
914 /* FIXME: timeout */ 924 /* FIXME: timeout */
915 while (!got_interrupt(host_index)) 925 while (!got_interrupt(shpnt))
916 barrier(); 926 barrier();
917 if (global_command_error_excuse == CMD_FAIL) { 927 if (global_command_error_excuse == CMD_FAIL) {
918 global_command_error_excuse = 0; 928 global_command_error_excuse = 0;
@@ -920,7 +930,7 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
920 } else 930 } else
921 global_command_error_excuse = 0; 931 global_command_error_excuse = 0;
922 /*if command successful, break */ 932 /*if command successful, break */
923 if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED) 933 if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
924 return 1; 934 return 1;
925 } 935 }
926 if (retr >= 3) 936 if (retr >= 3)
@@ -930,35 +940,35 @@ static int immediate_feature(int host_index, unsigned int speed, unsigned int ti
930} 940}
931 941
932#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET 942#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
933static int immediate_reset(int host_index, unsigned int ldn) 943static int immediate_reset(struct Scsi_Host *shpnt, unsigned int ldn)
934{ 944{
935 int retries; 945 int retries;
936 int ticks; 946 int ticks;
937 unsigned long imm_command; 947 unsigned long imm_command;
938 948
939 for (retries = 0; retries < 3; retries++) { 949 for (retries = 0; retries < 3; retries++) {
940 imm_command = inl(IM_CMD_REG(host_index)); 950 imm_command = inl(IM_CMD_REG(shpnt));
941 imm_command &= (unsigned long) (0xFFFF0000); /* keep reserved bits */ 951 imm_command &= (unsigned long) (0xFFFF0000); /* keep reserved bits */
942 imm_command |= (unsigned long) (IM_RESET_IMM_CMD); 952 imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
943 last_scsi_command(host_index)[ldn] = IM_RESET_IMM_CMD; 953 last_scsi_command(shpnt)[ldn] = IM_RESET_IMM_CMD;
944 last_scsi_type(host_index)[ldn] = IM_IMM_CMD; 954 last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
945 got_interrupt(host_index) = 0; 955 got_interrupt(shpnt) = 0;
946 reset_status(host_index) = IM_RESET_IN_PROGRESS; 956 reset_status(shpnt) = IM_RESET_IN_PROGRESS;
947 issue_cmd(host_index, (unsigned long) (imm_command), IM_IMM_CMD | ldn); 957 issue_cmd(shpnt, (unsigned long) (imm_command), IM_IMM_CMD | ldn);
948 ticks = IM_RESET_DELAY * HZ; 958 ticks = IM_RESET_DELAY * HZ;
949 while (reset_status(host_index) == IM_RESET_IN_PROGRESS && --ticks) { 959 while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks) {
950 udelay((1 + 999 / HZ) * 1000); 960 udelay((1 + 999 / HZ) * 1000);
951 barrier(); 961 barrier();
952 } 962 }
953 /* if reset did not complete, just complain */ 963 /* if reset did not complete, just complain */
954 if (!ticks) { 964 if (!ticks) {
955 printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY); 965 printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
956 reset_status(host_index) = IM_RESET_FINISHED_OK; 966 reset_status(shpnt) = IM_RESET_FINISHED_OK;
957 /* did not work, finish */ 967 /* did not work, finish */
958 return 1; 968 return 1;
959 } 969 }
960 /*if command successful, break */ 970 /*if command successful, break */
961 if (stat_result(host_index) == IM_IMMEDIATE_CMD_COMPLETED) 971 if (stat_result(shpnt) == IM_IMMEDIATE_CMD_COMPLETED)
962 return 1; 972 return 1;
963 } 973 }
964 if (retries >= 3) 974 if (retries >= 3)
@@ -1060,35 +1070,35 @@ static int probe_display(int what)
1060 return 0; 1070 return 0;
1061} 1071}
1062 1072
1063static int probe_bus_mode(int host_index) 1073static int probe_bus_mode(struct Scsi_Host *shpnt)
1064{ 1074{
1065 struct im_pos_info *info; 1075 struct im_pos_info *info;
1066 int num_bus = 0; 1076 int num_bus = 0;
1067 int ldn; 1077 int ldn;
1068 1078
1069 info = (struct im_pos_info *) (&(ld(host_index)[MAX_LOG_DEV].buf)); 1079 info = (struct im_pos_info *) (&(ld(shpnt)[MAX_LOG_DEV].buf));
1070 if (get_pos_info(host_index)) { 1080 if (get_pos_info(shpnt)) {
1071 if (info->connector_size & 0xf000) 1081 if (info->connector_size & 0xf000)
1072 subsystem_connector_size(host_index) = 16; 1082 subsystem_connector_size(shpnt) = 16;
1073 else 1083 else
1074 subsystem_connector_size(host_index) = 32; 1084 subsystem_connector_size(shpnt) = 32;
1075 num_bus |= (info->pos_4b & 8) >> 3; 1085 num_bus |= (info->pos_4b & 8) >> 3;
1076 for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) { 1086 for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
1077 if ((special(host_index) == IBM_SCSI_WCACHE) || (special(host_index) == IBM_7568_WCACHE)) { 1087 if ((special(shpnt) == IBM_SCSI_WCACHE) || (special(shpnt) == IBM_7568_WCACHE)) {
1078 if (!((info->cache_stat >> ldn) & 1)) 1088 if (!((info->cache_stat >> ldn) & 1))
1079 ld(host_index)[ldn].cache_flag = 0; 1089 ld(shpnt)[ldn].cache_flag = 0;
1080 } 1090 }
1081 if (!((info->retry_stat >> ldn) & 1)) 1091 if (!((info->retry_stat >> ldn) & 1))
1082 ld(host_index)[ldn].retry_flag = 0; 1092 ld(shpnt)[ldn].retry_flag = 0;
1083 } 1093 }
1084#ifdef IM_DEBUG_PROBE 1094#ifdef IM_DEBUG_PROBE
1085 printk("IBM MCA SCSI: SCSI-Cache bits: "); 1095 printk("IBM MCA SCSI: SCSI-Cache bits: ");
1086 for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) { 1096 for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
1087 printk("%d", ld(host_index)[ldn].cache_flag); 1097 printk("%d", ld(shpnt)[ldn].cache_flag);
1088 } 1098 }
1089 printk("\nIBM MCA SCSI: SCSI-Retry bits: "); 1099 printk("\nIBM MCA SCSI: SCSI-Retry bits: ");
1090 for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) { 1100 for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
1091 printk("%d", ld(host_index)[ldn].retry_flag); 1101 printk("%d", ld(shpnt)[ldn].retry_flag);
1092 } 1102 }
1093 printk("\n"); 1103 printk("\n");
1094#endif 1104#endif
@@ -1097,7 +1107,7 @@ static int probe_bus_mode(int host_index)
1097} 1107}
1098 1108
1099/* probing scsi devices */ 1109/* probing scsi devices */
1100static void check_devices(int host_index, int adaptertype) 1110static void check_devices(struct Scsi_Host *shpnt, int adaptertype)
1101{ 1111{
1102 int id, lun, ldn, ticks; 1112 int id, lun, ldn, ticks;
1103 int count_devices; /* local counter for connected device */ 1113 int count_devices; /* local counter for connected device */
@@ -1108,24 +1118,24 @@ static void check_devices(int host_index, int adaptertype)
1108 /* assign default values to certain variables */ 1118 /* assign default values to certain variables */
1109 ticks = 0; 1119 ticks = 0;
1110 count_devices = 0; 1120 count_devices = 0;
1111 IBM_DS(host_index).dyn_flag = 0; /* normally no need for dynamical ldn management */ 1121 IBM_DS(shpnt).dyn_flag = 0; /* normally no need for dynamical ldn management */
1112 IBM_DS(host_index).total_errors = 0; /* set errorcounter to 0 */ 1122 IBM_DS(shpnt).total_errors = 0; /* set errorcounter to 0 */
1113 next_ldn(host_index) = 7; /* next ldn to be assigned is 7, because 0-6 is 'hardwired' */ 1123 next_ldn(shpnt) = 7; /* next ldn to be assigned is 7, because 0-6 is 'hardwired' */
1114 1124
1115 /* initialize the very important driver-informational arrays/structs */ 1125 /* initialize the very important driver-informational arrays/structs */
1116 memset(ld(host_index), 0, sizeof(ld(host_index))); 1126 memset(ld(shpnt), 0, sizeof(ld(shpnt)));
1117 for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) { 1127 for (ldn = 0; ldn <= MAX_LOG_DEV; ldn++) {
1118 last_scsi_command(host_index)[ldn] = NO_SCSI; /* emptify last SCSI-command storage */ 1128 last_scsi_command(shpnt)[ldn] = NO_SCSI; /* emptify last SCSI-command storage */
1119 last_scsi_type(host_index)[ldn] = 0; 1129 last_scsi_type(shpnt)[ldn] = 0;
1120 ld(host_index)[ldn].cache_flag = 1; 1130 ld(shpnt)[ldn].cache_flag = 1;
1121 ld(host_index)[ldn].retry_flag = 1; 1131 ld(shpnt)[ldn].retry_flag = 1;
1122 } 1132 }
1123 memset(get_ldn(host_index), TYPE_NO_DEVICE, sizeof(get_ldn(host_index))); /* this is essential ! */ 1133 memset(get_ldn(shpnt), TYPE_NO_DEVICE, sizeof(get_ldn(shpnt))); /* this is essential ! */
1124 memset(get_scsi(host_index), TYPE_NO_DEVICE, sizeof(get_scsi(host_index))); /* this is essential ! */ 1134 memset(get_scsi(shpnt), TYPE_NO_DEVICE, sizeof(get_scsi(shpnt))); /* this is essential ! */
1125 for (lun = 0; lun < 8; lun++) { 1135 for (lun = 0; lun < 8; lun++) {
1126 /* mark the adapter at its pun on all luns */ 1136 /* mark the adapter at its pun on all luns */
1127 get_scsi(host_index)[subsystem_pun(host_index)][lun] = TYPE_IBM_SCSI_ADAPTER; 1137 get_scsi(shpnt)[subsystem_pun(shpnt)][lun] = TYPE_IBM_SCSI_ADAPTER;
1128 get_ldn(host_index)[subsystem_pun(host_index)][lun] = MAX_LOG_DEV; /* make sure, the subsystem 1138 get_ldn(shpnt)[subsystem_pun(shpnt)][lun] = MAX_LOG_DEV; /* make sure, the subsystem
1129 ldn is active for all 1139 ldn is active for all
1130 luns. */ 1140 luns. */
1131 } 1141 }
@@ -1134,9 +1144,9 @@ static void check_devices(int host_index, int adaptertype)
1134 /* monitor connected on model XX95. */ 1144 /* monitor connected on model XX95. */
1135 1145
1136 /* STEP 1: */ 1146 /* STEP 1: */
1137 adapter_speed(host_index) = global_adapter_speed; 1147 adapter_speed(shpnt) = global_adapter_speed;
1138 speedrun = adapter_speed(host_index); 1148 speedrun = adapter_speed(shpnt);
1139 while (immediate_feature(host_index, speedrun, adapter_timeout) == 2) { 1149 while (immediate_feature(shpnt, speedrun, adapter_timeout) == 2) {
1140 probe_display(1); 1150 probe_display(1);
1141 if (speedrun == 7) 1151 if (speedrun == 7)
1142 panic("IBM MCA SCSI: Cannot set Synchronous-Transfer-Rate!\n"); 1152 panic("IBM MCA SCSI: Cannot set Synchronous-Transfer-Rate!\n");
@@ -1144,30 +1154,30 @@ static void check_devices(int host_index, int adaptertype)
1144 if (speedrun > 7) 1154 if (speedrun > 7)
1145 speedrun = 7; 1155 speedrun = 7;
1146 } 1156 }
1147 adapter_speed(host_index) = speedrun; 1157 adapter_speed(shpnt) = speedrun;
1148 /* Get detailed information about the current adapter, necessary for 1158 /* Get detailed information about the current adapter, necessary for
1149 * device operations: */ 1159 * device operations: */
1150 num_bus = probe_bus_mode(host_index); 1160 num_bus = probe_bus_mode(shpnt);
1151 1161
1152 /* num_bus contains only valid data for the F/W adapter! */ 1162 /* num_bus contains only valid data for the F/W adapter! */
1153 if (adaptertype == IBM_SCSI2_FW) { /* F/W SCSI adapter: */ 1163 if (adaptertype == IBM_SCSI2_FW) { /* F/W SCSI adapter: */
1154 /* F/W adapter PUN-space extension evaluation: */ 1164 /* F/W adapter PUN-space extension evaluation: */
1155 if (num_bus) { 1165 if (num_bus) {
1156 printk(KERN_INFO "IBM MCA SCSI: Separate bus mode (wide-addressing enabled)\n"); 1166 printk(KERN_INFO "IBM MCA SCSI: Separate bus mode (wide-addressing enabled)\n");
1157 subsystem_maxid(host_index) = 16; 1167 subsystem_maxid(shpnt) = 16;
1158 } else { 1168 } else {
1159 printk(KERN_INFO "IBM MCA SCSI: Combined bus mode (wide-addressing disabled)\n"); 1169 printk(KERN_INFO "IBM MCA SCSI: Combined bus mode (wide-addressing disabled)\n");
1160 subsystem_maxid(host_index) = 8; 1170 subsystem_maxid(shpnt) = 8;
1161 } 1171 }
1162 printk(KERN_INFO "IBM MCA SCSI: Sync.-Rate (F/W: 20, Int.: 10, Ext.: %s) MBytes/s\n", ibmrate(speedrun, adaptertype)); 1172 printk(KERN_INFO "IBM MCA SCSI: Sync.-Rate (F/W: 20, Int.: 10, Ext.: %s) MBytes/s\n", ibmrate(speedrun, adaptertype));
1163 } else /* all other IBM SCSI adapters: */ 1173 } else /* all other IBM SCSI adapters: */
1164 printk(KERN_INFO "IBM MCA SCSI: Synchronous-SCSI-Transfer-Rate: %s MBytes/s\n", ibmrate(speedrun, adaptertype)); 1174 printk(KERN_INFO "IBM MCA SCSI: Synchronous-SCSI-Transfer-Rate: %s MBytes/s\n", ibmrate(speedrun, adaptertype));
1165 1175
1166 /* assign correct PUN device space */ 1176 /* assign correct PUN device space */
1167 max_pun = subsystem_maxid(host_index); 1177 max_pun = subsystem_maxid(shpnt);
1168 1178
1169#ifdef IM_DEBUG_PROBE 1179#ifdef IM_DEBUG_PROBE
1170 printk("IBM MCA SCSI: Current SCSI-host index: %d\n", host_index); 1180 printk("IBM MCA SCSI: Current SCSI-host index: %d\n", shpnt);
1171 printk("IBM MCA SCSI: Removing default logical SCSI-device mapping."); 1181 printk("IBM MCA SCSI: Removing default logical SCSI-device mapping.");
1172#else 1182#else
1173 printk(KERN_INFO "IBM MCA SCSI: Dev. Order: %s, Mapping (takes <2min): ", (ibm_ansi_order) ? "ANSI" : "New"); 1183 printk(KERN_INFO "IBM MCA SCSI: Dev. Order: %s, Mapping (takes <2min): ", (ibm_ansi_order) ? "ANSI" : "New");
@@ -1177,7 +1187,7 @@ static void check_devices(int host_index, int adaptertype)
1177#ifdef IM_DEBUG_PROBE 1187#ifdef IM_DEBUG_PROBE
1178 printk("."); 1188 printk(".");
1179#endif 1189#endif
1180 immediate_assign(host_index, 0, 0, ldn, REMOVE_LDN); /* remove ldn (wherever) */ 1190 immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN); /* remove ldn (wherever) */
1181 } 1191 }
1182 lun = 0; /* default lun is 0 */ 1192 lun = 0; /* default lun is 0 */
1183#ifndef IM_DEBUG_PROBE 1193#ifndef IM_DEBUG_PROBE
@@ -1196,18 +1206,18 @@ static void check_devices(int host_index, int adaptertype)
1196#ifdef IM_DEBUG_PROBE 1206#ifdef IM_DEBUG_PROBE
1197 printk("."); 1207 printk(".");
1198#endif 1208#endif
1199 if (id != subsystem_pun(host_index)) { 1209 if (id != subsystem_pun(shpnt)) {
1200 /* if pun is not the adapter: */ 1210 /* if pun is not the adapter: */
1201 /* set ldn=0 to pun,lun */ 1211 /* set ldn=0 to pun,lun */
1202 immediate_assign(host_index, id, lun, PROBE_LDN, SET_LDN); 1212 immediate_assign(shpnt, id, lun, PROBE_LDN, SET_LDN);
1203 if (device_inquiry(host_index, PROBE_LDN)) { /* probe device */ 1213 if (device_inquiry(shpnt, PROBE_LDN)) { /* probe device */
1204 get_scsi(host_index)[id][lun] = (unsigned char) (ld(host_index)[PROBE_LDN].buf[0]); 1214 get_scsi(shpnt)[id][lun] = (unsigned char) (ld(shpnt)[PROBE_LDN].buf[0]);
1205 /* entry, even for NO_LUN */ 1215 /* entry, even for NO_LUN */
1206 if (ld(host_index)[PROBE_LDN].buf[0] != TYPE_NO_LUN) 1216 if (ld(shpnt)[PROBE_LDN].buf[0] != TYPE_NO_LUN)
1207 count_devices++; /* a existing device is found */ 1217 count_devices++; /* a existing device is found */
1208 } 1218 }
1209 /* remove ldn */ 1219 /* remove ldn */
1210 immediate_assign(host_index, id, lun, PROBE_LDN, REMOVE_LDN); 1220 immediate_assign(shpnt, id, lun, PROBE_LDN, REMOVE_LDN);
1211 } 1221 }
1212 } 1222 }
1213#ifndef IM_DEBUG_PROBE 1223#ifndef IM_DEBUG_PROBE
@@ -1227,16 +1237,16 @@ static void check_devices(int host_index, int adaptertype)
1227#ifdef IM_DEBUG_PROBE 1237#ifdef IM_DEBUG_PROBE
1228 printk("."); 1238 printk(".");
1229#endif 1239#endif
1230 if (id != subsystem_pun(host_index)) { 1240 if (id != subsystem_pun(shpnt)) {
1231 if (get_scsi(host_index)[id][lun] != TYPE_NO_LUN && get_scsi(host_index)[id][lun] != TYPE_NO_DEVICE) { 1241 if (get_scsi(shpnt)[id][lun] != TYPE_NO_LUN && get_scsi(shpnt)[id][lun] != TYPE_NO_DEVICE) {
1232 /* Only map if accepted type. Always enter for 1242 /* Only map if accepted type. Always enter for
1233 lun == 0 to get no gaps into ldn-mapping for ldn<7. */ 1243 lun == 0 to get no gaps into ldn-mapping for ldn<7. */
1234 immediate_assign(host_index, id, lun, ldn, SET_LDN); 1244 immediate_assign(shpnt, id, lun, ldn, SET_LDN);
1235 get_ldn(host_index)[id][lun] = ldn; /* map ldn */ 1245 get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
1236 if (device_exists(host_index, ldn, &ld(host_index)[ldn].block_length, &ld(host_index)[ldn].device_type)) { 1246 if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
1237#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET 1247#ifdef CONFIG_IBMMCA_SCSI_DEV_RESET
1238 printk("resetting device at ldn=%x ... ", ldn); 1248 printk("resetting device at ldn=%x ... ", ldn);
1239 immediate_reset(host_index, ldn); 1249 immediate_reset(shpnt, ldn);
1240#endif 1250#endif
1241 ldn++; 1251 ldn++;
1242 } else { 1252 } else {
@@ -1244,15 +1254,15 @@ static void check_devices(int host_index, int adaptertype)
1244 * handle it or because it has problems */ 1254 * handle it or because it has problems */
1245 if (lun > 0) { 1255 if (lun > 0) {
1246 /* remove mapping */ 1256 /* remove mapping */
1247 get_ldn(host_index)[id][lun] = TYPE_NO_DEVICE; 1257 get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
1248 immediate_assign(host_index, 0, 0, ldn, REMOVE_LDN); 1258 immediate_assign(shpnt, 0, 0, ldn, REMOVE_LDN);
1249 } else 1259 } else
1250 ldn++; 1260 ldn++;
1251 } 1261 }
1252 } else if (lun == 0) { 1262 } else if (lun == 0) {
1253 /* map lun == 0, even if no device exists */ 1263 /* map lun == 0, even if no device exists */
1254 immediate_assign(host_index, id, lun, ldn, SET_LDN); 1264 immediate_assign(shpnt, id, lun, ldn, SET_LDN);
1255 get_ldn(host_index)[id][lun] = ldn; /* map ldn */ 1265 get_ldn(shpnt)[id][lun] = ldn; /* map ldn */
1256 ldn++; 1266 ldn++;
1257 } 1267 }
1258 } 1268 }
@@ -1262,14 +1272,14 @@ static void check_devices(int host_index, int adaptertype)
1262 /* map remaining ldns to non-existing devices */ 1272 /* map remaining ldns to non-existing devices */
1263 for (lun = 1; lun < 8 && ldn < MAX_LOG_DEV; lun++) 1273 for (lun = 1; lun < 8 && ldn < MAX_LOG_DEV; lun++)
1264 for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) { 1274 for (id = 0; id < max_pun && ldn < MAX_LOG_DEV; id++) {
1265 if (get_scsi(host_index)[id][lun] == TYPE_NO_LUN || get_scsi(host_index)[id][lun] == TYPE_NO_DEVICE) { 1275 if (get_scsi(shpnt)[id][lun] == TYPE_NO_LUN || get_scsi(shpnt)[id][lun] == TYPE_NO_DEVICE) {
1266 probe_display(1); 1276 probe_display(1);
1267 /* Map remaining ldns only to NON-existing pun,lun 1277 /* Map remaining ldns only to NON-existing pun,lun
1268 combinations to make sure an inquiry will fail. 1278 combinations to make sure an inquiry will fail.
1269 For MULTI_LUN, it is needed to avoid adapter autonome 1279 For MULTI_LUN, it is needed to avoid adapter autonome
1270 SCSI-remapping. */ 1280 SCSI-remapping. */
1271 immediate_assign(host_index, id, lun, ldn, SET_LDN); 1281 immediate_assign(shpnt, id, lun, ldn, SET_LDN);
1272 get_ldn(host_index)[id][lun] = ldn; 1282 get_ldn(shpnt)[id][lun] = ldn;
1273 ldn++; 1283 ldn++;
1274 } 1284 }
1275 } 1285 }
@@ -1292,51 +1302,51 @@ static void check_devices(int host_index, int adaptertype)
1292 for (id = 0; id < max_pun; id++) { 1302 for (id = 0; id < max_pun; id++) {
1293 printk("%2d ", id); 1303 printk("%2d ", id);
1294 for (lun = 0; lun < 8; lun++) 1304 for (lun = 0; lun < 8; lun++)
1295 printk("%2s ", ti_p(get_scsi(host_index)[id][lun])); 1305 printk("%2s ", ti_p(get_scsi(shpnt)[id][lun]));
1296 printk(" %2d ", id); 1306 printk(" %2d ", id);
1297 for (lun = 0; lun < 8; lun++) 1307 for (lun = 0; lun < 8; lun++)
1298 printk("%2s ", ti_l(get_ldn(host_index)[id][lun])); 1308 printk("%2s ", ti_l(get_ldn(shpnt)[id][lun]));
1299 printk("\n"); 1309 printk("\n");
1300 } 1310 }
1301#endif 1311#endif
1302 1312
1303 /* assign total number of found SCSI-devices to the statistics struct */ 1313 /* assign total number of found SCSI-devices to the statistics struct */
1304 IBM_DS(host_index).total_scsi_devices = count_devices; 1314 IBM_DS(shpnt).total_scsi_devices = count_devices;
1305 1315
1306 /* decide for output in /proc-filesystem, if the configuration of 1316 /* decide for output in /proc-filesystem, if the configuration of
1307 SCSI-devices makes dynamical reassignment of devices necessary */ 1317 SCSI-devices makes dynamical reassignment of devices necessary */
1308 if (count_devices >= MAX_LOG_DEV) 1318 if (count_devices >= MAX_LOG_DEV)
1309 IBM_DS(host_index).dyn_flag = 1; /* dynamical assignment is necessary */ 1319 IBM_DS(shpnt).dyn_flag = 1; /* dynamical assignment is necessary */
1310 else 1320 else
1311 IBM_DS(host_index).dyn_flag = 0; /* dynamical assignment is not necessary */ 1321 IBM_DS(shpnt).dyn_flag = 0; /* dynamical assignment is not necessary */
1312 1322
1313 /* If no SCSI-devices are assigned, return 1 in order to cause message. */ 1323 /* If no SCSI-devices are assigned, return 1 in order to cause message. */
1314 if (ldn == 0) 1324 if (ldn == 0)
1315 printk("IBM MCA SCSI: Warning: No SCSI-devices found/assigned!\n"); 1325 printk("IBM MCA SCSI: Warning: No SCSI-devices found/assigned!\n");
1316 1326
1317 /* reset the counters for statistics on the current adapter */ 1327 /* reset the counters for statistics on the current adapter */
1318 IBM_DS(host_index).scbs = 0; 1328 IBM_DS(shpnt).scbs = 0;
1319 IBM_DS(host_index).long_scbs = 0; 1329 IBM_DS(shpnt).long_scbs = 0;
1320 IBM_DS(host_index).total_accesses = 0; 1330 IBM_DS(shpnt).total_accesses = 0;
1321 IBM_DS(host_index).total_interrupts = 0; 1331 IBM_DS(shpnt).total_interrupts = 0;
1322 IBM_DS(host_index).dynamical_assignments = 0; 1332 IBM_DS(shpnt).dynamical_assignments = 0;
1323 memset(IBM_DS(host_index).ldn_access, 0x0, sizeof(IBM_DS(host_index).ldn_access)); 1333 memset(IBM_DS(shpnt).ldn_access, 0x0, sizeof(IBM_DS(shpnt).ldn_access));
1324 memset(IBM_DS(host_index).ldn_read_access, 0x0, sizeof(IBM_DS(host_index).ldn_read_access)); 1334 memset(IBM_DS(shpnt).ldn_read_access, 0x0, sizeof(IBM_DS(shpnt).ldn_read_access));
1325 memset(IBM_DS(host_index).ldn_write_access, 0x0, sizeof(IBM_DS(host_index).ldn_write_access)); 1335 memset(IBM_DS(shpnt).ldn_write_access, 0x0, sizeof(IBM_DS(shpnt).ldn_write_access));
1326 memset(IBM_DS(host_index).ldn_inquiry_access, 0x0, sizeof(IBM_DS(host_index).ldn_inquiry_access)); 1336 memset(IBM_DS(shpnt).ldn_inquiry_access, 0x0, sizeof(IBM_DS(shpnt).ldn_inquiry_access));
1327 memset(IBM_DS(host_index).ldn_modeselect_access, 0x0, sizeof(IBM_DS(host_index).ldn_modeselect_access)); 1337 memset(IBM_DS(shpnt).ldn_modeselect_access, 0x0, sizeof(IBM_DS(shpnt).ldn_modeselect_access));
1328 memset(IBM_DS(host_index).ldn_assignments, 0x0, sizeof(IBM_DS(host_index).ldn_assignments)); 1338 memset(IBM_DS(shpnt).ldn_assignments, 0x0, sizeof(IBM_DS(shpnt).ldn_assignments));
1329 probe_display(0); 1339 probe_display(0);
1330 return; 1340 return;
1331} 1341}
1332 1342
1333static int device_exists(int host_index, int ldn, int *block_length, int *device_type) 1343static int device_exists(struct Scsi_Host *shpnt, int ldn, int *block_length, int *device_type)
1334{ 1344{
1335 unsigned char *buf; 1345 unsigned char *buf;
1336 /* if no valid device found, return immediately with 0 */ 1346 /* if no valid device found, return immediately with 0 */
1337 if (!(device_inquiry(host_index, ldn))) 1347 if (!(device_inquiry(shpnt, ldn)))
1338 return 0; 1348 return 0;
1339 buf = (unsigned char *) (&(ld(host_index)[ldn].buf)); 1349 buf = (unsigned char *) (&(ld(shpnt)[ldn].buf));
1340 if (*buf == TYPE_ROM) { 1350 if (*buf == TYPE_ROM) {
1341 *device_type = TYPE_ROM; 1351 *device_type = TYPE_ROM;
1342 *block_length = 2048; /* (standard blocksize for yellow-/red-book) */ 1352 *block_length = 2048; /* (standard blocksize for yellow-/red-book) */
@@ -1349,7 +1359,7 @@ static int device_exists(int host_index, int ldn, int *block_length, int *device
1349 } 1359 }
1350 if (*buf == TYPE_DISK) { 1360 if (*buf == TYPE_DISK) {
1351 *device_type = TYPE_DISK; 1361 *device_type = TYPE_DISK;
1352 if (read_capacity(host_index, ldn)) { 1362 if (read_capacity(shpnt, ldn)) {
1353 *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24); 1363 *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
1354 return 1; 1364 return 1;
1355 } else 1365 } else
@@ -1357,7 +1367,7 @@ static int device_exists(int host_index, int ldn, int *block_length, int *device
1357 } 1367 }
1358 if (*buf == TYPE_MOD) { 1368 if (*buf == TYPE_MOD) {
1359 *device_type = TYPE_MOD; 1369 *device_type = TYPE_MOD;
1360 if (read_capacity(host_index, ldn)) { 1370 if (read_capacity(shpnt, ldn)) {
1361 *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24); 1371 *block_length = *(buf + 7) + (*(buf + 6) << 8) + (*(buf + 5) << 16) + (*(buf + 4) << 24);
1362 return 1; 1372 return 1;
1363 } else 1373 } else
@@ -1430,6 +1440,9 @@ static void internal_ibmmca_scsi_setup(char *str, int *ints)
1430 return; 1440 return;
1431} 1441}
1432 1442
1443#if 0
1444 FIXME NEED TO MOVE TO SYSFS
1445
1433static int ibmmca_getinfo(char *buf, int slot, void *dev_id) 1446static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
1434{ 1447{
1435 struct Scsi_Host *shpnt; 1448 struct Scsi_Host *shpnt;
@@ -1480,58 +1493,34 @@ static int ibmmca_getinfo(char *buf, int slot, void *dev_id)
1480 1493
1481 return len; 1494 return len;
1482} 1495}
1496#endif
1483 1497
1484int ibmmca_detect(struct scsi_host_template * scsi_template) 1498static struct scsi_host_template ibmmca_driver_template = {
1499 .proc_name = "ibmmca",
1500 .proc_info = ibmmca_proc_info,
1501 .name = "IBM SCSI-Subsystem",
1502 .queuecommand = ibmmca_queuecommand,
1503 .eh_abort_handler = ibmmca_abort,
1504 .eh_host_reset_handler = ibmmca_host_reset,
1505 .bios_param = ibmmca_biosparam,
1506 .can_queue = 16,
1507 .this_id = 7,
1508 .sg_tablesize = 16,
1509 .cmd_per_lun = 1,
1510 .use_clustering = ENABLE_CLUSTERING,
1511};
1512
1513static int ibmmca_probe(struct device *dev)
1485{ 1514{
1486 struct Scsi_Host *shpnt; 1515 struct Scsi_Host *shpnt;
1487 int port, id, i, j, k, slot; 1516 int port, id, i, j, k, irq, enabled, ret = -EINVAL;
1488 int devices_on_irq_11 = 0; 1517 struct mca_device *mca_dev = to_mca_device(dev);
1489 int devices_on_irq_14 = 0; 1518 const char *description = ibmmca_description[mca_dev->index];
1490 int IRQ14_registered = 0;
1491 int IRQ11_registered = 0;
1492
1493 found = 0; /* make absolutely sure, that found is set to 0 */
1494 1519
1495 /* First of all, print the version number of the driver. This is 1520 /* First of all, print the version number of the driver. This is
1496 * important to allow better user bugreports in case of already 1521 * important to allow better user bugreports in case of already
1497 * having problems with the MCA_bus probing. */ 1522 * having problems with the MCA_bus probing. */
1498 printk(KERN_INFO "IBM MCA SCSI: Version %s\n", IBMMCA_SCSI_DRIVER_VERSION); 1523 printk(KERN_INFO "IBM MCA SCSI: Version %s\n", IBMMCA_SCSI_DRIVER_VERSION);
1499 /* if this is not MCA machine, return "nothing found" */
1500 if (!MCA_bus) {
1501 printk(KERN_INFO "IBM MCA SCSI: No Microchannel-bus present --> Aborting.\n" " This machine does not have any IBM MCA-bus\n" " or the MCA-Kernel-support is not enabled!\n");
1502 return 0;
1503 }
1504
1505#ifdef MODULE
1506 /* If the driver is run as module, read from conf.modules or cmd-line */
1507 if (boot_options)
1508 option_setup(boot_options);
1509#endif
1510
1511 /* get interrupt request level */
1512 if (request_irq(IM_IRQ, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
1513 printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ);
1514 return 0;
1515 } else
1516 IRQ14_registered++;
1517
1518 /* if ibmmcascsi setup option was passed to kernel, return "found" */
1519 for (i = 0; i < IM_MAX_HOSTS; i++)
1520 if (io_port[i] > 0 && scsi_id[i] >= 0 && scsi_id[i] < 8) {
1521 printk("IBM MCA SCSI: forced detected SCSI Adapter, io=0x%x, scsi id=%d.\n", io_port[i], scsi_id[i]);
1522 if ((shpnt = ibmmca_register(scsi_template, io_port[i], scsi_id[i], FORCED_DETECTION, "forced detected SCSI Adapter"))) {
1523 for (k = 2; k < 7; k++)
1524 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = 0;
1525 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = FORCED_DETECTION;
1526 mca_set_adapter_name(MCA_INTEGSCSI, "forced detected SCSI Adapter");
1527 mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1528 mca_mark_as_used(MCA_INTEGSCSI);
1529 devices_on_irq_14++;
1530 }
1531 }
1532 if (found)
1533 return found;
1534
1535 /* The POS2-register of all PS/2 model SCSI-subsystems has the following 1524 /* The POS2-register of all PS/2 model SCSI-subsystems has the following
1536 * interpretation of bits: 1525 * interpretation of bits:
1537 * Bit 7 - 4 : Chip Revision ID (Release) 1526 * Bit 7 - 4 : Chip Revision ID (Release)
@@ -1558,7 +1547,14 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
1558 1547
1559 /* first look for the IBM SCSI integrated subsystem on the motherboard */ 1548 /* first look for the IBM SCSI integrated subsystem on the motherboard */
1560 for (j = 0; j < 8; j++) /* read the pos-information */ 1549 for (j = 0; j < 8; j++) /* read the pos-information */
1561 pos[j] = mca_read_stored_pos(MCA_INTEGSCSI, j); 1550 pos[j] = mca_device_read_pos(mca_dev, j);
1551 id = (pos[3] & 0xe0) >> 5; /* this is correct and represents the PUN */
1552 enabled = (pos[2] &0x01);
1553 if (!enabled) {
1554 printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
1555 printk(KERN_WARNING " SCSI-operations may not work.\n");
1556 }
1557
1562 /* pos2 = pos3 = 0xff if there is no integrated SCSI-subsystem present, but 1558 /* pos2 = pos3 = 0xff if there is no integrated SCSI-subsystem present, but
1563 * if we ignore the settings of all surrounding pos registers, it is not 1559 * if we ignore the settings of all surrounding pos registers, it is not
1564 * completely sufficient to only check pos2 and pos3. */ 1560 * completely sufficient to only check pos2 and pos3. */
@@ -1566,232 +1562,137 @@ int ibmmca_detect(struct scsi_host_template * scsi_template)
1566 * make sure, we see a real integrated onboard SCSI-interface and no 1562 * make sure, we see a real integrated onboard SCSI-interface and no
1567 * internal system information, which gets mapped to some pos registers 1563 * internal system information, which gets mapped to some pos registers
1568 * on models 95xx. */ 1564 * on models 95xx. */
1569 if ((!pos[0] && !pos[1] && pos[2] > 0 && pos[3] > 0 && !pos[4] && !pos[5] && !pos[6] && !pos[7]) || (pos[0] == 0xff && pos[1] == 0xff && pos[2] < 0xff && pos[3] < 0xff && pos[4] == 0xff && pos[5] == 0xff && pos[6] == 0xff && pos[7] == 0xff)) { 1565 if (mca_dev->slot == MCA_INTEGSCSI &&
1570 if ((pos[2] & 1) == 1) /* is the subsystem chip enabled ? */ 1566 ((!pos[0] && !pos[1] && pos[2] > 0 &&
1571 port = IM_IO_PORT; 1567 pos[3] > 0 && !pos[4] && !pos[5] &&
1572 else { /* if disabled, no IRQs will be generated, as the chip won't 1568 !pos[6] && !pos[7]) ||
1573 * listen to the incoming commands and will do really nothing, 1569 (pos[0] == 0xff && pos[1] == 0xff &&
1574 * except for listening to the pos-register settings. If this 1570 pos[2] < 0xff && pos[3] < 0xff &&
1575 * happens, I need to hugely think about it, as one has to 1571 pos[4] == 0xff && pos[5] == 0xff &&
1576 * write something to the MCA-Bus pos register in order to 1572 pos[6] == 0xff && pos[7] == 0xff))) {
1577 * enable the chip. Normally, IBM-SCSI won't pass the POST, 1573 irq = IM_IRQ;
1578 * when the chip is disabled (see IBM tech. ref.). */ 1574 port = IM_IO_PORT;
1579 port = IM_IO_PORT; /* anyway, set the portnumber and warn */ 1575 } else {
1580 printk("IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n" " SCSI-operations may not work.\n"); 1576 irq = IM_IRQ;
1577 port = IM_IO_PORT + ((pos[2] &0x0e) << 2);
1578 if ((mca_dev->index == IBM_SCSI2_FW) && (pos[6] != 0)) {
1579 printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
1580 printk(KERN_ERR " Impossible to determine adapter PUN!\n");
1581 printk(KERN_ERR " Guessing adapter PUN = 7.\n");
1582 id = 7;
1583 } else {
1584 id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
1585 if (mca_dev->index == IBM_SCSI2_FW) {
1586 id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
1587 * for F/W adapters */
1588 }
1581 } 1589 }
1582 id = (pos[3] & 0xe0) >> 5; /* this is correct and represents the PUN */ 1590 if ((mca_dev->index == IBM_SCSI2_FW) &&
1583 /* give detailed information on the subsystem. This helps me 1591 (pos[4] & 0x01) && (pos[6] == 0)) {
1584 * additionally during debugging and analyzing bug-reports. */ 1592 /* IRQ11 is used by SCSI-2 F/W Adapter/A */
1585 printk(KERN_INFO "IBM MCA SCSI: IBM Integrated SCSI Controller ffound, io=0x%x, scsi id=%d,\n", port, id); 1593 printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
1586 printk(KERN_INFO " chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled."); 1594 irq = IM_IRQ_FW;
1587
1588 /* register the found integrated SCSI-subsystem */
1589 if ((shpnt = ibmmca_register(scsi_template, port, id, INTEGRATED_SCSI, "IBM Integrated SCSI Controller")))
1590 {
1591 for (k = 2; k < 7; k++)
1592 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
1593 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
1594 mca_set_adapter_name(MCA_INTEGSCSI, "IBM Integrated SCSI Controller");
1595 mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1596 mca_mark_as_used(MCA_INTEGSCSI);
1597 devices_on_irq_14++;
1598 } 1595 }
1599 } 1596 }
1600 1597
1601 /* now look for other adapters in MCA slots, */
1602 /* determine the number of known IBM-SCSI-subsystem types */
1603 /* see the pos[2] dependence to get the adapter port-offset. */
1604 for (i = 0; i < ARRAY_SIZE(subsys_list); i++) {
1605 /* scan each slot for a fitting adapter id */
1606 slot = 0; /* start at slot 0 */
1607 while ((slot = mca_find_adapter(subsys_list[i].mca_id, slot))
1608 != MCA_NOTFOUND) { /* scan through all slots */
1609 for (j = 0; j < 8; j++) /* read the pos-information */
1610 pos[j] = mca_read_stored_pos(slot, j);
1611 if ((pos[2] & 1) == 1)
1612 /* is the subsystem chip enabled ? */
1613 /* (explanations see above) */
1614 port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
1615 else {
1616 /* anyway, set the portnumber and warn */
1617 port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
1618 printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
1619 printk(KERN_WARNING " SCSI-operations may not work.\n");
1620 }
1621 if ((i == IBM_SCSI2_FW) && (pos[6] != 0)) {
1622 printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
1623 printk(KERN_ERR " Impossible to determine adapter PUN!\n");
1624 printk(KERN_ERR " Guessing adapter PUN = 7.\n");
1625 id = 7;
1626 } else {
1627 id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
1628 if (i == IBM_SCSI2_FW) {
1629 id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
1630 * for F/W adapters */
1631 }
1632 }
1633 if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0)) {
1634 /* IRQ11 is used by SCSI-2 F/W Adapter/A */
1635 printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
1636 /* get interrupt request level */
1637 if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts)) {
1638 printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
1639 } else
1640 IRQ11_registered++;
1641 }
1642 printk(KERN_INFO "IBM MCA SCSI: %s found in slot %d, io=0x%x, scsi id=%d,\n", subsys_list[i].description, slot + 1, port, id);
1643 if ((pos[2] & 0xf0) == 0xf0)
1644 printk(KERN_DEBUG" ROM Addr.=off,");
1645 else
1646 printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
1647 printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
1648
1649 /* register the hostadapter */
1650 if ((shpnt = ibmmca_register(scsi_template, port, id, i, subsys_list[i].description))) {
1651 for (k = 2; k < 8; k++)
1652 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
1653 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = i;
1654 mca_set_adapter_name(slot, subsys_list[i].description);
1655 mca_set_adapter_procfn(slot, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1656 mca_mark_as_used(slot);
1657 if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0))
1658 devices_on_irq_11++;
1659 else
1660 devices_on_irq_14++;
1661 }
1662 slot++; /* advance to next slot */
1663 } /* advance to next adapter id in the list of IBM-SCSI-subsystems */
1664 }
1665 1598
1666 /* now check for SCSI-adapters, mapped to the integrated SCSI
1667 * area. E.g. a W/Cache in MCA-slot 9(!). Do the check correct here,
1668 * as this is a known effect on some models 95xx. */
1669 for (i = 0; i < ARRAY_SIZE(subsys_list); i++) {
1670 /* scan each slot for a fitting adapter id */
1671 slot = mca_find_adapter(subsys_list[i].mca_id, MCA_INTEGSCSI);
1672 if (slot != MCA_NOTFOUND) { /* scan through all slots */
1673 for (j = 0; j < 8; j++) /* read the pos-information */
1674 pos[j] = mca_read_stored_pos(slot, j);
1675 if ((pos[2] & 1) == 1) { /* is the subsystem chip enabled ? */
1676 /* (explanations see above) */
1677 port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
1678 } else { /* anyway, set the portnumber and warn */
1679 port = IM_IO_PORT + ((pos[2] & 0x0e) << 2);
1680 printk(KERN_WARNING "IBM MCA SCSI: WARNING - Your SCSI-subsystem is disabled!\n");
1681 printk(KERN_WARNING " SCSI-operations may not work.\n");
1682 }
1683 if ((i == IBM_SCSI2_FW) && (pos[6] != 0)) {
1684 printk(KERN_ERR "IBM MCA SCSI: ERROR - Wrong POS(6)-register setting!\n");
1685 printk(KERN_ERR " Impossible to determine adapter PUN!\n");
1686 printk(KERN_ERR " Guessing adapter PUN = 7.\n");
1687 id = 7;
1688 } else {
1689 id = (pos[3] & 0xe0) >> 5; /* get subsystem PUN */
1690 if (i == IBM_SCSI2_FW)
1691 id |= (pos[3] & 0x10) >> 1; /* get subsystem PUN high-bit
1692 * for F/W adapters */
1693 }
1694 if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0)) {
1695 /* IRQ11 is used by SCSI-2 F/W Adapter/A */
1696 printk(KERN_DEBUG "IBM MCA SCSI: SCSI-2 F/W adapter needs IRQ 11.\n");
1697 /* get interrupt request level */
1698 if (request_irq(IM_IRQ_FW, interrupt_handler, IRQF_SHARED, "ibmmcascsi", hosts))
1699 printk(KERN_ERR "IBM MCA SCSI: Unable to get shared IRQ %d.\n", IM_IRQ_FW);
1700 else
1701 IRQ11_registered++;
1702 }
1703 printk(KERN_INFO "IBM MCA SCSI: %s found in slot %d, io=0x%x, scsi id=%d,\n", subsys_list[i].description, slot + 1, port, id);
1704 if ((pos[2] & 0xf0) == 0xf0)
1705 printk(KERN_DEBUG " ROM Addr.=off,");
1706 else
1707 printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
1708 printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
1709
1710 /* register the hostadapter */
1711 if ((shpnt = ibmmca_register(scsi_template, port, id, i, subsys_list[i].description))) {
1712 for (k = 2; k < 7; k++)
1713 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
1714 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = i;
1715 mca_set_adapter_name(slot, subsys_list[i].description);
1716 mca_set_adapter_procfn(slot, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1717 mca_mark_as_used(slot);
1718 if ((i == IBM_SCSI2_FW) && (pos[4] & 0x01) && (pos[6] == 0))
1719 devices_on_irq_11++;
1720 else
1721 devices_on_irq_14++;
1722 }
1723 slot++; /* advance to next slot */
1724 } /* advance to next adapter id in the list of IBM-SCSI-subsystems */
1725 }
1726 if (IRQ11_registered && !devices_on_irq_11)
1727 free_irq(IM_IRQ_FW, hosts); /* no devices on IRQ 11 */
1728 if (IRQ14_registered && !devices_on_irq_14)
1729 free_irq(IM_IRQ, hosts); /* no devices on IRQ 14 */
1730 if (!devices_on_irq_11 && !devices_on_irq_14)
1731 printk(KERN_WARNING "IBM MCA SCSI: No IBM SCSI-subsystem adapter attached.\n");
1732 return found; /* return the number of found SCSI hosts. Should be 1 or 0. */
1733}
1734 1599
1735static struct Scsi_Host *ibmmca_register(struct scsi_host_template * scsi_template, int port, int id, int adaptertype, char *hostname) 1600 /* give detailed information on the subsystem. This helps me
1736{ 1601 * additionally during debugging and analyzing bug-reports. */
1737 struct Scsi_Host *shpnt; 1602 printk(KERN_INFO "IBM MCA SCSI: %s found, io=0x%x, scsi id=%d,\n",
1738 int i, j; 1603 description, port, id);
1739 unsigned int ctrl; 1604 if (mca_dev->slot == MCA_INTEGSCSI)
1605 printk(KERN_INFO " chip rev.=%d, 8K NVRAM=%s, subsystem=%s\n", ((pos[2] & 0xf0) >> 4), (pos[2] & 2) ? "locked" : "accessible", (pos[2] & 1) ? "enabled." : "disabled.");
1606 else {
1607 if ((pos[2] & 0xf0) == 0xf0)
1608 printk(KERN_DEBUG " ROM Addr.=off,");
1609 else
1610 printk(KERN_DEBUG " ROM Addr.=0x%x,", ((pos[2] & 0xf0) << 13) + 0xc0000);
1611
1612 printk(KERN_DEBUG " port-offset=0x%x, subsystem=%s\n", ((pos[2] & 0x0e) << 2), (pos[2] & 1) ? "enabled." : "disabled.");
1613 }
1740 1614
1741 /* check I/O region */ 1615 /* check I/O region */
1742 if (!request_region(port, IM_N_IO_PORT, hostname)) { 1616 if (!request_region(port, IM_N_IO_PORT, description)) {
1743 printk(KERN_ERR "IBM MCA SCSI: Unable to get I/O region 0x%x-0x%x (%d ports).\n", port, port + IM_N_IO_PORT - 1, IM_N_IO_PORT); 1617 printk(KERN_ERR "IBM MCA SCSI: Unable to get I/O region 0x%x-0x%x (%d ports).\n", port, port + IM_N_IO_PORT - 1, IM_N_IO_PORT);
1744 return NULL; 1618 goto out_fail;
1745 } 1619 }
1746 1620
1747 /* register host */ 1621 /* register host */
1748 shpnt = scsi_register(scsi_template, sizeof(struct ibmmca_hostdata)); 1622 shpnt = scsi_host_alloc(&ibmmca_driver_template,
1623 sizeof(struct ibmmca_hostdata));
1749 if (!shpnt) { 1624 if (!shpnt) {
1750 printk(KERN_ERR "IBM MCA SCSI: Unable to register host.\n"); 1625 printk(KERN_ERR "IBM MCA SCSI: Unable to register host.\n");
1751 release_region(port, IM_N_IO_PORT); 1626 goto out_release;
1752 return NULL; 1627 }
1628
1629 dev_set_drvdata(dev, shpnt);
1630 if(request_irq(irq, interrupt_handler, IRQF_SHARED, description, dev)) {
1631 printk(KERN_ERR "IBM MCA SCSI: failed to request interrupt %d\n", irq);
1632 goto out_free_host;
1753 } 1633 }
1754 1634
1755 /* request I/O region */ 1635 /* request I/O region */
1756 hosts[found] = shpnt; /* add new found hostadapter to the list */ 1636 special(shpnt) = mca_dev->index; /* important assignment or else crash! */
1757 special(found) = adaptertype; /* important assignment or else crash! */ 1637 subsystem_connector_size(shpnt) = 0; /* preset slot-size */
1758 subsystem_connector_size(found) = 0; /* preset slot-size */ 1638 shpnt->irq = irq; /* assign necessary stuff for the adapter */
1759 shpnt->irq = IM_IRQ; /* assign necessary stuff for the adapter */
1760 shpnt->io_port = port; 1639 shpnt->io_port = port;
1761 shpnt->n_io_port = IM_N_IO_PORT; 1640 shpnt->n_io_port = IM_N_IO_PORT;
1762 shpnt->this_id = id; 1641 shpnt->this_id = id;
1763 shpnt->max_id = 8; /* 8 PUNs are default */ 1642 shpnt->max_id = 8; /* 8 PUNs are default */
1764 /* now, the SCSI-subsystem is connected to Linux */ 1643 /* now, the SCSI-subsystem is connected to Linux */
1765 1644
1766 ctrl = (unsigned int) (inb(IM_CTR_REG(found))); /* get control-register status */
1767#ifdef IM_DEBUG_PROBE 1645#ifdef IM_DEBUG_PROBE
1646 ctrl = (unsigned int) (inb(IM_CTR_REG(found))); /* get control-register status */
1768 printk("IBM MCA SCSI: Control Register contents: %x, status: %x\n", ctrl, inb(IM_STAT_REG(found))); 1647 printk("IBM MCA SCSI: Control Register contents: %x, status: %x\n", ctrl, inb(IM_STAT_REG(found)));
1769 printk("IBM MCA SCSI: This adapters' POS-registers: "); 1648 printk("IBM MCA SCSI: This adapters' POS-registers: ");
1770 for (i = 0; i < 8; i++) 1649 for (i = 0; i < 8; i++)
1771 printk("%x ", pos[i]); 1650 printk("%x ", pos[i]);
1772 printk("\n"); 1651 printk("\n");
1773#endif 1652#endif
1774 reset_status(found) = IM_RESET_NOT_IN_PROGRESS; 1653 reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS;
1775 1654
1776 for (i = 0; i < 16; i++) /* reset the tables */ 1655 for (i = 0; i < 16; i++) /* reset the tables */
1777 for (j = 0; j < 8; j++) 1656 for (j = 0; j < 8; j++)
1778 get_ldn(found)[i][j] = MAX_LOG_DEV; 1657 get_ldn(shpnt)[i][j] = MAX_LOG_DEV;
1779 1658
1780 /* check which logical devices exist */ 1659 /* check which logical devices exist */
1781 /* after this line, local interrupting is possible: */ 1660 /* after this line, local interrupting is possible: */
1782 local_checking_phase_flag(found) = 1; 1661 local_checking_phase_flag(shpnt) = 1;
1783 check_devices(found, adaptertype); /* call by value, using the global variable hosts */ 1662 check_devices(shpnt, mca_dev->index); /* call by value, using the global variable hosts */
1784 local_checking_phase_flag(found) = 0; 1663 local_checking_phase_flag(shpnt) = 0;
1785 found++; /* now increase index to be prepared for next found subsystem */ 1664
1786 /* an ibm mca subsystem has been detected */ 1665 /* an ibm mca subsystem has been detected */
1787 return shpnt; 1666
1667 for (k = 2; k < 7; k++)
1668 ((struct ibmmca_hostdata *) shpnt->hostdata)->_pos[k] = pos[k];
1669 ((struct ibmmca_hostdata *) shpnt->hostdata)->_special = INTEGRATED_SCSI;
1670 mca_device_set_name(mca_dev, description);
1671 /* FIXME: NEED TO REPLUMB TO SYSFS
1672 mca_set_adapter_procfn(MCA_INTEGSCSI, (MCA_ProcFn) ibmmca_getinfo, shpnt);
1673 */
1674 mca_device_set_claim(mca_dev, 1);
1675 if (scsi_add_host(shpnt, dev)) {
1676 dev_printk(KERN_ERR, dev, "IBM MCA SCSI: scsi_add_host failed\n");
1677 goto out_free_host;
1678 }
1679 scsi_scan_host(shpnt);
1680
1681 return 0;
1682 out_free_host:
1683 scsi_host_put(shpnt);
1684 out_release:
1685 release_region(port, IM_N_IO_PORT);
1686 out_fail:
1687 return ret;
1788} 1688}
1789 1689
1790static int ibmmca_release(struct Scsi_Host *shpnt) 1690static int __devexit ibmmca_remove(struct device *dev)
1791{ 1691{
1692 struct Scsi_Host *shpnt = dev_get_drvdata(dev);
1693 scsi_remove_host(shpnt);
1792 release_region(shpnt->io_port, shpnt->n_io_port); 1694 release_region(shpnt->io_port, shpnt->n_io_port);
1793 if (!(--found)) 1695 free_irq(shpnt->irq, dev);
1794 free_irq(shpnt->irq, hosts);
1795 return 0; 1696 return 0;
1796} 1697}
1797 1698
@@ -1805,33 +1706,24 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1805 int current_ldn; 1706 int current_ldn;
1806 int id, lun; 1707 int id, lun;
1807 int target; 1708 int target;
1808 int host_index;
1809 int max_pun; 1709 int max_pun;
1810 int i; 1710 int i;
1811 struct scatterlist *sl; 1711 struct scatterlist *sg;
1812 1712
1813 shpnt = cmd->device->host; 1713 shpnt = cmd->device->host;
1814 /* search for the right hostadapter */
1815 for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
1816 1714
1817 if (!hosts[host_index]) { /* invalid hostadapter descriptor address */ 1715 max_pun = subsystem_maxid(shpnt);
1818 cmd->result = DID_NO_CONNECT << 16;
1819 if (done)
1820 done(cmd);
1821 return 0;
1822 }
1823 max_pun = subsystem_maxid(host_index);
1824 if (ibm_ansi_order) { 1716 if (ibm_ansi_order) {
1825 target = max_pun - 1 - cmd->device->id; 1717 target = max_pun - 1 - cmd->device->id;
1826 if ((target <= subsystem_pun(host_index)) && (cmd->device->id <= subsystem_pun(host_index))) 1718 if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
1827 target--; 1719 target--;
1828 else if ((target >= subsystem_pun(host_index)) && (cmd->device->id >= subsystem_pun(host_index))) 1720 else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
1829 target++; 1721 target++;
1830 } else 1722 } else
1831 target = cmd->device->id; 1723 target = cmd->device->id;
1832 1724
1833 /* if (target,lun) is NO LUN or not existing at all, return error */ 1725 /* if (target,lun) is NO LUN or not existing at all, return error */
1834 if ((get_scsi(host_index)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(host_index)[target][cmd->device->lun] == TYPE_NO_DEVICE)) { 1726 if ((get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_LUN) || (get_scsi(shpnt)[target][cmd->device->lun] == TYPE_NO_DEVICE)) {
1835 cmd->result = DID_NO_CONNECT << 16; 1727 cmd->result = DID_NO_CONNECT << 16;
1836 if (done) 1728 if (done)
1837 done(cmd); 1729 done(cmd);
@@ -1839,16 +1731,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1839 } 1731 }
1840 1732
1841 /*if (target,lun) unassigned, do further checks... */ 1733 /*if (target,lun) unassigned, do further checks... */
1842 ldn = get_ldn(host_index)[target][cmd->device->lun]; 1734 ldn = get_ldn(shpnt)[target][cmd->device->lun];
1843 if (ldn >= MAX_LOG_DEV) { /* on invalid ldn do special stuff */ 1735 if (ldn >= MAX_LOG_DEV) { /* on invalid ldn do special stuff */
1844 if (ldn > MAX_LOG_DEV) { /* dynamical remapping if ldn unassigned */ 1736 if (ldn > MAX_LOG_DEV) { /* dynamical remapping if ldn unassigned */
1845 current_ldn = next_ldn(host_index); /* stop-value for one circle */ 1737 current_ldn = next_ldn(shpnt); /* stop-value for one circle */
1846 while (ld(host_index)[next_ldn(host_index)].cmd) { /* search for a occupied, but not in */ 1738 while (ld(shpnt)[next_ldn(shpnt)].cmd) { /* search for a occupied, but not in */
1847 /* command-processing ldn. */ 1739 /* command-processing ldn. */
1848 next_ldn(host_index)++; 1740 next_ldn(shpnt)++;
1849 if (next_ldn(host_index) >= MAX_LOG_DEV) 1741 if (next_ldn(shpnt) >= MAX_LOG_DEV)
1850 next_ldn(host_index) = 7; 1742 next_ldn(shpnt) = 7;
1851 if (current_ldn == next_ldn(host_index)) { /* One circle done ? */ 1743 if (current_ldn == next_ldn(shpnt)) { /* One circle done ? */
1852 /* no non-processing ldn found */ 1744 /* no non-processing ldn found */
1853 scmd_printk(KERN_WARNING, cmd, 1745 scmd_printk(KERN_WARNING, cmd,
1854 "IBM MCA SCSI: Cannot assign SCSI-device dynamically!\n" 1746 "IBM MCA SCSI: Cannot assign SCSI-device dynamically!\n"
@@ -1864,56 +1756,56 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1864 /* unmap non-processing ldn */ 1756 /* unmap non-processing ldn */
1865 for (id = 0; id < max_pun; id++) 1757 for (id = 0; id < max_pun; id++)
1866 for (lun = 0; lun < 8; lun++) { 1758 for (lun = 0; lun < 8; lun++) {
1867 if (get_ldn(host_index)[id][lun] == next_ldn(host_index)) { 1759 if (get_ldn(shpnt)[id][lun] == next_ldn(shpnt)) {
1868 get_ldn(host_index)[id][lun] = TYPE_NO_DEVICE; 1760 get_ldn(shpnt)[id][lun] = TYPE_NO_DEVICE;
1869 get_scsi(host_index)[id][lun] = TYPE_NO_DEVICE; 1761 get_scsi(shpnt)[id][lun] = TYPE_NO_DEVICE;
1870 /* unmap entry */ 1762 /* unmap entry */
1871 } 1763 }
1872 } 1764 }
1873 /* set reduced interrupt_handler-mode for checking */ 1765 /* set reduced interrupt_handler-mode for checking */
1874 local_checking_phase_flag(host_index) = 1; 1766 local_checking_phase_flag(shpnt) = 1;
1875 /* map found ldn to pun,lun */ 1767 /* map found ldn to pun,lun */
1876 get_ldn(host_index)[target][cmd->device->lun] = next_ldn(host_index); 1768 get_ldn(shpnt)[target][cmd->device->lun] = next_ldn(shpnt);
1877 /* change ldn to the right value, that is now next_ldn */ 1769 /* change ldn to the right value, that is now next_ldn */
1878 ldn = next_ldn(host_index); 1770 ldn = next_ldn(shpnt);
1879 /* unassign all ldns (pun,lun,ldn does not matter for remove) */ 1771 /* unassign all ldns (pun,lun,ldn does not matter for remove) */
1880 immediate_assign(host_index, 0, 0, 0, REMOVE_LDN); 1772 immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
1881 /* set only LDN for remapped device */ 1773 /* set only LDN for remapped device */
1882 immediate_assign(host_index, target, cmd->device->lun, ldn, SET_LDN); 1774 immediate_assign(shpnt, target, cmd->device->lun, ldn, SET_LDN);
1883 /* get device information for ld[ldn] */ 1775 /* get device information for ld[ldn] */
1884 if (device_exists(host_index, ldn, &ld(host_index)[ldn].block_length, &ld(host_index)[ldn].device_type)) { 1776 if (device_exists(shpnt, ldn, &ld(shpnt)[ldn].block_length, &ld(shpnt)[ldn].device_type)) {
1885 ld(host_index)[ldn].cmd = NULL; /* To prevent panic set 0, because 1777 ld(shpnt)[ldn].cmd = NULL; /* To prevent panic set 0, because
1886 devices that were not assigned, 1778 devices that were not assigned,
1887 should have nothing in progress. */ 1779 should have nothing in progress. */
1888 get_scsi(host_index)[target][cmd->device->lun] = ld(host_index)[ldn].device_type; 1780 get_scsi(shpnt)[target][cmd->device->lun] = ld(shpnt)[ldn].device_type;
1889 /* increase assignment counters for statistics in /proc */ 1781 /* increase assignment counters for statistics in /proc */
1890 IBM_DS(host_index).dynamical_assignments++; 1782 IBM_DS(shpnt).dynamical_assignments++;
1891 IBM_DS(host_index).ldn_assignments[ldn]++; 1783 IBM_DS(shpnt).ldn_assignments[ldn]++;
1892 } else 1784 } else
1893 /* panic here, because a device, found at boottime has 1785 /* panic here, because a device, found at boottime has
1894 vanished */ 1786 vanished */
1895 panic("IBM MCA SCSI: ldn=0x%x, SCSI-device on (%d,%d) vanished!\n", ldn, target, cmd->device->lun); 1787 panic("IBM MCA SCSI: ldn=0x%x, SCSI-device on (%d,%d) vanished!\n", ldn, target, cmd->device->lun);
1896 /* unassign again all ldns (pun,lun,ldn does not matter for remove) */ 1788 /* unassign again all ldns (pun,lun,ldn does not matter for remove) */
1897 immediate_assign(host_index, 0, 0, 0, REMOVE_LDN); 1789 immediate_assign(shpnt, 0, 0, 0, REMOVE_LDN);
1898 /* remap all ldns, as written in the pun/lun table */ 1790 /* remap all ldns, as written in the pun/lun table */
1899 lun = 0; 1791 lun = 0;
1900#ifdef CONFIG_SCSI_MULTI_LUN 1792#ifdef CONFIG_SCSI_MULTI_LUN
1901 for (lun = 0; lun < 8; lun++) 1793 for (lun = 0; lun < 8; lun++)
1902#endif 1794#endif
1903 for (id = 0; id < max_pun; id++) { 1795 for (id = 0; id < max_pun; id++) {
1904 if (get_ldn(host_index)[id][lun] <= MAX_LOG_DEV) 1796 if (get_ldn(shpnt)[id][lun] <= MAX_LOG_DEV)
1905 immediate_assign(host_index, id, lun, get_ldn(host_index)[id][lun], SET_LDN); 1797 immediate_assign(shpnt, id, lun, get_ldn(shpnt)[id][lun], SET_LDN);
1906 } 1798 }
1907 /* set back to normal interrupt_handling */ 1799 /* set back to normal interrupt_handling */
1908 local_checking_phase_flag(host_index) = 0; 1800 local_checking_phase_flag(shpnt) = 0;
1909#ifdef IM_DEBUG_PROBE 1801#ifdef IM_DEBUG_PROBE
1910 /* Information on syslog terminal */ 1802 /* Information on syslog terminal */
1911 printk("IBM MCA SCSI: ldn=0x%x dynamically reassigned to (%d,%d).\n", ldn, target, cmd->device->lun); 1803 printk("IBM MCA SCSI: ldn=0x%x dynamically reassigned to (%d,%d).\n", ldn, target, cmd->device->lun);
1912#endif 1804#endif
1913 /* increase next_ldn for next dynamical assignment */ 1805 /* increase next_ldn for next dynamical assignment */
1914 next_ldn(host_index)++; 1806 next_ldn(shpnt)++;
1915 if (next_ldn(host_index) >= MAX_LOG_DEV) 1807 if (next_ldn(shpnt) >= MAX_LOG_DEV)
1916 next_ldn(host_index) = 7; 1808 next_ldn(shpnt) = 7;
1917 } else { /* wall against Linux accesses to the subsystem adapter */ 1809 } else { /* wall against Linux accesses to the subsystem adapter */
1918 cmd->result = DID_BAD_TARGET << 16; 1810 cmd->result = DID_BAD_TARGET << 16;
1919 if (done) 1811 if (done)
@@ -1923,34 +1815,32 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1923 } 1815 }
1924 1816
1925 /*verify there is no command already in progress for this log dev */ 1817 /*verify there is no command already in progress for this log dev */
1926 if (ld(host_index)[ldn].cmd) 1818 if (ld(shpnt)[ldn].cmd)
1927 panic("IBM MCA SCSI: cmd already in progress for this ldn.\n"); 1819 panic("IBM MCA SCSI: cmd already in progress for this ldn.\n");
1928 1820
1929 /*save done in cmd, and save cmd for the interrupt handler */ 1821 /*save done in cmd, and save cmd for the interrupt handler */
1930 cmd->scsi_done = done; 1822 cmd->scsi_done = done;
1931 ld(host_index)[ldn].cmd = cmd; 1823 ld(shpnt)[ldn].cmd = cmd;
1932 1824
1933 /*fill scb information independent of the scsi command */ 1825 /*fill scb information independent of the scsi command */
1934 scb = &(ld(host_index)[ldn].scb); 1826 scb = &(ld(shpnt)[ldn].scb);
1935 ld(host_index)[ldn].tsb.dev_status = 0; 1827 ld(shpnt)[ldn].tsb.dev_status = 0;
1936 scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE; 1828 scb->enable = IM_REPORT_TSB_ONLY_ON_ERROR | IM_RETRY_ENABLE;
1937 scb->tsb_adr = isa_virt_to_bus(&(ld(host_index)[ldn].tsb)); 1829 scb->tsb_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].tsb));
1938 scsi_cmd = cmd->cmnd[0]; 1830 scsi_cmd = cmd->cmnd[0];
1939 1831
1940 if (cmd->use_sg) { 1832 if (scsi_sg_count(cmd)) {
1941 i = cmd->use_sg; 1833 BUG_ON(scsi_sg_count(cmd) > 16);
1942 sl = (struct scatterlist *) (cmd->request_buffer); 1834
1943 if (i > 16) 1835 scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
1944 panic("IBM MCA SCSI: scatter-gather list too long.\n"); 1836 ld(shpnt)[ldn].sge[i].address = (void *) (isa_page_to_bus(sg->page) + sg->offset);
1945 while (--i >= 0) { 1837 ld(shpnt)[ldn].sge[i].byte_length = sg->length;
1946 ld(host_index)[ldn].sge[i].address = (void *) (isa_page_to_bus(sl[i].page) + sl[i].offset);
1947 ld(host_index)[ldn].sge[i].byte_length = sl[i].length;
1948 } 1838 }
1949 scb->enable |= IM_POINTER_TO_LIST; 1839 scb->enable |= IM_POINTER_TO_LIST;
1950 scb->sys_buf_adr = isa_virt_to_bus(&(ld(host_index)[ldn].sge[0])); 1840 scb->sys_buf_adr = isa_virt_to_bus(&(ld(shpnt)[ldn].sge[0]));
1951 scb->sys_buf_length = cmd->use_sg * sizeof(struct im_sge); 1841 scb->sys_buf_length = scsi_sg_count(cmd) * sizeof(struct im_sge);
1952 } else { 1842 } else {
1953 scb->sys_buf_adr = isa_virt_to_bus(cmd->request_buffer); 1843 scb->sys_buf_adr = isa_virt_to_bus(scsi_sglist(cmd));
1954 /* recent Linux midlevel SCSI places 1024 byte for inquiry 1844 /* recent Linux midlevel SCSI places 1024 byte for inquiry
1955 * command. Far too much for old PS/2 hardware. */ 1845 * command. Far too much for old PS/2 hardware. */
1956 switch (scsi_cmd) { 1846 switch (scsi_cmd) {
@@ -1961,16 +1851,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1961 case REQUEST_SENSE: 1851 case REQUEST_SENSE:
1962 case MODE_SENSE: 1852 case MODE_SENSE:
1963 case MODE_SELECT: 1853 case MODE_SELECT:
1964 if (cmd->request_bufflen > 255) 1854 if (scsi_bufflen(cmd) > 255)
1965 scb->sys_buf_length = 255; 1855 scb->sys_buf_length = 255;
1966 else 1856 else
1967 scb->sys_buf_length = cmd->request_bufflen; 1857 scb->sys_buf_length = scsi_bufflen(cmd);
1968 break; 1858 break;
1969 case TEST_UNIT_READY: 1859 case TEST_UNIT_READY:
1970 scb->sys_buf_length = 0; 1860 scb->sys_buf_length = 0;
1971 break; 1861 break;
1972 default: 1862 default:
1973 scb->sys_buf_length = cmd->request_bufflen; 1863 scb->sys_buf_length = scsi_bufflen(cmd);
1974 break; 1864 break;
1975 } 1865 }
1976 } 1866 }
@@ -1982,16 +1872,16 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
1982 1872
1983 /* for specific device-type debugging: */ 1873 /* for specific device-type debugging: */
1984#ifdef IM_DEBUG_CMD_SPEC_DEV 1874#ifdef IM_DEBUG_CMD_SPEC_DEV
1985 if (ld(host_index)[ldn].device_type == IM_DEBUG_CMD_DEVICE) 1875 if (ld(shpnt)[ldn].device_type == IM_DEBUG_CMD_DEVICE)
1986 printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(host_index)[ldn].device_type, scsi_cmd, ldn); 1876 printk("(SCSI-device-type=0x%x) issue scsi cmd=%02x to ldn=%d\n", ld(shpnt)[ldn].device_type, scsi_cmd, ldn);
1987#endif 1877#endif
1988 1878
1989 /* for possible panics store current command */ 1879 /* for possible panics store current command */
1990 last_scsi_command(host_index)[ldn] = scsi_cmd; 1880 last_scsi_command(shpnt)[ldn] = scsi_cmd;
1991 last_scsi_type(host_index)[ldn] = IM_SCB; 1881 last_scsi_type(shpnt)[ldn] = IM_SCB;
1992 /* update statistical info */ 1882 /* update statistical info */
1993 IBM_DS(host_index).total_accesses++; 1883 IBM_DS(shpnt).total_accesses++;
1994 IBM_DS(host_index).ldn_access[ldn]++; 1884 IBM_DS(shpnt).ldn_access[ldn]++;
1995 1885
1996 switch (scsi_cmd) { 1886 switch (scsi_cmd) {
1997 case READ_6: 1887 case READ_6:
@@ -2003,17 +1893,17 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2003 /* Distinguish between disk and other devices. Only disks (that are the 1893 /* Distinguish between disk and other devices. Only disks (that are the
2004 most frequently accessed devices) should be supported by the 1894 most frequently accessed devices) should be supported by the
2005 IBM-SCSI-Subsystem commands. */ 1895 IBM-SCSI-Subsystem commands. */
2006 switch (ld(host_index)[ldn].device_type) { 1896 switch (ld(shpnt)[ldn].device_type) {
2007 case TYPE_DISK: /* for harddisks enter here ... */ 1897 case TYPE_DISK: /* for harddisks enter here ... */
2008 case TYPE_MOD: /* ... try it also for MO-drives (send flames as */ 1898 case TYPE_MOD: /* ... try it also for MO-drives (send flames as */
2009 /* you like, if this won't work.) */ 1899 /* you like, if this won't work.) */
2010 if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12) { 1900 if (scsi_cmd == READ_6 || scsi_cmd == READ_10 || scsi_cmd == READ_12) {
2011 /* read command preparations */ 1901 /* read command preparations */
2012 scb->enable |= IM_READ_CONTROL; 1902 scb->enable |= IM_READ_CONTROL;
2013 IBM_DS(host_index).ldn_read_access[ldn]++; /* increase READ-access on ldn stat. */ 1903 IBM_DS(shpnt).ldn_read_access[ldn]++; /* increase READ-access on ldn stat. */
2014 scb->command = IM_READ_DATA_CMD | IM_NO_DISCONNECT; 1904 scb->command = IM_READ_DATA_CMD | IM_NO_DISCONNECT;
2015 } else { /* write command preparations */ 1905 } else { /* write command preparations */
2016 IBM_DS(host_index).ldn_write_access[ldn]++; /* increase write-count on ldn stat. */ 1906 IBM_DS(shpnt).ldn_write_access[ldn]++; /* increase write-count on ldn stat. */
2017 scb->command = IM_WRITE_DATA_CMD | IM_NO_DISCONNECT; 1907 scb->command = IM_WRITE_DATA_CMD | IM_NO_DISCONNECT;
2018 } 1908 }
2019 if (scsi_cmd == READ_6 || scsi_cmd == WRITE_6) { 1909 if (scsi_cmd == READ_6 || scsi_cmd == WRITE_6) {
@@ -2023,9 +1913,9 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2023 scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[5]) << 0) | (((unsigned) cmd->cmnd[4]) << 8) | (((unsigned) cmd->cmnd[3]) << 16) | (((unsigned) cmd->cmnd[2]) << 24); 1913 scb->u1.log_blk_adr = (((unsigned) cmd->cmnd[5]) << 0) | (((unsigned) cmd->cmnd[4]) << 8) | (((unsigned) cmd->cmnd[3]) << 16) | (((unsigned) cmd->cmnd[2]) << 24);
2024 scb->u2.blk.count = (((unsigned) cmd->cmnd[8]) << 0) | (((unsigned) cmd->cmnd[7]) << 8); 1914 scb->u2.blk.count = (((unsigned) cmd->cmnd[8]) << 0) | (((unsigned) cmd->cmnd[7]) << 8);
2025 } 1915 }
2026 last_scsi_logical_block(host_index)[ldn] = scb->u1.log_blk_adr; 1916 last_scsi_logical_block(shpnt)[ldn] = scb->u1.log_blk_adr;
2027 last_scsi_blockcount(host_index)[ldn] = scb->u2.blk.count; 1917 last_scsi_blockcount(shpnt)[ldn] = scb->u2.blk.count;
2028 scb->u2.blk.length = ld(host_index)[ldn].block_length; 1918 scb->u2.blk.length = ld(shpnt)[ldn].block_length;
2029 break; 1919 break;
2030 /* for other devices, enter here. Other types are not known by 1920 /* for other devices, enter here. Other types are not known by
2031 Linux! TYPE_NO_LUN is forbidden as valid device. */ 1921 Linux! TYPE_NO_LUN is forbidden as valid device. */
@@ -2046,14 +1936,14 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2046 scb->enable |= IM_BYPASS_BUFFER; 1936 scb->enable |= IM_BYPASS_BUFFER;
2047 scb->u1.scsi_cmd_length = cmd->cmd_len; 1937 scb->u1.scsi_cmd_length = cmd->cmd_len;
2048 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len); 1938 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
2049 last_scsi_type(host_index)[ldn] = IM_LONG_SCB; 1939 last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
2050 /* Read/write on this non-disk devices is also displayworthy, 1940 /* Read/write on this non-disk devices is also displayworthy,
2051 so flash-up the LED/display. */ 1941 so flash-up the LED/display. */
2052 break; 1942 break;
2053 } 1943 }
2054 break; 1944 break;
2055 case INQUIRY: 1945 case INQUIRY:
2056 IBM_DS(host_index).ldn_inquiry_access[ldn]++; 1946 IBM_DS(shpnt).ldn_inquiry_access[ldn]++;
2057 scb->command = IM_DEVICE_INQUIRY_CMD; 1947 scb->command = IM_DEVICE_INQUIRY_CMD;
2058 scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; 1948 scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
2059 scb->u1.log_blk_adr = 0; 1949 scb->u1.log_blk_adr = 0;
@@ -2064,7 +1954,7 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2064 scb->u1.log_blk_adr = 0; 1954 scb->u1.log_blk_adr = 0;
2065 scb->u1.scsi_cmd_length = 6; 1955 scb->u1.scsi_cmd_length = 6;
2066 memcpy(scb->u2.scsi_command, cmd->cmnd, 6); 1956 memcpy(scb->u2.scsi_command, cmd->cmnd, 6);
2067 last_scsi_type(host_index)[ldn] = IM_LONG_SCB; 1957 last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
2068 break; 1958 break;
2069 case READ_CAPACITY: 1959 case READ_CAPACITY:
2070 /* the length of system memory buffer must be exactly 8 bytes */ 1960 /* the length of system memory buffer must be exactly 8 bytes */
@@ -2081,12 +1971,12 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2081 /* Commands that need write-only-mode (system -> device): */ 1971 /* Commands that need write-only-mode (system -> device): */
2082 case MODE_SELECT: 1972 case MODE_SELECT:
2083 case MODE_SELECT_10: 1973 case MODE_SELECT_10:
2084 IBM_DS(host_index).ldn_modeselect_access[ldn]++; 1974 IBM_DS(shpnt).ldn_modeselect_access[ldn]++;
2085 scb->command = IM_OTHER_SCSI_CMD_CMD; 1975 scb->command = IM_OTHER_SCSI_CMD_CMD;
2086 scb->enable |= IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; /*Select needs WRITE-enabled */ 1976 scb->enable |= IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; /*Select needs WRITE-enabled */
2087 scb->u1.scsi_cmd_length = cmd->cmd_len; 1977 scb->u1.scsi_cmd_length = cmd->cmd_len;
2088 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len); 1978 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
2089 last_scsi_type(host_index)[ldn] = IM_LONG_SCB; 1979 last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
2090 break; 1980 break;
2091 /* For other commands, read-only is useful. Most other commands are 1981 /* For other commands, read-only is useful. Most other commands are
2092 running without an input-data-block. */ 1982 running without an input-data-block. */
@@ -2095,19 +1985,19 @@ static int ibmmca_queuecommand(Scsi_Cmnd * cmd, void (*done) (Scsi_Cmnd *))
2095 scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER; 1985 scb->enable |= IM_READ_CONTROL | IM_SUPRESS_EXCEPTION_SHORT | IM_BYPASS_BUFFER;
2096 scb->u1.scsi_cmd_length = cmd->cmd_len; 1986 scb->u1.scsi_cmd_length = cmd->cmd_len;
2097 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len); 1987 memcpy(scb->u2.scsi_command, cmd->cmnd, cmd->cmd_len);
2098 last_scsi_type(host_index)[ldn] = IM_LONG_SCB; 1988 last_scsi_type(shpnt)[ldn] = IM_LONG_SCB;
2099 break; 1989 break;
2100 } 1990 }
2101 /*issue scb command, and return */ 1991 /*issue scb command, and return */
2102 if (++disk_rw_in_progress == 1) 1992 if (++disk_rw_in_progress == 1)
2103 PS2_DISK_LED_ON(shpnt->host_no, target); 1993 PS2_DISK_LED_ON(shpnt->host_no, target);
2104 1994
2105 if (last_scsi_type(host_index)[ldn] == IM_LONG_SCB) { 1995 if (last_scsi_type(shpnt)[ldn] == IM_LONG_SCB) {
2106 issue_cmd(host_index, isa_virt_to_bus(scb), IM_LONG_SCB | ldn); 1996 issue_cmd(shpnt, isa_virt_to_bus(scb), IM_LONG_SCB | ldn);
2107 IBM_DS(host_index).long_scbs++; 1997 IBM_DS(shpnt).long_scbs++;
2108 } else { 1998 } else {
2109 issue_cmd(host_index, isa_virt_to_bus(scb), IM_SCB | ldn); 1999 issue_cmd(shpnt, isa_virt_to_bus(scb), IM_SCB | ldn);
2110 IBM_DS(host_index).scbs++; 2000 IBM_DS(shpnt).scbs++;
2111 } 2001 }
2112 return 0; 2002 return 0;
2113} 2003}
@@ -2122,7 +2012,6 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2122 unsigned int ldn; 2012 unsigned int ldn;
2123 void (*saved_done) (Scsi_Cmnd *); 2013 void (*saved_done) (Scsi_Cmnd *);
2124 int target; 2014 int target;
2125 int host_index;
2126 int max_pun; 2015 int max_pun;
2127 unsigned long imm_command; 2016 unsigned long imm_command;
2128 2017
@@ -2131,35 +2020,23 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2131#endif 2020#endif
2132 2021
2133 shpnt = cmd->device->host; 2022 shpnt = cmd->device->host;
2134 /* search for the right hostadapter */
2135 for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
2136 2023
2137 if (!hosts[host_index]) { /* invalid hostadapter descriptor address */ 2024 max_pun = subsystem_maxid(shpnt);
2138 cmd->result = DID_NO_CONNECT << 16;
2139 if (cmd->scsi_done)
2140 (cmd->scsi_done) (cmd);
2141 shpnt = cmd->device->host;
2142#ifdef IM_DEBUG_PROBE
2143 printk(KERN_DEBUG "IBM MCA SCSI: Abort adapter selection failed!\n");
2144#endif
2145 return SUCCESS;
2146 }
2147 max_pun = subsystem_maxid(host_index);
2148 if (ibm_ansi_order) { 2025 if (ibm_ansi_order) {
2149 target = max_pun - 1 - cmd->device->id; 2026 target = max_pun - 1 - cmd->device->id;
2150 if ((target <= subsystem_pun(host_index)) && (cmd->device->id <= subsystem_pun(host_index))) 2027 if ((target <= subsystem_pun(shpnt)) && (cmd->device->id <= subsystem_pun(shpnt)))
2151 target--; 2028 target--;
2152 else if ((target >= subsystem_pun(host_index)) && (cmd->device->id >= subsystem_pun(host_index))) 2029 else if ((target >= subsystem_pun(shpnt)) && (cmd->device->id >= subsystem_pun(shpnt)))
2153 target++; 2030 target++;
2154 } else 2031 } else
2155 target = cmd->device->id; 2032 target = cmd->device->id;
2156 2033
2157 /* get logical device number, and disable system interrupts */ 2034 /* get logical device number, and disable system interrupts */
2158 printk(KERN_WARNING "IBM MCA SCSI: Sending abort to device pun=%d, lun=%d.\n", target, cmd->device->lun); 2035 printk(KERN_WARNING "IBM MCA SCSI: Sending abort to device pun=%d, lun=%d.\n", target, cmd->device->lun);
2159 ldn = get_ldn(host_index)[target][cmd->device->lun]; 2036 ldn = get_ldn(shpnt)[target][cmd->device->lun];
2160 2037
2161 /*if cmd for this ldn has already finished, no need to abort */ 2038 /*if cmd for this ldn has already finished, no need to abort */
2162 if (!ld(host_index)[ldn].cmd) { 2039 if (!ld(shpnt)[ldn].cmd) {
2163 return SUCCESS; 2040 return SUCCESS;
2164 } 2041 }
2165 2042
@@ -2170,20 +2047,20 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2170 saved_done = cmd->scsi_done; 2047 saved_done = cmd->scsi_done;
2171 cmd->scsi_done = internal_done; 2048 cmd->scsi_done = internal_done;
2172 cmd->SCp.Status = 0; 2049 cmd->SCp.Status = 0;
2173 last_scsi_command(host_index)[ldn] = IM_ABORT_IMM_CMD; 2050 last_scsi_command(shpnt)[ldn] = IM_ABORT_IMM_CMD;
2174 last_scsi_type(host_index)[ldn] = IM_IMM_CMD; 2051 last_scsi_type(shpnt)[ldn] = IM_IMM_CMD;
2175 imm_command = inl(IM_CMD_REG(host_index)); 2052 imm_command = inl(IM_CMD_REG(shpnt));
2176 imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */ 2053 imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
2177 imm_command |= (unsigned long) (IM_ABORT_IMM_CMD); 2054 imm_command |= (unsigned long) (IM_ABORT_IMM_CMD);
2178 /* must wait for attention reg not busy */ 2055 /* must wait for attention reg not busy */
2179 /* FIXME - timeout, politeness */ 2056 /* FIXME - timeout, politeness */
2180 while (1) { 2057 while (1) {
2181 if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY)) 2058 if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
2182 break; 2059 break;
2183 } 2060 }
2184 /* write registers and enable system interrupts */ 2061 /* write registers and enable system interrupts */
2185 outl(imm_command, IM_CMD_REG(host_index)); 2062 outl(imm_command, IM_CMD_REG(shpnt));
2186 outb(IM_IMM_CMD | ldn, IM_ATTN_REG(host_index)); 2063 outb(IM_IMM_CMD | ldn, IM_ATTN_REG(shpnt));
2187#ifdef IM_DEBUG_PROBE 2064#ifdef IM_DEBUG_PROBE
2188 printk("IBM MCA SCSI: Abort queued to adapter...\n"); 2065 printk("IBM MCA SCSI: Abort queued to adapter...\n");
2189#endif 2066#endif
@@ -2202,7 +2079,7 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2202 cmd->result |= DID_ABORT << 16; 2079 cmd->result |= DID_ABORT << 16;
2203 if (cmd->scsi_done) 2080 if (cmd->scsi_done)
2204 (cmd->scsi_done) (cmd); 2081 (cmd->scsi_done) (cmd);
2205 ld(host_index)[ldn].cmd = NULL; 2082 ld(shpnt)[ldn].cmd = NULL;
2206#ifdef IM_DEBUG_PROBE 2083#ifdef IM_DEBUG_PROBE
2207 printk("IBM MCA SCSI: Abort finished with success.\n"); 2084 printk("IBM MCA SCSI: Abort finished with success.\n");
2208#endif 2085#endif
@@ -2211,7 +2088,7 @@ static int __ibmmca_abort(Scsi_Cmnd * cmd)
2211 cmd->result |= DID_NO_CONNECT << 16; 2088 cmd->result |= DID_NO_CONNECT << 16;
2212 if (cmd->scsi_done) 2089 if (cmd->scsi_done)
2213 (cmd->scsi_done) (cmd); 2090 (cmd->scsi_done) (cmd);
2214 ld(host_index)[ldn].cmd = NULL; 2091 ld(shpnt)[ldn].cmd = NULL;
2215#ifdef IM_DEBUG_PROBE 2092#ifdef IM_DEBUG_PROBE
2216 printk("IBM MCA SCSI: Abort failed.\n"); 2093 printk("IBM MCA SCSI: Abort failed.\n");
2217#endif 2094#endif
@@ -2236,71 +2113,65 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
2236 struct Scsi_Host *shpnt; 2113 struct Scsi_Host *shpnt;
2237 Scsi_Cmnd *cmd_aid; 2114 Scsi_Cmnd *cmd_aid;
2238 int ticks, i; 2115 int ticks, i;
2239 int host_index;
2240 unsigned long imm_command; 2116 unsigned long imm_command;
2241 2117
2242 BUG_ON(cmd == NULL); 2118 BUG_ON(cmd == NULL);
2243 2119
2244 ticks = IM_RESET_DELAY * HZ; 2120 ticks = IM_RESET_DELAY * HZ;
2245 shpnt = cmd->device->host; 2121 shpnt = cmd->device->host;
2246 /* search for the right hostadapter */
2247 for (host_index = 0; hosts[host_index] && hosts[host_index]->host_no != shpnt->host_no; host_index++);
2248
2249 if (!hosts[host_index]) /* invalid hostadapter descriptor address */
2250 return FAILED;
2251 2122
2252 if (local_checking_phase_flag(host_index)) { 2123 if (local_checking_phase_flag(shpnt)) {
2253 printk(KERN_WARNING "IBM MCA SCSI: unable to reset while checking devices.\n"); 2124 printk(KERN_WARNING "IBM MCA SCSI: unable to reset while checking devices.\n");
2254 return FAILED; 2125 return FAILED;
2255 } 2126 }
2256 2127
2257 /* issue reset immediate command to subsystem, and wait for interrupt */ 2128 /* issue reset immediate command to subsystem, and wait for interrupt */
2258 printk("IBM MCA SCSI: resetting all devices.\n"); 2129 printk("IBM MCA SCSI: resetting all devices.\n");
2259 reset_status(host_index) = IM_RESET_IN_PROGRESS; 2130 reset_status(shpnt) = IM_RESET_IN_PROGRESS;
2260 last_scsi_command(host_index)[0xf] = IM_RESET_IMM_CMD; 2131 last_scsi_command(shpnt)[0xf] = IM_RESET_IMM_CMD;
2261 last_scsi_type(host_index)[0xf] = IM_IMM_CMD; 2132 last_scsi_type(shpnt)[0xf] = IM_IMM_CMD;
2262 imm_command = inl(IM_CMD_REG(host_index)); 2133 imm_command = inl(IM_CMD_REG(shpnt));
2263 imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */ 2134 imm_command &= (unsigned long) (0xffff0000); /* mask reserved stuff */
2264 imm_command |= (unsigned long) (IM_RESET_IMM_CMD); 2135 imm_command |= (unsigned long) (IM_RESET_IMM_CMD);
2265 /* must wait for attention reg not busy */ 2136 /* must wait for attention reg not busy */
2266 while (1) { 2137 while (1) {
2267 if (!(inb(IM_STAT_REG(host_index)) & IM_BUSY)) 2138 if (!(inb(IM_STAT_REG(shpnt)) & IM_BUSY))
2268 break; 2139 break;
2269 spin_unlock_irq(shpnt->host_lock); 2140 spin_unlock_irq(shpnt->host_lock);
2270 yield(); 2141 yield();
2271 spin_lock_irq(shpnt->host_lock); 2142 spin_lock_irq(shpnt->host_lock);
2272 } 2143 }
2273 /*write registers and enable system interrupts */ 2144 /*write registers and enable system interrupts */
2274 outl(imm_command, IM_CMD_REG(host_index)); 2145 outl(imm_command, IM_CMD_REG(shpnt));
2275 outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(host_index)); 2146 outb(IM_IMM_CMD | 0xf, IM_ATTN_REG(shpnt));
2276 /* wait for interrupt finished or intr_stat register to be set, as the 2147 /* wait for interrupt finished or intr_stat register to be set, as the
2277 * interrupt will not be executed, while we are in here! */ 2148 * interrupt will not be executed, while we are in here! */
2278 2149
2279 /* FIXME: This is really really icky we so want a sleeping version of this ! */ 2150 /* FIXME: This is really really icky we so want a sleeping version of this ! */
2280 while (reset_status(host_index) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(host_index)) & 0x8f) != 0x8f)) { 2151 while (reset_status(shpnt) == IM_RESET_IN_PROGRESS && --ticks && ((inb(IM_INTR_REG(shpnt)) & 0x8f) != 0x8f)) {
2281 udelay((1 + 999 / HZ) * 1000); 2152 udelay((1 + 999 / HZ) * 1000);
2282 barrier(); 2153 barrier();
2283 } 2154 }
2284 /* if reset did not complete, just return an error */ 2155 /* if reset did not complete, just return an error */
2285 if (!ticks) { 2156 if (!ticks) {
2286 printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY); 2157 printk(KERN_ERR "IBM MCA SCSI: reset did not complete within %d seconds.\n", IM_RESET_DELAY);
2287 reset_status(host_index) = IM_RESET_FINISHED_FAIL; 2158 reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
2288 return FAILED; 2159 return FAILED;
2289 } 2160 }
2290 2161
2291 if ((inb(IM_INTR_REG(host_index)) & 0x8f) == 0x8f) { 2162 if ((inb(IM_INTR_REG(shpnt)) & 0x8f) == 0x8f) {
2292 /* analysis done by this routine and not by the intr-routine */ 2163 /* analysis done by this routine and not by the intr-routine */
2293 if (inb(IM_INTR_REG(host_index)) == 0xaf) 2164 if (inb(IM_INTR_REG(shpnt)) == 0xaf)
2294 reset_status(host_index) = IM_RESET_FINISHED_OK_NO_INT; 2165 reset_status(shpnt) = IM_RESET_FINISHED_OK_NO_INT;
2295 else if (inb(IM_INTR_REG(host_index)) == 0xcf) 2166 else if (inb(IM_INTR_REG(shpnt)) == 0xcf)
2296 reset_status(host_index) = IM_RESET_FINISHED_FAIL; 2167 reset_status(shpnt) = IM_RESET_FINISHED_FAIL;
2297 else /* failed, 4get it */ 2168 else /* failed, 4get it */
2298 reset_status(host_index) = IM_RESET_NOT_IN_PROGRESS_NO_INT; 2169 reset_status(shpnt) = IM_RESET_NOT_IN_PROGRESS_NO_INT;
2299 outb(IM_EOI | 0xf, IM_ATTN_REG(host_index)); 2170 outb(IM_EOI | 0xf, IM_ATTN_REG(shpnt));
2300 } 2171 }
2301 2172
2302 /* if reset failed, just return an error */ 2173 /* if reset failed, just return an error */
2303 if (reset_status(host_index) == IM_RESET_FINISHED_FAIL) { 2174 if (reset_status(shpnt) == IM_RESET_FINISHED_FAIL) {
2304 printk(KERN_ERR "IBM MCA SCSI: reset failed.\n"); 2175 printk(KERN_ERR "IBM MCA SCSI: reset failed.\n");
2305 return FAILED; 2176 return FAILED;
2306 } 2177 }
@@ -2308,9 +2179,9 @@ static int __ibmmca_host_reset(Scsi_Cmnd * cmd)
2308 /* so reset finished ok - call outstanding done's, and return success */ 2179 /* so reset finished ok - call outstanding done's, and return success */
2309 printk(KERN_INFO "IBM MCA SCSI: Reset successfully completed.\n"); 2180 printk(KERN_INFO "IBM MCA SCSI: Reset successfully completed.\n");
2310 for (i = 0; i < MAX_LOG_DEV; i++) { 2181 for (i = 0; i < MAX_LOG_DEV; i++) {
2311 cmd_aid = ld(host_index)[i].cmd; 2182 cmd_aid = ld(shpnt)[i].cmd;
2312 if (cmd_aid && cmd_aid->scsi_done) { 2183 if (cmd_aid && cmd_aid->scsi_done) {
2313 ld(host_index)[i].cmd = NULL; 2184 ld(shpnt)[i].cmd = NULL;
2314 cmd_aid->result = DID_RESET << 16; 2185 cmd_aid->result = DID_RESET << 16;
2315 } 2186 }
2316 } 2187 }
@@ -2351,46 +2222,46 @@ static int ibmmca_biosparam(struct scsi_device *sdev, struct block_device *bdev,
2351} 2222}
2352 2223
2353/* calculate percentage of total accesses on a ldn */ 2224/* calculate percentage of total accesses on a ldn */
2354static int ldn_access_load(int host_index, int ldn) 2225static int ldn_access_load(struct Scsi_Host *shpnt, int ldn)
2355{ 2226{
2356 if (IBM_DS(host_index).total_accesses == 0) 2227 if (IBM_DS(shpnt).total_accesses == 0)
2357 return (0); 2228 return (0);
2358 if (IBM_DS(host_index).ldn_access[ldn] == 0) 2229 if (IBM_DS(shpnt).ldn_access[ldn] == 0)
2359 return (0); 2230 return (0);
2360 return (IBM_DS(host_index).ldn_access[ldn] * 100) / IBM_DS(host_index).total_accesses; 2231 return (IBM_DS(shpnt).ldn_access[ldn] * 100) / IBM_DS(shpnt).total_accesses;
2361} 2232}
2362 2233
2363/* calculate total amount of r/w-accesses */ 2234/* calculate total amount of r/w-accesses */
2364static int ldn_access_total_read_write(int host_index) 2235static int ldn_access_total_read_write(struct Scsi_Host *shpnt)
2365{ 2236{
2366 int a; 2237 int a;
2367 int i; 2238 int i;
2368 2239
2369 a = 0; 2240 a = 0;
2370 for (i = 0; i <= MAX_LOG_DEV; i++) 2241 for (i = 0; i <= MAX_LOG_DEV; i++)
2371 a += IBM_DS(host_index).ldn_read_access[i] + IBM_DS(host_index).ldn_write_access[i]; 2242 a += IBM_DS(shpnt).ldn_read_access[i] + IBM_DS(shpnt).ldn_write_access[i];
2372 return (a); 2243 return (a);
2373} 2244}
2374 2245
2375static int ldn_access_total_inquiry(int host_index) 2246static int ldn_access_total_inquiry(struct Scsi_Host *shpnt)
2376{ 2247{
2377 int a; 2248 int a;
2378 int i; 2249 int i;
2379 2250
2380 a = 0; 2251 a = 0;
2381 for (i = 0; i <= MAX_LOG_DEV; i++) 2252 for (i = 0; i <= MAX_LOG_DEV; i++)
2382 a += IBM_DS(host_index).ldn_inquiry_access[i]; 2253 a += IBM_DS(shpnt).ldn_inquiry_access[i];
2383 return (a); 2254 return (a);
2384} 2255}
2385 2256
2386static int ldn_access_total_modeselect(int host_index) 2257static int ldn_access_total_modeselect(struct Scsi_Host *shpnt)
2387{ 2258{
2388 int a; 2259 int a;
2389 int i; 2260 int i;
2390 2261
2391 a = 0; 2262 a = 0;
2392 for (i = 0; i <= MAX_LOG_DEV; i++) 2263 for (i = 0; i <= MAX_LOG_DEV; i++)
2393 a += IBM_DS(host_index).ldn_modeselect_access[i]; 2264 a += IBM_DS(shpnt).ldn_modeselect_access[i];
2394 return (a); 2265 return (a);
2395} 2266}
2396 2267
@@ -2398,19 +2269,14 @@ static int ldn_access_total_modeselect(int host_index)
2398static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout) 2269static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start, off_t offset, int length, int inout)
2399{ 2270{
2400 int len = 0; 2271 int len = 0;
2401 int i, id, lun, host_index; 2272 int i, id, lun;
2402 unsigned long flags; 2273 unsigned long flags;
2403 int max_pun; 2274 int max_pun;
2404 2275
2405 for (i = 0; hosts[i] && hosts[i] != shpnt; i++);
2406 2276
2407 spin_lock_irqsave(hosts[i]->host_lock, flags); /* Check it */ 2277 spin_lock_irqsave(shpnt->host_lock, flags); /* Check it */
2408 host_index = i; 2278
2409 if (!shpnt) { 2279 max_pun = subsystem_maxid(shpnt);
2410 len += sprintf(buffer + len, "\nIBM MCA SCSI: Can't find adapter");
2411 return len;
2412 }
2413 max_pun = subsystem_maxid(host_index);
2414 2280
2415 len += sprintf(buffer + len, "\n IBM-SCSI-Subsystem-Linux-Driver, Version %s\n\n\n", IBMMCA_SCSI_DRIVER_VERSION); 2281 len += sprintf(buffer + len, "\n IBM-SCSI-Subsystem-Linux-Driver, Version %s\n\n\n", IBMMCA_SCSI_DRIVER_VERSION);
2416 len += sprintf(buffer + len, " SCSI Access-Statistics:\n"); 2282 len += sprintf(buffer + len, " SCSI Access-Statistics:\n");
@@ -2421,40 +2287,40 @@ static int ibmmca_proc_info(struct Scsi_Host *shpnt, char *buffer, char **start,
2421 len += sprintf(buffer + len, " Multiple LUN probing.....: No\n"); 2287 len += sprintf(buffer + len, " Multiple LUN probing.....: No\n");
2422#endif 2288#endif
2423 len += sprintf(buffer + len, " This Hostnumber..........: %d\n", shpnt->host_no); 2289 len += sprintf(buffer + len, " This Hostnumber..........: %d\n", shpnt->host_no);
2424 len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(host_index))); 2290 len += sprintf(buffer + len, " Base I/O-Port............: 0x%x\n", (unsigned int) (IM_CMD_REG(shpnt)));
2425 len += sprintf(buffer + len, " (Shared) IRQ.............: %d\n", IM_IRQ); 2291 len += sprintf(buffer + len, " (Shared) IRQ.............: %d\n", IM_IRQ);
2426 len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(host_index).total_interrupts); 2292 len += sprintf(buffer + len, " Total Interrupts.........: %d\n", IBM_DS(shpnt).total_interrupts);
2427 len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(host_index).total_accesses); 2293 len += sprintf(buffer + len, " Total SCSI Accesses......: %d\n", IBM_DS(shpnt).total_accesses);
2428 len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(host_index).scbs); 2294 len += sprintf(buffer + len, " Total short SCBs.........: %d\n", IBM_DS(shpnt).scbs);
2429 len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(host_index).long_scbs); 2295 len += sprintf(buffer + len, " Total long SCBs..........: %d\n", IBM_DS(shpnt).long_scbs);
2430 len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(host_index)); 2296 len += sprintf(buffer + len, " Total SCSI READ/WRITE..: %d\n", ldn_access_total_read_write(shpnt));
2431 len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(host_index)); 2297 len += sprintf(buffer + len, " Total SCSI Inquiries...: %d\n", ldn_access_total_inquiry(shpnt));
2432 len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(host_index)); 2298 len += sprintf(buffer + len, " Total SCSI Modeselects.: %d\n", ldn_access_total_modeselect(shpnt));
2433 len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(host_index).total_accesses - ldn_access_total_read_write(host_index) 2299 len += sprintf(buffer + len, " Total SCSI other cmds..: %d\n", IBM_DS(shpnt).total_accesses - ldn_access_total_read_write(shpnt)
2434 - ldn_access_total_modeselect(host_index) 2300 - ldn_access_total_modeselect(shpnt)
2435 - ldn_access_total_inquiry(host_index)); 2301 - ldn_access_total_inquiry(shpnt));
2436 len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(host_index).total_errors); 2302 len += sprintf(buffer + len, " Total SCSI command fails.: %d\n\n", IBM_DS(shpnt).total_errors);
2437 len += sprintf(buffer + len, " Logical-Device-Number (LDN) Access-Statistics:\n"); 2303 len += sprintf(buffer + len, " Logical-Device-Number (LDN) Access-Statistics:\n");
2438 len += sprintf(buffer + len, " LDN | Accesses [%%] | READ | WRITE | ASSIGNMENTS\n"); 2304 len += sprintf(buffer + len, " LDN | Accesses [%%] | READ | WRITE | ASSIGNMENTS\n");
2439 len += sprintf(buffer + len, " -----|--------------|-----------|-----------|--------------\n"); 2305 len += sprintf(buffer + len, " -----|--------------|-----------|-----------|--------------\n");
2440 for (i = 0; i <= MAX_LOG_DEV; i++) 2306 for (i = 0; i <= MAX_LOG_DEV; i++)
2441 len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(host_index, i), IBM_DS(host_index).ldn_read_access[i], IBM_DS(host_index).ldn_write_access[i], IBM_DS(host_index).ldn_assignments[i]); 2307 len += sprintf(buffer + len, " %2X | %3d | %8d | %8d | %8d\n", i, ldn_access_load(shpnt, i), IBM_DS(shpnt).ldn_read_access[i], IBM_DS(shpnt).ldn_write_access[i], IBM_DS(shpnt).ldn_assignments[i]);
2442 len += sprintf(buffer + len, " -----------------------------------------------------------\n\n"); 2308 len += sprintf(buffer + len, " -----------------------------------------------------------\n\n");
2443 len += sprintf(buffer + len, " Dynamical-LDN-Assignment-Statistics:\n"); 2309 len += sprintf(buffer + len, " Dynamical-LDN-Assignment-Statistics:\n");
2444 len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(host_index).total_scsi_devices); 2310 len += sprintf(buffer + len, " Number of physical SCSI-devices..: %d (+ Adapter)\n", IBM_DS(shpnt).total_scsi_devices);
2445 len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(host_index).dyn_flag ? "Yes" : "No "); 2311 len += sprintf(buffer + len, " Dynamical Assignment necessary...: %s\n", IBM_DS(shpnt).dyn_flag ? "Yes" : "No ");
2446 len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(host_index)); 2312 len += sprintf(buffer + len, " Next LDN to be assigned..........: 0x%x\n", next_ldn(shpnt));
2447 len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(host_index).dynamical_assignments); 2313 len += sprintf(buffer + len, " Dynamical assignments done yet...: %d\n", IBM_DS(shpnt).dynamical_assignments);
2448 len += sprintf(buffer + len, "\n Current SCSI-Device-Mapping:\n"); 2314 len += sprintf(buffer + len, "\n Current SCSI-Device-Mapping:\n");
2449 len += sprintf(buffer + len, " Physical SCSI-Device Map Logical SCSI-Device Map\n"); 2315 len += sprintf(buffer + len, " Physical SCSI-Device Map Logical SCSI-Device Map\n");
2450 len += sprintf(buffer + len, " ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n"); 2316 len += sprintf(buffer + len, " ID\\LUN 0 1 2 3 4 5 6 7 ID\\LUN 0 1 2 3 4 5 6 7\n");
2451 for (id = 0; id < max_pun; id++) { 2317 for (id = 0; id < max_pun; id++) {
2452 len += sprintf(buffer + len, " %2d ", id); 2318 len += sprintf(buffer + len, " %2d ", id);
2453 for (lun = 0; lun < 8; lun++) 2319 for (lun = 0; lun < 8; lun++)
2454 len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(host_index)[id][lun])); 2320 len += sprintf(buffer + len, "%2s ", ti_p(get_scsi(shpnt)[id][lun]));
2455 len += sprintf(buffer + len, " %2d ", id); 2321 len += sprintf(buffer + len, " %2d ", id);
2456 for (lun = 0; lun < 8; lun++) 2322 for (lun = 0; lun < 8; lun++)
2457 len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(host_index)[id][lun])); 2323 len += sprintf(buffer + len, "%2s ", ti_l(get_ldn(shpnt)[id][lun]));
2458 len += sprintf(buffer + len, "\n"); 2324 len += sprintf(buffer + len, "\n");
2459 } 2325 }
2460 2326
@@ -2488,20 +2354,31 @@ static int option_setup(char *str)
2488 2354
2489__setup("ibmmcascsi=", option_setup); 2355__setup("ibmmcascsi=", option_setup);
2490 2356
2491static struct scsi_host_template driver_template = { 2357static struct mca_driver ibmmca_driver = {
2492 .proc_name = "ibmmca", 2358 .id_table = ibmmca_id_table,
2493 .proc_info = ibmmca_proc_info, 2359 .driver = {
2494 .name = "IBM SCSI-Subsystem", 2360 .name = "ibmmca",
2495 .detect = ibmmca_detect, 2361 .bus = &mca_bus_type,
2496 .release = ibmmca_release, 2362 .probe = ibmmca_probe,
2497 .queuecommand = ibmmca_queuecommand, 2363 .remove = __devexit_p(ibmmca_remove),
2498 .eh_abort_handler = ibmmca_abort, 2364 },
2499 .eh_host_reset_handler = ibmmca_host_reset,
2500 .bios_param = ibmmca_biosparam,
2501 .can_queue = 16,
2502 .this_id = 7,
2503 .sg_tablesize = 16,
2504 .cmd_per_lun = 1,
2505 .use_clustering = ENABLE_CLUSTERING,
2506}; 2365};
2507#include "scsi_module.c" 2366
2367static int __init ibmmca_init(void)
2368{
2369#ifdef MODULE
2370 /* If the driver is run as module, read from conf.modules or cmd-line */
2371 if (boot_options)
2372 option_setup(boot_options);
2373#endif
2374
2375 return mca_register_driver_integrated(&ibmmca_driver, MCA_INTEGSCSI);
2376}
2377
2378static void __exit ibmmca_exit(void)
2379{
2380 mca_unregister_driver(&ibmmca_driver);
2381}
2382
2383module_init(ibmmca_init);
2384module_exit(ibmmca_exit);
diff --git a/drivers/scsi/ibmmca.h b/drivers/scsi/ibmmca.h
deleted file mode 100644
index 017ee2fa6d63..000000000000
--- a/drivers/scsi/ibmmca.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Low Level Driver for the IBM Microchannel SCSI Subsystem
3 * (Headerfile, see Documentation/scsi/ibmmca.txt for description of the
4 * IBM MCA SCSI-driver.
5 * For use under the GNU General Public License within the Linux-kernel project.
6 * This include file works only correctly with kernel 2.4.0 or higher!!! */
7
8#ifndef _IBMMCA_H
9#define _IBMMCA_H
10
11/* Common forward declarations for all Linux-versions: */
12
13/* Interfaces to the midlevel Linux SCSI driver */
14static int ibmmca_detect (struct scsi_host_template *);
15static int ibmmca_release (struct Scsi_Host *);
16static int ibmmca_queuecommand (Scsi_Cmnd *, void (*done) (Scsi_Cmnd *));
17static int ibmmca_abort (Scsi_Cmnd *);
18static int ibmmca_host_reset (Scsi_Cmnd *);
19static int ibmmca_biosparam (struct scsi_device *, struct block_device *, sector_t, int *);
20
21#endif /* _IBMMCA_H */
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index b10eefe735c5..b580af95956b 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -1375,6 +1375,23 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
1375 return 0; 1375 return 0;
1376} 1376}
1377 1377
1378/**
1379 * ibmvscsi_change_queue_depth - Change the device's queue depth
1380 * @sdev: scsi device struct
1381 * @qdepth: depth to set
1382 *
1383 * Return value:
1384 * actual depth set
1385 **/
1386static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
1387{
1388 if (qdepth > IBMVSCSI_MAX_CMDS_PER_LUN)
1389 qdepth = IBMVSCSI_MAX_CMDS_PER_LUN;
1390
1391 scsi_adjust_queue_depth(sdev, 0, qdepth);
1392 return sdev->queue_depth;
1393}
1394
1378/* ------------------------------------------------------------ 1395/* ------------------------------------------------------------
1379 * sysfs attributes 1396 * sysfs attributes
1380 */ 1397 */
@@ -1521,6 +1538,7 @@ static struct scsi_host_template driver_template = {
1521 .eh_abort_handler = ibmvscsi_eh_abort_handler, 1538 .eh_abort_handler = ibmvscsi_eh_abort_handler,
1522 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler, 1539 .eh_device_reset_handler = ibmvscsi_eh_device_reset_handler,
1523 .slave_configure = ibmvscsi_slave_configure, 1540 .slave_configure = ibmvscsi_slave_configure,
1541 .change_queue_depth = ibmvscsi_change_queue_depth,
1524 .cmd_per_lun = 16, 1542 .cmd_per_lun = 16,
1525 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT, 1543 .can_queue = IBMVSCSI_MAX_REQUESTS_DEFAULT,
1526 .this_id = -1, 1544 .this_id = -1,
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 77cc1d40f5bb..727ca7c95926 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -45,6 +45,7 @@ struct Scsi_Host;
45#define MAX_INDIRECT_BUFS 10 45#define MAX_INDIRECT_BUFS 10
46 46
47#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100 47#define IBMVSCSI_MAX_REQUESTS_DEFAULT 100
48#define IBMVSCSI_MAX_CMDS_PER_LUN 64
48 49
49/* ------------------------------------------------------------ 50/* ------------------------------------------------------------
50 * Data Structures 51 * Data Structures
diff --git a/drivers/scsi/initio.c b/drivers/scsi/initio.c
index 7e7635ca78f1..d9dfb69ae031 100644
--- a/drivers/scsi/initio.c
+++ b/drivers/scsi/initio.c
@@ -3,7 +3,8 @@
3 * 3 *
4 * Copyright (c) 1994-1998 Initio Corporation 4 * Copyright (c) 1994-1998 Initio Corporation
5 * Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl> 5 * Copyright (c) 1998 Bas Vermeulen <bvermeul@blackstar.xs4all.nl>
6 * All rights reserved. 6 * Copyright (c) 2004 Christoph Hellwig <hch@lst.de>
7 * Copyright (c) 2007 Red Hat <alan@redhat.com>
7 * 8 *
8 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -19,38 +20,6 @@
19 * along with this program; see the file COPYING. If not, write to 20 * along with this program; see the file COPYING. If not, write to
20 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
21 * 22 *
22 * --------------------------------------------------------------------------
23 *
24 * Redistribution and use in source and binary forms, with or without
25 * modification, are permitted provided that the following conditions
26 * are met:
27 * 1. Redistributions of source code must retain the above copyright
28 * notice, this list of conditions, and the following disclaimer,
29 * without modification, immediately at the beginning of the file.
30 * 2. Redistributions in binary form must reproduce the above copyright
31 * notice, this list of conditions and the following disclaimer in the
32 * documentation and/or other materials provided with the distribution.
33 * 3. The name of the author may not be used to endorse or promote products
34 * derived from this software without specific prior written permission.
35 *
36 * Where this Software is combined with software released under the terms of
37 * the GNU General Public License ("GPL") and the terms of the GPL would require the
38 * combined work to also be released under the terms of the GPL, the terms
39 * and conditions of this License will apply in addition to those of the
40 * GPL with the exception of any terms or conditions of this License that
41 * conflict with, or are expressly prohibited by, the GPL.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
44 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
45 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
46 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
47 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
48 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
49 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
50 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
51 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
52 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
53 * SUCH DAMAGE.
54 * 23 *
55 ************************************************************************* 24 *************************************************************************
56 * 25 *
@@ -70,14 +39,14 @@
70 * - Fix memory allocation problem 39 * - Fix memory allocation problem
71 * 03/04/98 hc - v1.01l 40 * 03/04/98 hc - v1.01l
72 * - Fix tape rewind which will hang the system problem 41 * - Fix tape rewind which will hang the system problem
73 * - Set can_queue to tul_num_scb 42 * - Set can_queue to initio_num_scb
74 * 06/25/98 hc - v1.01m 43 * 06/25/98 hc - v1.01m
75 * - Get it work for kernel version >= 2.1.75 44 * - Get it work for kernel version >= 2.1.75
76 * - Dynamic assign SCSI bus reset holding time in init_tulip() 45 * - Dynamic assign SCSI bus reset holding time in initio_init()
77 * 07/02/98 hc - v1.01n 46 * 07/02/98 hc - v1.01n
78 * - Support 0002134A 47 * - Support 0002134A
79 * 08/07/98 hc - v1.01o 48 * 08/07/98 hc - v1.01o
80 * - Change the tul_abort_srb routine to use scsi_done. <01> 49 * - Change the initio_abort_srb routine to use scsi_done. <01>
81 * 09/07/98 hl - v1.02 50 * 09/07/98 hl - v1.02
82 * - Change the INI9100U define and proc_dir_entry to 51 * - Change the INI9100U define and proc_dir_entry to
83 * reflect the newer Kernel 2.1.118, but the v1.o1o 52 * reflect the newer Kernel 2.1.118, but the v1.o1o
@@ -150,23 +119,13 @@
150static unsigned int i91u_debug = DEBUG_DEFAULT; 119static unsigned int i91u_debug = DEBUG_DEFAULT;
151#endif 120#endif
152 121
153#define TUL_RDWORD(x,y) (short)(inl((int)((ULONG)((ULONG)x+(UCHAR)y)) )) 122static int initio_tag_enable = 1;
154
155typedef struct PCI_ID_Struc {
156 unsigned short vendor_id;
157 unsigned short device_id;
158} PCI_ID;
159
160static int tul_num_ch = 4; /* Maximum 4 adapters */
161static int tul_num_scb;
162static int tul_tag_enable = 1;
163static SCB *tul_scb;
164 123
165#ifdef DEBUG_i91u 124#ifdef DEBUG_i91u
166static int setup_debug = 0; 125static int setup_debug = 0;
167#endif 126#endif
168 127
169static void i91uSCBPost(BYTE * pHcb, BYTE * pScb); 128static void i91uSCBPost(u8 * pHcb, u8 * pScb);
170 129
171/* PCI Devices supported by this driver */ 130/* PCI Devices supported by this driver */
172static struct pci_device_id i91u_pci_devices[] = { 131static struct pci_device_id i91u_pci_devices[] = {
@@ -184,74 +143,66 @@ MODULE_DEVICE_TABLE(pci, i91u_pci_devices);
184#define DEBUG_STATE 0 143#define DEBUG_STATE 0
185#define INT_DISC 0 144#define INT_DISC 0
186 145
187/*--- external functions --*/ 146/*--- forward references ---*/
188static void tul_se2_wait(void); 147static struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun);
189 148static struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host);
190/*--- forward refrence ---*/ 149
191static SCB *tul_find_busy_scb(HCS * pCurHcb, WORD tarlun); 150static int tulip_main(struct initio_host * host);
192static SCB *tul_find_done_scb(HCS * pCurHcb); 151
193 152static int initio_next_state(struct initio_host * host);
194static int tulip_main(HCS * pCurHcb); 153static int initio_state_1(struct initio_host * host);
195 154static int initio_state_2(struct initio_host * host);
196static int tul_next_state(HCS * pCurHcb); 155static int initio_state_3(struct initio_host * host);
197static int tul_state_1(HCS * pCurHcb); 156static int initio_state_4(struct initio_host * host);
198static int tul_state_2(HCS * pCurHcb); 157static int initio_state_5(struct initio_host * host);
199static int tul_state_3(HCS * pCurHcb); 158static int initio_state_6(struct initio_host * host);
200static int tul_state_4(HCS * pCurHcb); 159static int initio_state_7(struct initio_host * host);
201static int tul_state_5(HCS * pCurHcb); 160static int initio_xfer_data_in(struct initio_host * host);
202static int tul_state_6(HCS * pCurHcb); 161static int initio_xfer_data_out(struct initio_host * host);
203static int tul_state_7(HCS * pCurHcb); 162static int initio_xpad_in(struct initio_host * host);
204static int tul_xfer_data_in(HCS * pCurHcb); 163static int initio_xpad_out(struct initio_host * host);
205static int tul_xfer_data_out(HCS * pCurHcb); 164static int initio_status_msg(struct initio_host * host);
206static int tul_xpad_in(HCS * pCurHcb); 165
207static int tul_xpad_out(HCS * pCurHcb); 166static int initio_msgin(struct initio_host * host);
208static int tul_status_msg(HCS * pCurHcb); 167static int initio_msgin_sync(struct initio_host * host);
209 168static int initio_msgin_accept(struct initio_host * host);
210static int tul_msgin(HCS * pCurHcb); 169static int initio_msgout_reject(struct initio_host * host);
211static int tul_msgin_sync(HCS * pCurHcb); 170static int initio_msgin_extend(struct initio_host * host);
212static int tul_msgin_accept(HCS * pCurHcb); 171
213static int tul_msgout_reject(HCS * pCurHcb); 172static int initio_msgout_ide(struct initio_host * host);
214static int tul_msgin_extend(HCS * pCurHcb); 173static int initio_msgout_abort_targ(struct initio_host * host);
215 174static int initio_msgout_abort_tag(struct initio_host * host);
216static int tul_msgout_ide(HCS * pCurHcb); 175
217static int tul_msgout_abort_targ(HCS * pCurHcb); 176static int initio_bus_device_reset(struct initio_host * host);
218static int tul_msgout_abort_tag(HCS * pCurHcb); 177static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb);
219 178static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb);
220static int tul_bus_device_reset(HCS * pCurHcb); 179static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb);
221static void tul_select_atn(HCS * pCurHcb, SCB * pCurScb); 180static int int_initio_busfree(struct initio_host * host);
222static void tul_select_atn3(HCS * pCurHcb, SCB * pCurScb); 181static int int_initio_scsi_rst(struct initio_host * host);
223static void tul_select_atn_stop(HCS * pCurHcb, SCB * pCurScb); 182static int int_initio_bad_seq(struct initio_host * host);
224static int int_tul_busfree(HCS * pCurHcb); 183static int int_initio_resel(struct initio_host * host);
225static int int_tul_scsi_rst(HCS * pCurHcb); 184static int initio_sync_done(struct initio_host * host);
226static int int_tul_bad_seq(HCS * pCurHcb); 185static int wdtr_done(struct initio_host * host);
227static int int_tul_resel(HCS * pCurHcb); 186static int wait_tulip(struct initio_host * host);
228static int tul_sync_done(HCS * pCurHcb); 187static int initio_wait_done_disc(struct initio_host * host);
229static int wdtr_done(HCS * pCurHcb); 188static int initio_wait_disc(struct initio_host * host);
230static int wait_tulip(HCS * pCurHcb); 189static void tulip_scsi(struct initio_host * host);
231static int tul_wait_done_disc(HCS * pCurHcb); 190static int initio_post_scsi_rst(struct initio_host * host);
232static int tul_wait_disc(HCS * pCurHcb); 191
233static void tulip_scsi(HCS * pCurHcb); 192static void initio_se2_ew_en(unsigned long base);
234static int tul_post_scsi_rst(HCS * pCurHcb); 193static void initio_se2_ew_ds(unsigned long base);
235 194static int initio_se2_rd_all(unsigned long base);
236static void tul_se2_ew_en(WORD CurBase); 195static void initio_se2_update_all(unsigned long base); /* setup default pattern */
237static void tul_se2_ew_ds(WORD CurBase); 196static void initio_read_eeprom(unsigned long base);
238static int tul_se2_rd_all(WORD CurBase); 197
239static void tul_se2_update_all(WORD CurBase); /* setup default pattern */ 198/* ---- INTERNAL VARIABLES ---- */
240static void tul_read_eeprom(WORD CurBase); 199
241
242 /* ---- INTERNAL VARIABLES ---- */
243static HCS tul_hcs[MAX_SUPPORTED_ADAPTERS];
244static INI_ADPT_STRUCT i91u_adpt[MAX_SUPPORTED_ADAPTERS];
245
246/*NVRAM nvram, *nvramp = &nvram; */
247static NVRAM i91unvram; 200static NVRAM i91unvram;
248static NVRAM *i91unvramp; 201static NVRAM *i91unvramp;
249 202
250 203static u8 i91udftNvRam[64] =
251
252static UCHAR i91udftNvRam[64] =
253{ 204{
254/*----------- header -----------*/ 205 /*----------- header -----------*/
255 0x25, 0xc9, /* Signature */ 206 0x25, 0xc9, /* Signature */
256 0x40, /* Size */ 207 0x40, /* Size */
257 0x01, /* Revision */ 208 0x01, /* Revision */
@@ -289,7 +240,7 @@ static UCHAR i91udftNvRam[64] =
289 0, 0}; /* - CheckSum - */ 240 0, 0}; /* - CheckSum - */
290 241
291 242
292static UCHAR tul_rate_tbl[8] = /* fast 20 */ 243static u8 initio_rate_tbl[8] = /* fast 20 */
293{ 244{
294 /* nanosecond devide by 4 */ 245 /* nanosecond devide by 4 */
295 12, /* 50ns, 20M */ 246 12, /* 50ns, 20M */
@@ -302,53 +253,17 @@ static UCHAR tul_rate_tbl[8] = /* fast 20 */
302 62 /* 250ns, 4M */ 253 62 /* 250ns, 4M */
303}; 254};
304 255
305static void tul_do_pause(unsigned amount) 256static void initio_do_pause(unsigned amount)
306{ /* Pause for amount jiffies */ 257{
258 /* Pause for amount jiffies */
307 unsigned long the_time = jiffies + amount; 259 unsigned long the_time = jiffies + amount;
308 260
309 while (time_before_eq(jiffies, the_time)); 261 while (time_before_eq(jiffies, the_time))
262 cpu_relax();
310} 263}
311 264
312/*-- forward reference --*/ 265/*-- forward reference --*/
313 266
314/*******************************************************************
315 Use memeory refresh time ~ 15us * 2
316********************************************************************/
317void tul_se2_wait(void)
318{
319#if 1
320 udelay(30);
321#else
322 UCHAR readByte;
323
324 readByte = TUL_RD(0, 0x61);
325 if ((readByte & 0x10) == 0x10) {
326 for (;;) {
327 readByte = TUL_RD(0, 0x61);
328 if ((readByte & 0x10) == 0x10)
329 break;
330 }
331 for (;;) {
332 readByte = TUL_RD(0, 0x61);
333 if ((readByte & 0x10) != 0x10)
334 break;
335 }
336 } else {
337 for (;;) {
338 readByte = TUL_RD(0, 0x61);
339 if ((readByte & 0x10) == 0x10)
340 break;
341 }
342 for (;;) {
343 readByte = TUL_RD(0, 0x61);
344 if ((readByte & 0x10) != 0x10)
345 break;
346 }
347 }
348#endif
349}
350
351
352/****************************************************************** 267/******************************************************************
353 Input: instruction for Serial E2PROM 268 Input: instruction for Serial E2PROM
354 269
@@ -379,1174 +294,1019 @@ void tul_se2_wait(void)
379 294
380 295
381******************************************************************/ 296******************************************************************/
382static void tul_se2_instr(WORD CurBase, UCHAR instr) 297
298/**
299 * initio_se2_instr - bitbang an instruction
300 * @base: Base of InitIO controller
301 * @instr: Instruction for serial E2PROM
302 *
303 * Bitbang an instruction out to the serial E2Prom
304 */
305
306static void initio_se2_instr(unsigned long base, u8 instr)
383{ 307{
384 int i; 308 int i;
385 UCHAR b; 309 u8 b;
386 310
387 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2DO); /* cs+start bit */ 311 outb(SE2CS | SE2DO, base + TUL_NVRAM); /* cs+start bit */
388 tul_se2_wait(); 312 udelay(30);
389 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK | SE2DO); /* +CLK */ 313 outb(SE2CS | SE2CLK | SE2DO, base + TUL_NVRAM); /* +CLK */
390 tul_se2_wait(); 314 udelay(30);
391 315
392 for (i = 0; i < 8; i++) { 316 for (i = 0; i < 8; i++) {
393 if (instr & 0x80) 317 if (instr & 0x80)
394 b = SE2CS | SE2DO; /* -CLK+dataBit */ 318 b = SE2CS | SE2DO; /* -CLK+dataBit */
395 else 319 else
396 b = SE2CS; /* -CLK */ 320 b = SE2CS; /* -CLK */
397 TUL_WR(CurBase + TUL_NVRAM, b); 321 outb(b, base + TUL_NVRAM);
398 tul_se2_wait(); 322 udelay(30);
399 TUL_WR(CurBase + TUL_NVRAM, b | SE2CLK); /* +CLK */ 323 outb(b | SE2CLK, base + TUL_NVRAM); /* +CLK */
400 tul_se2_wait(); 324 udelay(30);
401 instr <<= 1; 325 instr <<= 1;
402 } 326 }
403 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */ 327 outb(SE2CS, base + TUL_NVRAM); /* -CLK */
404 tul_se2_wait(); 328 udelay(30);
405 return;
406} 329}
407 330
408 331
409/****************************************************************** 332/**
410 Function name : tul_se2_ew_en 333 * initio_se2_ew_en - Enable erase/write
411 Description : Enable erase/write state of serial EEPROM 334 * @base: Base address of InitIO controller
412******************************************************************/ 335 *
413void tul_se2_ew_en(WORD CurBase) 336 * Enable erase/write state of serial EEPROM
337 */
338void initio_se2_ew_en(unsigned long base)
414{ 339{
415 tul_se2_instr(CurBase, 0x30); /* EWEN */ 340 initio_se2_instr(base, 0x30); /* EWEN */
416 TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */ 341 outb(0, base + TUL_NVRAM); /* -CS */
417 tul_se2_wait(); 342 udelay(30);
418 return;
419} 343}
420 344
421 345
422/************************************************************************ 346/**
423 Disable erase/write state of serial EEPROM 347 * initio_se2_ew_ds - Disable erase/write
424*************************************************************************/ 348 * @base: Base address of InitIO controller
425void tul_se2_ew_ds(WORD CurBase) 349 *
350 * Disable erase/write state of serial EEPROM
351 */
352void initio_se2_ew_ds(unsigned long base)
426{ 353{
427 tul_se2_instr(CurBase, 0); /* EWDS */ 354 initio_se2_instr(base, 0); /* EWDS */
428 TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */ 355 outb(0, base + TUL_NVRAM); /* -CS */
429 tul_se2_wait(); 356 udelay(30);
430 return;
431} 357}
432 358
433 359
434/****************************************************************** 360/**
435 Input :address of Serial E2PROM 361 * initio_se2_rd - read E2PROM word
436 Output :value stored in Serial E2PROM 362 * @base: Base of InitIO controller
437*******************************************************************/ 363 * @addr: Address of word in E2PROM
438static USHORT tul_se2_rd(WORD CurBase, ULONG adr) 364 *
365 * Read a word from the NV E2PROM device
366 */
367static u16 initio_se2_rd(unsigned long base, u8 addr)
439{ 368{
440 UCHAR instr, readByte; 369 u8 instr, rb;
441 USHORT readWord; 370 u16 val = 0;
442 int i; 371 int i;
443 372
444 instr = (UCHAR) (adr | 0x80); 373 instr = (u8) (addr | 0x80);
445 tul_se2_instr(CurBase, instr); /* READ INSTR */ 374 initio_se2_instr(base, instr); /* READ INSTR */
446 readWord = 0;
447 375
448 for (i = 15; i >= 0; i--) { 376 for (i = 15; i >= 0; i--) {
449 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */ 377 outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
450 tul_se2_wait(); 378 udelay(30);
451 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */ 379 outb(SE2CS, base + TUL_NVRAM); /* -CLK */
452 380
453 /* sample data after the following edge of clock */ 381 /* sample data after the following edge of clock */
454 readByte = TUL_RD(CurBase, TUL_NVRAM); 382 rb = inb(base + TUL_NVRAM);
455 readByte &= SE2DI; 383 rb &= SE2DI;
456 readWord += (readByte << i); 384 val += (rb << i);
457 tul_se2_wait(); /* 6/20/95 */ 385 udelay(30); /* 6/20/95 */
458 } 386 }
459 387
460 TUL_WR(CurBase + TUL_NVRAM, 0); /* no chip select */ 388 outb(0, base + TUL_NVRAM); /* no chip select */
461 tul_se2_wait(); 389 udelay(30);
462 return readWord; 390 return val;
463} 391}
464 392
465 393/**
466/****************************************************************** 394 * initio_se2_wr - read E2PROM word
467 Input: new value in Serial E2PROM, address of Serial E2PROM 395 * @base: Base of InitIO controller
468*******************************************************************/ 396 * @addr: Address of word in E2PROM
469static void tul_se2_wr(WORD CurBase, UCHAR adr, USHORT writeWord) 397 * @val: Value to write
398 *
399 * Write a word to the NV E2PROM device. Used when recovering from
400 * a problem with the NV.
401 */
402static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
470{ 403{
471 UCHAR readByte; 404 u8 rb;
472 UCHAR instr; 405 u8 instr;
473 int i; 406 int i;
474 407
475 instr = (UCHAR) (adr | 0x40); 408 instr = (u8) (addr | 0x40);
476 tul_se2_instr(CurBase, instr); /* WRITE INSTR */ 409 initio_se2_instr(base, instr); /* WRITE INSTR */
477 for (i = 15; i >= 0; i--) { 410 for (i = 15; i >= 0; i--) {
478 if (writeWord & 0x8000) 411 if (val & 0x8000)
479 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2DO); /* -CLK+dataBit 1 */ 412 outb(SE2CS | SE2DO, base + TUL_NVRAM); /* -CLK+dataBit 1 */
480 else 413 else
481 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK+dataBit 0 */ 414 outb(SE2CS, base + TUL_NVRAM); /* -CLK+dataBit 0 */
482 tul_se2_wait(); 415 udelay(30);
483 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */ 416 outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
484 tul_se2_wait(); 417 udelay(30);
485 writeWord <<= 1; 418 val <<= 1;
486 } 419 }
487 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */ 420 outb(SE2CS, base + TUL_NVRAM); /* -CLK */
488 tul_se2_wait(); 421 udelay(30);
489 TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */ 422 outb(0, base + TUL_NVRAM); /* -CS */
490 tul_se2_wait(); 423 udelay(30);
491 424
492 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* +CS */ 425 outb(SE2CS, base + TUL_NVRAM); /* +CS */
493 tul_se2_wait(); 426 udelay(30);
494 427
495 for (;;) { 428 for (;;) {
496 TUL_WR(CurBase + TUL_NVRAM, SE2CS | SE2CLK); /* +CLK */ 429 outb(SE2CS | SE2CLK, base + TUL_NVRAM); /* +CLK */
497 tul_se2_wait(); 430 udelay(30);
498 TUL_WR(CurBase + TUL_NVRAM, SE2CS); /* -CLK */ 431 outb(SE2CS, base + TUL_NVRAM); /* -CLK */
499 tul_se2_wait(); 432 udelay(30);
500 if ((readByte = TUL_RD(CurBase, TUL_NVRAM)) & SE2DI) 433 if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
501 break; /* write complete */ 434 break; /* write complete */
502 } 435 }
503 TUL_WR(CurBase + TUL_NVRAM, 0); /* -CS */ 436 outb(0, base + TUL_NVRAM); /* -CS */
504 return;
505} 437}
506 438
439/**
440 * initio_se2_rd_all - read hostadapter NV configuration
441 * @base: Base address of InitIO controller
442 *
443 * Reads the E2PROM data into main memory. Ensures that the checksum
444 * and header marker are valid. Returns 1 on success -1 on error.
445 */
507 446
508/*********************************************************************** 447static int initio_se2_rd_all(unsigned long base)
509 Read SCSI H/A configuration parameters from serial EEPROM
510************************************************************************/
511int tul_se2_rd_all(WORD CurBase)
512{ 448{
513 int i; 449 int i;
514 ULONG chksum = 0; 450 u16 chksum = 0;
515 USHORT *np; 451 u16 *np;
516 452
517 i91unvramp = &i91unvram; 453 i91unvramp = &i91unvram;
518 np = (USHORT *) i91unvramp; 454 np = (u16 *) i91unvramp;
519 for (i = 0; i < 32; i++) { 455 for (i = 0; i < 32; i++)
520 *np++ = tul_se2_rd(CurBase, i); 456 *np++ = initio_se2_rd(base, i);
521 }
522 457
523/*--------------------Is signature "ini" ok ? ----------------*/ 458 /* Is signature "ini" ok ? */
524 if (i91unvramp->NVM_Signature != INI_SIGNATURE) 459 if (i91unvramp->NVM_Signature != INI_SIGNATURE)
525 return -1; 460 return -1;
526/*---------------------- Is ckecksum ok ? ----------------------*/ 461 /* Is ckecksum ok ? */
527 np = (USHORT *) i91unvramp; 462 np = (u16 *) i91unvramp;
528 for (i = 0; i < 31; i++) 463 for (i = 0; i < 31; i++)
529 chksum += *np++; 464 chksum += *np++;
530 if (i91unvramp->NVM_CheckSum != (USHORT) chksum) 465 if (i91unvramp->NVM_CheckSum != chksum)
531 return -1; 466 return -1;
532 return 1; 467 return 1;
533} 468}
534 469
535 470/**
536/*********************************************************************** 471 * initio_se2_update_all - Update E2PROM
537 Update SCSI H/A configuration parameters from serial EEPROM 472 * @base: Base of InitIO controller
538************************************************************************/ 473 *
539void tul_se2_update_all(WORD CurBase) 474 * Update the E2PROM by wrting any changes into the E2PROM
475 * chip, rewriting the checksum.
476 */
477static void initio_se2_update_all(unsigned long base)
540{ /* setup default pattern */ 478{ /* setup default pattern */
541 int i; 479 int i;
542 ULONG chksum = 0; 480 u16 chksum = 0;
543 USHORT *np, *np1; 481 u16 *np, *np1;
544 482
545 i91unvramp = &i91unvram; 483 i91unvramp = &i91unvram;
546 /* Calculate checksum first */ 484 /* Calculate checksum first */
547 np = (USHORT *) i91udftNvRam; 485 np = (u16 *) i91udftNvRam;
548 for (i = 0; i < 31; i++) 486 for (i = 0; i < 31; i++)
549 chksum += *np++; 487 chksum += *np++;
550 *np = (USHORT) chksum; 488 *np = chksum;
551 tul_se2_ew_en(CurBase); /* Enable write */ 489 initio_se2_ew_en(base); /* Enable write */
552 490
553 np = (USHORT *) i91udftNvRam; 491 np = (u16 *) i91udftNvRam;
554 np1 = (USHORT *) i91unvramp; 492 np1 = (u16 *) i91unvramp;
555 for (i = 0; i < 32; i++, np++, np1++) { 493 for (i = 0; i < 32; i++, np++, np1++) {
556 if (*np != *np1) { 494 if (*np != *np1)
557 tul_se2_wr(CurBase, i, *np); 495 initio_se2_wr(base, i, *np);
558 }
559 } 496 }
560 497 initio_se2_ew_ds(base); /* Disable write */
561 tul_se2_ew_ds(CurBase); /* Disable write */
562 return;
563} 498}
564 499
565/************************************************************************* 500/**
566 Function name : read_eeprom 501 * initio_read_eeprom - Retrieve configuration
567**************************************************************************/ 502 * @base: Base of InitIO Host Adapter
568void tul_read_eeprom(WORD CurBase) 503 *
569{ 504 * Retrieve the host adapter configuration data from E2Prom. If the
570 UCHAR gctrl; 505 * data is invalid then the defaults are used and are also restored
571 506 * into the E2PROM. This forms the access point for the SCSI driver
572 i91unvramp = &i91unvram; 507 * into the E2PROM layer, the other functions for the E2PROM are all
573/*------Enable EEProm programming ---*/ 508 * internal use.
574 gctrl = TUL_RD(CurBase, TUL_GCTRL); 509 *
575 TUL_WR(CurBase + TUL_GCTRL, gctrl | TUL_GCTRL_EEPROM_BIT); 510 * Must be called single threaded, uses a shared global area.
576 if (tul_se2_rd_all(CurBase) != 1) { 511 */
577 tul_se2_update_all(CurBase); /* setup default pattern */
578 tul_se2_rd_all(CurBase); /* load again */
579 }
580/*------ Disable EEProm programming ---*/
581 gctrl = TUL_RD(CurBase, TUL_GCTRL);
582 TUL_WR(CurBase + TUL_GCTRL, gctrl & ~TUL_GCTRL_EEPROM_BIT);
583} /* read_eeprom */
584 512
585static int Addi91u_into_Adapter_table(WORD wBIOS, WORD wBASE, BYTE bInterrupt, 513static void initio_read_eeprom(unsigned long base)
586 BYTE bBus, BYTE bDevice)
587{ 514{
588 int i, j; 515 u8 gctrl;
589 516
590 for (i = 0; i < MAX_SUPPORTED_ADAPTERS; i++) { 517 i91unvramp = &i91unvram;
591 if (i91u_adpt[i].ADPT_BIOS < wBIOS) 518 /* Enable EEProm programming */
592 continue; 519 gctrl = inb(base + TUL_GCTRL);
593 if (i91u_adpt[i].ADPT_BIOS == wBIOS) { 520 outb(gctrl | TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
594 if (i91u_adpt[i].ADPT_BASE == wBASE) { 521 if (initio_se2_rd_all(base) != 1) {
595 if (i91u_adpt[i].ADPT_Bus != 0xFF) 522 initio_se2_update_all(base); /* setup default pattern */
596 return 1; 523 initio_se2_rd_all(base); /* load again */
597 } else if (i91u_adpt[i].ADPT_BASE < wBASE)
598 continue;
599 }
600 for (j = MAX_SUPPORTED_ADAPTERS - 1; j > i; j--) {
601 i91u_adpt[j].ADPT_BASE = i91u_adpt[j - 1].ADPT_BASE;
602 i91u_adpt[j].ADPT_INTR = i91u_adpt[j - 1].ADPT_INTR;
603 i91u_adpt[j].ADPT_BIOS = i91u_adpt[j - 1].ADPT_BIOS;
604 i91u_adpt[j].ADPT_Bus = i91u_adpt[j - 1].ADPT_Bus;
605 i91u_adpt[j].ADPT_Device = i91u_adpt[j - 1].ADPT_Device;
606 }
607 i91u_adpt[i].ADPT_BASE = wBASE;
608 i91u_adpt[i].ADPT_INTR = bInterrupt;
609 i91u_adpt[i].ADPT_BIOS = wBIOS;
610 i91u_adpt[i].ADPT_Bus = bBus;
611 i91u_adpt[i].ADPT_Device = bDevice;
612 return 0;
613 } 524 }
614 return 1; 525 /* Disable EEProm programming */
526 gctrl = inb(base + TUL_GCTRL);
527 outb(gctrl & ~TUL_GCTRL_EEPROM_BIT, base + TUL_GCTRL);
615} 528}
616 529
617static void init_i91uAdapter_table(void) 530/**
618{ 531 * initio_stop_bm - stop bus master
619 int i; 532 * @host: InitIO we are stopping
620 533 *
621 for (i = 0; i < MAX_SUPPORTED_ADAPTERS; i++) { /* Initialize adapter structure */ 534 * Stop any pending DMA operation, aborting the DMA if neccessary
622 i91u_adpt[i].ADPT_BIOS = 0xffff; 535 */
623 i91u_adpt[i].ADPT_BASE = 0xffff;
624 i91u_adpt[i].ADPT_INTR = 0xff;
625 i91u_adpt[i].ADPT_Bus = 0xff;
626 i91u_adpt[i].ADPT_Device = 0xff;
627 }
628 return;
629}
630 536
631static void tul_stop_bm(HCS * pCurHcb) 537static void initio_stop_bm(struct initio_host * host)
632{ 538{
633 539
634 if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */ 540 if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
635 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT | TAX_X_CLR_FIFO); 541 outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
636 /* wait Abort DMA xfer done */ 542 /* wait Abort DMA xfer done */
637 while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & XABT) == 0); 543 while ((inb(host->addr + TUL_Int) & XABT) == 0)
544 cpu_relax();
638 } 545 }
639 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 546 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
640} 547}
641 548
642/***************************************************************************/ 549/**
643static void get_tulipPCIConfig(HCS * pCurHcb, int ch_idx) 550 * initio_reset_scsi - Reset SCSI host controller
644{ 551 * @host: InitIO host to reset
645 pCurHcb->HCS_Base = i91u_adpt[ch_idx].ADPT_BASE; /* Supply base address */ 552 * @seconds: Recovery time
646 pCurHcb->HCS_BIOS = i91u_adpt[ch_idx].ADPT_BIOS; /* Supply BIOS address */ 553 *
647 pCurHcb->HCS_Intr = i91u_adpt[ch_idx].ADPT_INTR; /* Supply interrupt line */ 554 * Perform a full reset of the SCSI subsystem.
648 return; 555 */
649}
650 556
651/***************************************************************************/ 557static int initio_reset_scsi(struct initio_host * host, int seconds)
652static int tul_reset_scsi(HCS * pCurHcb, int seconds)
653{ 558{
654 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_RST_BUS); 559 outb(TSC_RST_BUS, host->addr + TUL_SCtrl0);
655 560
656 while (!((pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt)) & TSS_SCSIRST_INT)); 561 while (!((host->jsint = inb(host->addr + TUL_SInt)) & TSS_SCSIRST_INT))
657 /* reset tulip chip */ 562 cpu_relax();
658 563
659 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, 0); 564 /* reset tulip chip */
565 outb(0, host->addr + TUL_SSignal);
660 566
661 /* Stall for a while, wait for target's firmware ready,make it 2 sec ! */ 567 /* Stall for a while, wait for target's firmware ready,make it 2 sec ! */
662 /* SONY 5200 tape drive won't work if only stall for 1 sec */ 568 /* SONY 5200 tape drive won't work if only stall for 1 sec */
663 tul_do_pause(seconds * HZ); 569 /* FIXME: this is a very long busy wait right now */
664 570 initio_do_pause(seconds * HZ);
665 TUL_RD(pCurHcb->HCS_Base, TUL_SInt);
666 571
667 return (SCSI_RESET_SUCCESS); 572 inb(host->addr + TUL_SInt);
573 return SCSI_RESET_SUCCESS;
668} 574}
669 575
670/***************************************************************************/ 576/**
671static int init_tulip(HCS * pCurHcb, SCB * scbp, int tul_num_scb, 577 * initio_init - set up an InitIO host adapter
672 BYTE * pbBiosAdr, int seconds) 578 * @host: InitIO host adapter
579 * @num_scbs: Number of SCBS
580 * @bios_addr: BIOS address
581 *
582 * Set up the host adapter and devices according to the configuration
583 * retrieved from the E2PROM.
584 *
585 * Locking: Calls E2PROM layer code which is not re-enterable so must
586 * run single threaded for now.
587 */
588
589static void initio_init(struct initio_host * host, u8 *bios_addr)
673{ 590{
674 int i; 591 int i;
675 BYTE *pwFlags; 592 u8 *flags;
676 BYTE *pbHeads; 593 u8 *heads;
677 SCB *pTmpScb, *pPrevScb = NULL; 594
678 595 /* Get E2Prom configuration */
679 pCurHcb->HCS_NumScbs = tul_num_scb; 596 initio_read_eeprom(host->addr);
680 pCurHcb->HCS_Semaph = 1;
681 spin_lock_init(&pCurHcb->HCS_SemaphLock);
682 pCurHcb->HCS_JSStatus0 = 0;
683 pCurHcb->HCS_Scb = scbp;
684 pCurHcb->HCS_NxtPend = scbp;
685 pCurHcb->HCS_NxtAvail = scbp;
686 for (i = 0, pTmpScb = scbp; i < tul_num_scb; i++, pTmpScb++) {
687 pTmpScb->SCB_TagId = i;
688 if (i != 0)
689 pPrevScb->SCB_NxtScb = pTmpScb;
690 pPrevScb = pTmpScb;
691 }
692 pPrevScb->SCB_NxtScb = NULL;
693 pCurHcb->HCS_ScbEnd = pTmpScb;
694 pCurHcb->HCS_FirstAvail = scbp;
695 pCurHcb->HCS_LastAvail = pPrevScb;
696 spin_lock_init(&pCurHcb->HCS_AvailLock);
697 pCurHcb->HCS_FirstPend = NULL;
698 pCurHcb->HCS_LastPend = NULL;
699 pCurHcb->HCS_FirstBusy = NULL;
700 pCurHcb->HCS_LastBusy = NULL;
701 pCurHcb->HCS_FirstDone = NULL;
702 pCurHcb->HCS_LastDone = NULL;
703 pCurHcb->HCS_ActScb = NULL;
704 pCurHcb->HCS_ActTcs = NULL;
705
706 tul_read_eeprom(pCurHcb->HCS_Base);
707/*---------- get H/A configuration -------------*/
708 if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8) 597 if (i91unvramp->NVM_SCSIInfo[0].NVM_NumOfTarg == 8)
709 pCurHcb->HCS_MaxTar = 8; 598 host->max_tar = 8;
710 else 599 else
711 pCurHcb->HCS_MaxTar = 16; 600 host->max_tar = 16;
712 601
713 pCurHcb->HCS_Config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1; 602 host->config = i91unvramp->NVM_SCSIInfo[0].NVM_ChConfig1;
714 603
715 pCurHcb->HCS_SCSI_ID = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID; 604 host->scsi_id = i91unvramp->NVM_SCSIInfo[0].NVM_ChSCSIID;
716 pCurHcb->HCS_IdMask = ~(1 << pCurHcb->HCS_SCSI_ID); 605 host->idmask = ~(1 << host->scsi_id);
717 606
718#ifdef CHK_PARITY 607#ifdef CHK_PARITY
719 /* Enable parity error response */ 608 /* Enable parity error response */
720 TUL_WR(pCurHcb->HCS_Base + TUL_PCMD, TUL_RD(pCurHcb->HCS_Base, TUL_PCMD) | 0x40); 609 outb(inb(host->addr + TUL_PCMD) | 0x40, host->addr + TUL_PCMD);
721#endif 610#endif
722 611
723 /* Mask all the interrupt */ 612 /* Mask all the interrupt */
724 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F); 613 outb(0x1F, host->addr + TUL_Mask);
725 614
726 tul_stop_bm(pCurHcb); 615 initio_stop_bm(host);
727 /* --- Initialize the tulip --- */ 616 /* --- Initialize the tulip --- */
728 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_RST_CHIP); 617 outb(TSC_RST_CHIP, host->addr + TUL_SCtrl0);
729 618
730 /* program HBA's SCSI ID */ 619 /* program HBA's SCSI ID */
731 TUL_WR(pCurHcb->HCS_Base + TUL_SScsiId, pCurHcb->HCS_SCSI_ID << 4); 620 outb(host->scsi_id << 4, host->addr + TUL_SScsiId);
732 621
733 /* Enable Initiator Mode ,phase latch,alternate sync period mode, 622 /* Enable Initiator Mode ,phase latch,alternate sync period mode,
734 disable SCSI reset */ 623 disable SCSI reset */
735 if (pCurHcb->HCS_Config & HCC_EN_PAR) 624 if (host->config & HCC_EN_PAR)
736 pCurHcb->HCS_SConf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR); 625 host->sconf1 = (TSC_INITDEFAULT | TSC_EN_SCSI_PAR);
737 else 626 else
738 pCurHcb->HCS_SConf1 = (TSC_INITDEFAULT); 627 host->sconf1 = (TSC_INITDEFAULT);
739 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_SConf1); 628 outb(host->sconf1, host->addr + TUL_SConfig);
740 629
741 /* Enable HW reselect */ 630 /* Enable HW reselect */
742 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); 631 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
743 632
744 TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, 0); 633 outb(0, host->addr + TUL_SPeriod);
745 634
746 /* selection time out = 250 ms */ 635 /* selection time out = 250 ms */
747 TUL_WR(pCurHcb->HCS_Base + TUL_STimeOut, 153); 636 outb(153, host->addr + TUL_STimeOut);
748 637
749/*--------- Enable SCSI terminator -----*/ 638 /* Enable SCSI terminator */
750 TUL_WR(pCurHcb->HCS_Base + TUL_XCtrl, (pCurHcb->HCS_Config & (HCC_ACT_TERM1 | HCC_ACT_TERM2))); 639 outb((host->config & (HCC_ACT_TERM1 | HCC_ACT_TERM2)),
751 TUL_WR(pCurHcb->HCS_Base + TUL_GCTRL1, 640 host->addr + TUL_XCtrl);
752 ((pCurHcb->HCS_Config & HCC_AUTO_TERM) >> 4) | (TUL_RD(pCurHcb->HCS_Base, TUL_GCTRL1) & 0xFE)); 641 outb(((host->config & HCC_AUTO_TERM) >> 4) |
642 (inb(host->addr + TUL_GCTRL1) & 0xFE),
643 host->addr + TUL_GCTRL1);
753 644
754 for (i = 0, 645 for (i = 0,
755 pwFlags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config), 646 flags = & (i91unvramp->NVM_SCSIInfo[0].NVM_Targ0Config),
756 pbHeads = pbBiosAdr + 0x180; 647 heads = bios_addr + 0x180;
757 i < pCurHcb->HCS_MaxTar; 648 i < host->max_tar;
758 i++, pwFlags++) { 649 i++, flags++) {
759 pCurHcb->HCS_Tcs[i].TCS_Flags = *pwFlags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE); 650 host->targets[i].flags = *flags & ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
760 if (pCurHcb->HCS_Tcs[i].TCS_Flags & TCF_EN_255) 651 if (host->targets[i].flags & TCF_EN_255)
761 pCurHcb->HCS_Tcs[i].TCS_DrvFlags = TCF_DRV_255_63; 652 host->targets[i].drv_flags = TCF_DRV_255_63;
762 else 653 else
763 pCurHcb->HCS_Tcs[i].TCS_DrvFlags = 0; 654 host->targets[i].drv_flags = 0;
764 pCurHcb->HCS_Tcs[i].TCS_JS_Period = 0; 655 host->targets[i].js_period = 0;
765 pCurHcb->HCS_Tcs[i].TCS_SConfig0 = pCurHcb->HCS_SConf1; 656 host->targets[i].sconfig0 = host->sconf1;
766 pCurHcb->HCS_Tcs[i].TCS_DrvHead = *pbHeads++; 657 host->targets[i].heads = *heads++;
767 if (pCurHcb->HCS_Tcs[i].TCS_DrvHead == 255) 658 if (host->targets[i].heads == 255)
768 pCurHcb->HCS_Tcs[i].TCS_DrvFlags = TCF_DRV_255_63; 659 host->targets[i].drv_flags = TCF_DRV_255_63;
769 else 660 else
770 pCurHcb->HCS_Tcs[i].TCS_DrvFlags = 0; 661 host->targets[i].drv_flags = 0;
771 pCurHcb->HCS_Tcs[i].TCS_DrvSector = *pbHeads++; 662 host->targets[i].sectors = *heads++;
772 pCurHcb->HCS_Tcs[i].TCS_Flags &= ~TCF_BUSY; 663 host->targets[i].flags &= ~TCF_BUSY;
773 pCurHcb->HCS_ActTags[i] = 0; 664 host->act_tags[i] = 0;
774 pCurHcb->HCS_MaxTags[i] = 0xFF; 665 host->max_tags[i] = 0xFF;
775 } /* for */ 666 } /* for */
776 printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n", 667 printk("i91u: PCI Base=0x%04X, IRQ=%d, BIOS=0x%04X0, SCSI ID=%d\n",
777 pCurHcb->HCS_Base, pCurHcb->HCS_Intr, 668 host->addr, host->irq,
778 pCurHcb->HCS_BIOS, pCurHcb->HCS_SCSI_ID); 669 host->bios_addr, host->scsi_id);
779/*------------------- reset SCSI Bus ---------------------------*/ 670 /* Reset SCSI Bus */
780 if (pCurHcb->HCS_Config & HCC_SCSI_RESET) { 671 if (host->config & HCC_SCSI_RESET) {
781 printk("i91u: Reset SCSI Bus ... \n"); 672 printk(KERN_INFO "i91u: Reset SCSI Bus ... \n");
782 tul_reset_scsi(pCurHcb, seconds); 673 initio_reset_scsi(host, 10);
783 } 674 }
784 TUL_WR(pCurHcb->HCS_Base + TUL_SCFG1, 0x17); 675 outb(0x17, host->addr + TUL_SCFG1);
785 TUL_WR(pCurHcb->HCS_Base + TUL_SIntEnable, 0xE9); 676 outb(0xE9, host->addr + TUL_SIntEnable);
786 return (0);
787} 677}
788 678
789/***************************************************************************/ 679/**
790static SCB *tul_alloc_scb(HCS * hcsp) 680 * initio_alloc_scb - Allocate an SCB
681 * @host: InitIO host we are allocating for
682 *
683 * Walk the SCB list for the controller and allocate a free SCB if
684 * one exists.
685 */
686static struct scsi_ctrl_blk *initio_alloc_scb(struct initio_host *host)
791{ 687{
792 SCB *pTmpScb; 688 struct scsi_ctrl_blk *scb;
793 ULONG flags; 689 unsigned long flags;
794 spin_lock_irqsave(&(hcsp->HCS_AvailLock), flags); 690
795 if ((pTmpScb = hcsp->HCS_FirstAvail) != NULL) { 691 spin_lock_irqsave(&host->avail_lock, flags);
692 if ((scb = host->first_avail) != NULL) {
796#if DEBUG_QUEUE 693#if DEBUG_QUEUE
797 printk("find scb at %08lx\n", (ULONG) pTmpScb); 694 printk("find scb at %p\n", scb);
798#endif 695#endif
799 if ((hcsp->HCS_FirstAvail = pTmpScb->SCB_NxtScb) == NULL) 696 if ((host->first_avail = scb->next) == NULL)
800 hcsp->HCS_LastAvail = NULL; 697 host->last_avail = NULL;
801 pTmpScb->SCB_NxtScb = NULL; 698 scb->next = NULL;
802 pTmpScb->SCB_Status = SCB_RENT; 699 scb->status = SCB_RENT;
803 } 700 }
804 spin_unlock_irqrestore(&(hcsp->HCS_AvailLock), flags); 701 spin_unlock_irqrestore(&host->avail_lock, flags);
805 return (pTmpScb); 702 return scb;
806} 703}
807 704
808/***************************************************************************/ 705/**
809static void tul_release_scb(HCS * hcsp, SCB * scbp) 706 * initio_release_scb - Release an SCB
707 * @host: InitIO host that owns the SCB
708 * @cmnd: SCB command block being returned
709 *
710 * Return an allocated SCB to the host free list
711 */
712
713static void initio_release_scb(struct initio_host * host, struct scsi_ctrl_blk * cmnd)
810{ 714{
811 ULONG flags; 715 unsigned long flags;
812 716
813#if DEBUG_QUEUE 717#if DEBUG_QUEUE
814 printk("Release SCB %lx; ", (ULONG) scbp); 718 printk("Release SCB %p; ", cmnd);
815#endif 719#endif
816 spin_lock_irqsave(&(hcsp->HCS_AvailLock), flags); 720 spin_lock_irqsave(&(host->avail_lock), flags);
817 scbp->SCB_Srb = NULL; 721 cmnd->srb = NULL;
818 scbp->SCB_Status = 0; 722 cmnd->status = 0;
819 scbp->SCB_NxtScb = NULL; 723 cmnd->next = NULL;
820 if (hcsp->HCS_LastAvail != NULL) { 724 if (host->last_avail != NULL) {
821 hcsp->HCS_LastAvail->SCB_NxtScb = scbp; 725 host->last_avail->next = cmnd;
822 hcsp->HCS_LastAvail = scbp; 726 host->last_avail = cmnd;
823 } else { 727 } else {
824 hcsp->HCS_FirstAvail = scbp; 728 host->first_avail = cmnd;
825 hcsp->HCS_LastAvail = scbp; 729 host->last_avail = cmnd;
826 } 730 }
827 spin_unlock_irqrestore(&(hcsp->HCS_AvailLock), flags); 731 spin_unlock_irqrestore(&(host->avail_lock), flags);
828} 732}
829 733
830/***************************************************************************/ 734/***************************************************************************/
831static void tul_append_pend_scb(HCS * pCurHcb, SCB * scbp) 735static void initio_append_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
832{ 736{
833 737
834#if DEBUG_QUEUE 738#if DEBUG_QUEUE
835 printk("Append pend SCB %lx; ", (ULONG) scbp); 739 printk("Append pend SCB %p; ", scbp);
836#endif 740#endif
837 scbp->SCB_Status = SCB_PEND; 741 scbp->status = SCB_PEND;
838 scbp->SCB_NxtScb = NULL; 742 scbp->next = NULL;
839 if (pCurHcb->HCS_LastPend != NULL) { 743 if (host->last_pending != NULL) {
840 pCurHcb->HCS_LastPend->SCB_NxtScb = scbp; 744 host->last_pending->next = scbp;
841 pCurHcb->HCS_LastPend = scbp; 745 host->last_pending = scbp;
842 } else { 746 } else {
843 pCurHcb->HCS_FirstPend = scbp; 747 host->first_pending = scbp;
844 pCurHcb->HCS_LastPend = scbp; 748 host->last_pending = scbp;
845 } 749 }
846} 750}
847 751
848/***************************************************************************/ 752/***************************************************************************/
849static void tul_push_pend_scb(HCS * pCurHcb, SCB * scbp) 753static void initio_push_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
850{ 754{
851 755
852#if DEBUG_QUEUE 756#if DEBUG_QUEUE
853 printk("Push pend SCB %lx; ", (ULONG) scbp); 757 printk("Push pend SCB %p; ", scbp);
854#endif 758#endif
855 scbp->SCB_Status = SCB_PEND; 759 scbp->status = SCB_PEND;
856 if ((scbp->SCB_NxtScb = pCurHcb->HCS_FirstPend) != NULL) { 760 if ((scbp->next = host->first_pending) != NULL) {
857 pCurHcb->HCS_FirstPend = scbp; 761 host->first_pending = scbp;
858 } else { 762 } else {
859 pCurHcb->HCS_FirstPend = scbp; 763 host->first_pending = scbp;
860 pCurHcb->HCS_LastPend = scbp; 764 host->last_pending = scbp;
861 } 765 }
862} 766}
863 767
864/***************************************************************************/ 768static struct scsi_ctrl_blk *initio_find_first_pend_scb(struct initio_host * host)
865static SCB *tul_find_first_pend_scb(HCS * pCurHcb)
866{ 769{
867 SCB *pFirstPend; 770 struct scsi_ctrl_blk *first;
868 771
869 772
870 pFirstPend = pCurHcb->HCS_FirstPend; 773 first = host->first_pending;
871 while (pFirstPend != NULL) { 774 while (first != NULL) {
872 if (pFirstPend->SCB_Opcode != ExecSCSI) { 775 if (first->opcode != ExecSCSI)
873 return (pFirstPend); 776 return first;
874 } 777 if (first->tagmsg == 0) {
875 if (pFirstPend->SCB_TagMsg == 0) { 778 if ((host->act_tags[first->target] == 0) &&
876 if ((pCurHcb->HCS_ActTags[pFirstPend->SCB_Target] == 0) && 779 !(host->targets[first->target].flags & TCF_BUSY))
877 !(pCurHcb->HCS_Tcs[pFirstPend->SCB_Target].TCS_Flags & TCF_BUSY)) { 780 return first;
878 return (pFirstPend);
879 }
880 } else { 781 } else {
881 if ((pCurHcb->HCS_ActTags[pFirstPend->SCB_Target] >= 782 if ((host->act_tags[first->target] >=
882 pCurHcb->HCS_MaxTags[pFirstPend->SCB_Target]) | 783 host->max_tags[first->target]) |
883 (pCurHcb->HCS_Tcs[pFirstPend->SCB_Target].TCS_Flags & TCF_BUSY)) { 784 (host->targets[first->target].flags & TCF_BUSY)) {
884 pFirstPend = pFirstPend->SCB_NxtScb; 785 first = first->next;
885 continue; 786 continue;
886 } 787 }
887 return (pFirstPend); 788 return first;
888 } 789 }
889 pFirstPend = pFirstPend->SCB_NxtScb; 790 first = first->next;
890 } 791 }
891 792 return first;
892
893 return (pFirstPend);
894} 793}
895/***************************************************************************/ 794
896static void tul_unlink_pend_scb(HCS * pCurHcb, SCB * pCurScb) 795static void initio_unlink_pend_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
897{ 796{
898 SCB *pTmpScb, *pPrevScb; 797 struct scsi_ctrl_blk *tmp, *prev;
899 798
900#if DEBUG_QUEUE 799#if DEBUG_QUEUE
901 printk("unlink pend SCB %lx; ", (ULONG) pCurScb); 800 printk("unlink pend SCB %p; ", scb);
902#endif 801#endif
903 802
904 pPrevScb = pTmpScb = pCurHcb->HCS_FirstPend; 803 prev = tmp = host->first_pending;
905 while (pTmpScb != NULL) { 804 while (tmp != NULL) {
906 if (pCurScb == pTmpScb) { /* Unlink this SCB */ 805 if (scb == tmp) { /* Unlink this SCB */
907 if (pTmpScb == pCurHcb->HCS_FirstPend) { 806 if (tmp == host->first_pending) {
908 if ((pCurHcb->HCS_FirstPend = pTmpScb->SCB_NxtScb) == NULL) 807 if ((host->first_pending = tmp->next) == NULL)
909 pCurHcb->HCS_LastPend = NULL; 808 host->last_pending = NULL;
910 } else { 809 } else {
911 pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb; 810 prev->next = tmp->next;
912 if (pTmpScb == pCurHcb->HCS_LastPend) 811 if (tmp == host->last_pending)
913 pCurHcb->HCS_LastPend = pPrevScb; 812 host->last_pending = prev;
914 } 813 }
915 pTmpScb->SCB_NxtScb = NULL; 814 tmp->next = NULL;
916 break; 815 break;
917 } 816 }
918 pPrevScb = pTmpScb; 817 prev = tmp;
919 pTmpScb = pTmpScb->SCB_NxtScb; 818 tmp = tmp->next;
920 } 819 }
921 return;
922} 820}
923/***************************************************************************/ 821
924static void tul_append_busy_scb(HCS * pCurHcb, SCB * scbp) 822static void initio_append_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
925{ 823{
926 824
927#if DEBUG_QUEUE 825#if DEBUG_QUEUE
928 printk("append busy SCB %lx; ", (ULONG) scbp); 826 printk("append busy SCB %o; ", scbp);
929#endif 827#endif
930 if (scbp->SCB_TagMsg) 828 if (scbp->tagmsg)
931 pCurHcb->HCS_ActTags[scbp->SCB_Target]++; 829 host->act_tags[scbp->target]++;
932 else 830 else
933 pCurHcb->HCS_Tcs[scbp->SCB_Target].TCS_Flags |= TCF_BUSY; 831 host->targets[scbp->target].flags |= TCF_BUSY;
934 scbp->SCB_Status = SCB_BUSY; 832 scbp->status = SCB_BUSY;
935 scbp->SCB_NxtScb = NULL; 833 scbp->next = NULL;
936 if (pCurHcb->HCS_LastBusy != NULL) { 834 if (host->last_busy != NULL) {
937 pCurHcb->HCS_LastBusy->SCB_NxtScb = scbp; 835 host->last_busy->next = scbp;
938 pCurHcb->HCS_LastBusy = scbp; 836 host->last_busy = scbp;
939 } else { 837 } else {
940 pCurHcb->HCS_FirstBusy = scbp; 838 host->first_busy = scbp;
941 pCurHcb->HCS_LastBusy = scbp; 839 host->last_busy = scbp;
942 } 840 }
943} 841}
944 842
945/***************************************************************************/ 843/***************************************************************************/
946static SCB *tul_pop_busy_scb(HCS * pCurHcb) 844static struct scsi_ctrl_blk *initio_pop_busy_scb(struct initio_host * host)
947{ 845{
948 SCB *pTmpScb; 846 struct scsi_ctrl_blk *tmp;
949 847
950 848
951 if ((pTmpScb = pCurHcb->HCS_FirstBusy) != NULL) { 849 if ((tmp = host->first_busy) != NULL) {
952 if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL) 850 if ((host->first_busy = tmp->next) == NULL)
953 pCurHcb->HCS_LastBusy = NULL; 851 host->last_busy = NULL;
954 pTmpScb->SCB_NxtScb = NULL; 852 tmp->next = NULL;
955 if (pTmpScb->SCB_TagMsg) 853 if (tmp->tagmsg)
956 pCurHcb->HCS_ActTags[pTmpScb->SCB_Target]--; 854 host->act_tags[tmp->target]--;
957 else 855 else
958 pCurHcb->HCS_Tcs[pTmpScb->SCB_Target].TCS_Flags &= ~TCF_BUSY; 856 host->targets[tmp->target].flags &= ~TCF_BUSY;
959 } 857 }
960#if DEBUG_QUEUE 858#if DEBUG_QUEUE
961 printk("Pop busy SCB %lx; ", (ULONG) pTmpScb); 859 printk("Pop busy SCB %p; ", tmp);
962#endif 860#endif
963 return (pTmpScb); 861 return tmp;
964} 862}
965 863
966/***************************************************************************/ 864/***************************************************************************/
967static void tul_unlink_busy_scb(HCS * pCurHcb, SCB * pCurScb) 865static void initio_unlink_busy_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
968{ 866{
969 SCB *pTmpScb, *pPrevScb; 867 struct scsi_ctrl_blk *tmp, *prev;
970 868
971#if DEBUG_QUEUE 869#if DEBUG_QUEUE
972 printk("unlink busy SCB %lx; ", (ULONG) pCurScb); 870 printk("unlink busy SCB %p; ", scb);
973#endif 871#endif
974 872
975 pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy; 873 prev = tmp = host->first_busy;
976 while (pTmpScb != NULL) { 874 while (tmp != NULL) {
977 if (pCurScb == pTmpScb) { /* Unlink this SCB */ 875 if (scb == tmp) { /* Unlink this SCB */
978 if (pTmpScb == pCurHcb->HCS_FirstBusy) { 876 if (tmp == host->first_busy) {
979 if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL) 877 if ((host->first_busy = tmp->next) == NULL)
980 pCurHcb->HCS_LastBusy = NULL; 878 host->last_busy = NULL;
981 } else { 879 } else {
982 pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb; 880 prev->next = tmp->next;
983 if (pTmpScb == pCurHcb->HCS_LastBusy) 881 if (tmp == host->last_busy)
984 pCurHcb->HCS_LastBusy = pPrevScb; 882 host->last_busy = prev;
985 } 883 }
986 pTmpScb->SCB_NxtScb = NULL; 884 tmp->next = NULL;
987 if (pTmpScb->SCB_TagMsg) 885 if (tmp->tagmsg)
988 pCurHcb->HCS_ActTags[pTmpScb->SCB_Target]--; 886 host->act_tags[tmp->target]--;
989 else 887 else
990 pCurHcb->HCS_Tcs[pTmpScb->SCB_Target].TCS_Flags &= ~TCF_BUSY; 888 host->targets[tmp->target].flags &= ~TCF_BUSY;
991 break; 889 break;
992 } 890 }
993 pPrevScb = pTmpScb; 891 prev = tmp;
994 pTmpScb = pTmpScb->SCB_NxtScb; 892 tmp = tmp->next;
995 } 893 }
996 return; 894 return;
997} 895}
998 896
999/***************************************************************************/ 897struct scsi_ctrl_blk *initio_find_busy_scb(struct initio_host * host, u16 tarlun)
1000SCB *tul_find_busy_scb(HCS * pCurHcb, WORD tarlun)
1001{ 898{
1002 SCB *pTmpScb, *pPrevScb; 899 struct scsi_ctrl_blk *tmp, *prev;
1003 WORD scbp_tarlun; 900 u16 scbp_tarlun;
1004 901
1005 902
1006 pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy; 903 prev = tmp = host->first_busy;
1007 while (pTmpScb != NULL) { 904 while (tmp != NULL) {
1008 scbp_tarlun = (pTmpScb->SCB_Lun << 8) | (pTmpScb->SCB_Target); 905 scbp_tarlun = (tmp->lun << 8) | (tmp->target);
1009 if (scbp_tarlun == tarlun) { /* Unlink this SCB */ 906 if (scbp_tarlun == tarlun) { /* Unlink this SCB */
1010 break; 907 break;
1011 } 908 }
1012 pPrevScb = pTmpScb; 909 prev = tmp;
1013 pTmpScb = pTmpScb->SCB_NxtScb; 910 tmp = tmp->next;
1014 } 911 }
1015#if DEBUG_QUEUE 912#if DEBUG_QUEUE
1016 printk("find busy SCB %lx; ", (ULONG) pTmpScb); 913 printk("find busy SCB %p; ", tmp);
1017#endif 914#endif
1018 return (pTmpScb); 915 return tmp;
1019} 916}
1020 917
1021/***************************************************************************/ 918static void initio_append_done_scb(struct initio_host * host, struct scsi_ctrl_blk * scbp)
1022static void tul_append_done_scb(HCS * pCurHcb, SCB * scbp)
1023{ 919{
1024
1025#if DEBUG_QUEUE 920#if DEBUG_QUEUE
1026 printk("append done SCB %lx; ", (ULONG) scbp); 921 printk("append done SCB %p; ", scbp);
1027#endif 922#endif
1028 923
1029 scbp->SCB_Status = SCB_DONE; 924 scbp->status = SCB_DONE;
1030 scbp->SCB_NxtScb = NULL; 925 scbp->next = NULL;
1031 if (pCurHcb->HCS_LastDone != NULL) { 926 if (host->last_done != NULL) {
1032 pCurHcb->HCS_LastDone->SCB_NxtScb = scbp; 927 host->last_done->next = scbp;
1033 pCurHcb->HCS_LastDone = scbp; 928 host->last_done = scbp;
1034 } else { 929 } else {
1035 pCurHcb->HCS_FirstDone = scbp; 930 host->first_done = scbp;
1036 pCurHcb->HCS_LastDone = scbp; 931 host->last_done = scbp;
1037 } 932 }
1038} 933}
1039 934
1040/***************************************************************************/ 935struct scsi_ctrl_blk *initio_find_done_scb(struct initio_host * host)
1041SCB *tul_find_done_scb(HCS * pCurHcb)
1042{ 936{
1043 SCB *pTmpScb; 937 struct scsi_ctrl_blk *tmp;
1044 938
1045 939 if ((tmp = host->first_done) != NULL) {
1046 if ((pTmpScb = pCurHcb->HCS_FirstDone) != NULL) { 940 if ((host->first_done = tmp->next) == NULL)
1047 if ((pCurHcb->HCS_FirstDone = pTmpScb->SCB_NxtScb) == NULL) 941 host->last_done = NULL;
1048 pCurHcb->HCS_LastDone = NULL; 942 tmp->next = NULL;
1049 pTmpScb->SCB_NxtScb = NULL;
1050 } 943 }
1051#if DEBUG_QUEUE 944#if DEBUG_QUEUE
1052 printk("find done SCB %lx; ", (ULONG) pTmpScb); 945 printk("find done SCB %p; ",tmp);
1053#endif 946#endif
1054 return (pTmpScb); 947 return tmp;
1055} 948}
1056 949
1057/***************************************************************************/ 950static int initio_abort_srb(struct initio_host * host, struct scsi_cmnd *srbp)
1058static int tul_abort_srb(HCS * pCurHcb, struct scsi_cmnd *srbp)
1059{ 951{
1060 ULONG flags; 952 unsigned long flags;
1061 SCB *pTmpScb, *pPrevScb; 953 struct scsi_ctrl_blk *tmp, *prev;
1062 954
1063 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags); 955 spin_lock_irqsave(&host->semaph_lock, flags);
1064 956
1065 if ((pCurHcb->HCS_Semaph == 0) && (pCurHcb->HCS_ActScb == NULL)) { 957 if ((host->semaph == 0) && (host->active == NULL)) {
1066 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
1067 /* disable Jasmin SCSI Int */ 958 /* disable Jasmin SCSI Int */
1068 959 outb(0x1F, host->addr + TUL_Mask);
1069 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags); 960 spin_unlock_irqrestore(&host->semaph_lock, flags);
1070 961 /* FIXME: synchronize_irq needed ? */
1071 tulip_main(pCurHcb); 962 tulip_main(host);
1072 963 spin_lock_irqsave(&host->semaph_lock, flags);
1073 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags); 964 host->semaph = 1;
1074 965 outb(0x0F, host->addr + TUL_Mask);
1075 pCurHcb->HCS_Semaph = 1; 966 spin_unlock_irqrestore(&host->semaph_lock, flags);
1076 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
1077
1078 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1079
1080 return SCSI_ABORT_SNOOZE; 967 return SCSI_ABORT_SNOOZE;
1081 } 968 }
1082 pPrevScb = pTmpScb = pCurHcb->HCS_FirstPend; /* Check Pend queue */ 969 prev = tmp = host->first_pending; /* Check Pend queue */
1083 while (pTmpScb != NULL) { 970 while (tmp != NULL) {
1084 /* 07/27/98 */ 971 /* 07/27/98 */
1085 if (pTmpScb->SCB_Srb == srbp) { 972 if (tmp->srb == srbp) {
1086 if (pTmpScb == pCurHcb->HCS_ActScb) { 973 if (tmp == host->active) {
1087 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags); 974 spin_unlock_irqrestore(&host->semaph_lock, flags);
1088 return SCSI_ABORT_BUSY; 975 return SCSI_ABORT_BUSY;
1089 } else if (pTmpScb == pCurHcb->HCS_FirstPend) { 976 } else if (tmp == host->first_pending) {
1090 if ((pCurHcb->HCS_FirstPend = pTmpScb->SCB_NxtScb) == NULL) 977 if ((host->first_pending = tmp->next) == NULL)
1091 pCurHcb->HCS_LastPend = NULL; 978 host->last_pending = NULL;
1092 } else { 979 } else {
1093 pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb; 980 prev->next = tmp->next;
1094 if (pTmpScb == pCurHcb->HCS_LastPend) 981 if (tmp == host->last_pending)
1095 pCurHcb->HCS_LastPend = pPrevScb; 982 host->last_pending = prev;
1096 } 983 }
1097 pTmpScb->SCB_HaStat = HOST_ABORTED; 984 tmp->hastat = HOST_ABORTED;
1098 pTmpScb->SCB_Flags |= SCF_DONE; 985 tmp->flags |= SCF_DONE;
1099 if (pTmpScb->SCB_Flags & SCF_POST) 986 if (tmp->flags & SCF_POST)
1100 (*pTmpScb->SCB_Post) ((BYTE *) pCurHcb, (BYTE *) pTmpScb); 987 (*tmp->post) ((u8 *) host, (u8 *) tmp);
1101 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags); 988 spin_unlock_irqrestore(&host->semaph_lock, flags);
1102 return SCSI_ABORT_SUCCESS; 989 return SCSI_ABORT_SUCCESS;
1103 } 990 }
1104 pPrevScb = pTmpScb; 991 prev = tmp;
1105 pTmpScb = pTmpScb->SCB_NxtScb; 992 tmp = tmp->next;
1106 } 993 }
1107 994
1108 pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */ 995 prev = tmp = host->first_busy; /* Check Busy queue */
1109 while (pTmpScb != NULL) { 996 while (tmp != NULL) {
1110 997 if (tmp->srb == srbp) {
1111 if (pTmpScb->SCB_Srb == srbp) { 998 if (tmp == host->active) {
1112 999 spin_unlock_irqrestore(&host->semaph_lock, flags);
1113 if (pTmpScb == pCurHcb->HCS_ActScb) {
1114 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1115 return SCSI_ABORT_BUSY; 1000 return SCSI_ABORT_BUSY;
1116 } else if (pTmpScb->SCB_TagMsg == 0) { 1001 } else if (tmp->tagmsg == 0) {
1117 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags); 1002 spin_unlock_irqrestore(&host->semaph_lock, flags);
1118 return SCSI_ABORT_BUSY; 1003 return SCSI_ABORT_BUSY;
1119 } else { 1004 } else {
1120 pCurHcb->HCS_ActTags[pTmpScb->SCB_Target]--; 1005 host->act_tags[tmp->target]--;
1121 if (pTmpScb == pCurHcb->HCS_FirstBusy) { 1006 if (tmp == host->first_busy) {
1122 if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL) 1007 if ((host->first_busy = tmp->next) == NULL)
1123 pCurHcb->HCS_LastBusy = NULL; 1008 host->last_busy = NULL;
1124 } else { 1009 } else {
1125 pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb; 1010 prev->next = tmp->next;
1126 if (pTmpScb == pCurHcb->HCS_LastBusy) 1011 if (tmp == host->last_busy)
1127 pCurHcb->HCS_LastBusy = pPrevScb; 1012 host->last_busy = prev;
1128 } 1013 }
1129 pTmpScb->SCB_NxtScb = NULL; 1014 tmp->next = NULL;
1130 1015
1131 1016
1132 pTmpScb->SCB_HaStat = HOST_ABORTED; 1017 tmp->hastat = HOST_ABORTED;
1133 pTmpScb->SCB_Flags |= SCF_DONE; 1018 tmp->flags |= SCF_DONE;
1134 if (pTmpScb->SCB_Flags & SCF_POST) 1019 if (tmp->flags & SCF_POST)
1135 (*pTmpScb->SCB_Post) ((BYTE *) pCurHcb, (BYTE *) pTmpScb); 1020 (*tmp->post) ((u8 *) host, (u8 *) tmp);
1136 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags); 1021 spin_unlock_irqrestore(&host->semaph_lock, flags);
1137 return SCSI_ABORT_SUCCESS; 1022 return SCSI_ABORT_SUCCESS;
1138 } 1023 }
1139 } 1024 }
1140 pPrevScb = pTmpScb; 1025 prev = tmp;
1141 pTmpScb = pTmpScb->SCB_NxtScb; 1026 tmp = tmp->next;
1142 } 1027 }
1143 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags); 1028 spin_unlock_irqrestore(&host->semaph_lock, flags);
1144 return (SCSI_ABORT_NOT_RUNNING); 1029 return SCSI_ABORT_NOT_RUNNING;
1145} 1030}
1146 1031
1147/***************************************************************************/ 1032/***************************************************************************/
1148static int tul_bad_seq(HCS * pCurHcb) 1033static int initio_bad_seq(struct initio_host * host)
1149{ 1034{
1150 SCB *pCurScb; 1035 struct scsi_ctrl_blk *scb;
1151
1152 printk("tul_bad_seg c=%d\n", pCurHcb->HCS_Index);
1153
1154 if ((pCurScb = pCurHcb->HCS_ActScb) != NULL) {
1155 tul_unlink_busy_scb(pCurHcb, pCurScb);
1156 pCurScb->SCB_HaStat = HOST_BAD_PHAS;
1157 pCurScb->SCB_TaStat = 0;
1158 tul_append_done_scb(pCurHcb, pCurScb);
1159 }
1160 tul_stop_bm(pCurHcb);
1161
1162 tul_reset_scsi(pCurHcb, 8); /* 7/29/98 */
1163
1164 return (tul_post_scsi_rst(pCurHcb));
1165}
1166
1167#if 0
1168
1169/************************************************************************/
1170static int tul_device_reset(HCS * pCurHcb, struct scsi_cmnd *pSrb,
1171 unsigned int target, unsigned int ResetFlags)
1172{
1173 ULONG flags;
1174 SCB *pScb;
1175 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
1176
1177 if (ResetFlags & SCSI_RESET_ASYNCHRONOUS) {
1178
1179 if ((pCurHcb->HCS_Semaph == 0) && (pCurHcb->HCS_ActScb == NULL)) {
1180 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
1181 /* disable Jasmin SCSI Int */
1182
1183 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1184
1185 tulip_main(pCurHcb);
1186
1187 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
1188
1189 pCurHcb->HCS_Semaph = 1;
1190 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
1191
1192 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1193
1194 return SCSI_RESET_SNOOZE;
1195 }
1196 pScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */
1197 while (pScb != NULL) {
1198 if (pScb->SCB_Srb == pSrb)
1199 break;
1200 pScb = pScb->SCB_NxtScb;
1201 }
1202 if (pScb == NULL) {
1203 printk("Unable to Reset - No SCB Found\n");
1204
1205 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1206 return SCSI_RESET_NOT_RUNNING;
1207 }
1208 }
1209 if ((pScb = tul_alloc_scb(pCurHcb)) == NULL) {
1210 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1211 return SCSI_RESET_NOT_RUNNING;
1212 }
1213 pScb->SCB_Opcode = BusDevRst;
1214 pScb->SCB_Flags = SCF_POST;
1215 pScb->SCB_Target = target;
1216 pScb->SCB_Mode = 0;
1217
1218 pScb->SCB_Srb = NULL;
1219 if (ResetFlags & SCSI_RESET_SYNCHRONOUS) {
1220 pScb->SCB_Srb = pSrb;
1221 }
1222 tul_push_pend_scb(pCurHcb, pScb); /* push this SCB to Pending queue */
1223
1224 if (pCurHcb->HCS_Semaph == 1) {
1225 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
1226 /* disable Jasmin SCSI Int */
1227 pCurHcb->HCS_Semaph = 0;
1228
1229 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1230 1036
1231 tulip_main(pCurHcb); 1037 printk("initio_bad_seg c=%d\n", host->index);
1232 1038
1233 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags); 1039 if ((scb = host->active) != NULL) {
1234 1040 initio_unlink_busy_scb(host, scb);
1235 pCurHcb->HCS_Semaph = 1; 1041 scb->hastat = HOST_BAD_PHAS;
1236 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F); 1042 scb->tastat = 0;
1043 initio_append_done_scb(host, scb);
1237 } 1044 }
1238 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags); 1045 initio_stop_bm(host);
1239 return SCSI_RESET_PENDING; 1046 initio_reset_scsi(host, 8); /* 7/29/98 */
1047 return initio_post_scsi_rst(host);
1240} 1048}
1241 1049
1242static int tul_reset_scsi_bus(HCS * pCurHcb)
1243{
1244 ULONG flags;
1245
1246 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
1247 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
1248 pCurHcb->HCS_Semaph = 0;
1249
1250 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1251
1252 tul_stop_bm(pCurHcb);
1253
1254 tul_reset_scsi(pCurHcb, 2); /* 7/29/98 */
1255
1256 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
1257 tul_post_scsi_rst(pCurHcb);
1258
1259 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1260
1261 tulip_main(pCurHcb);
1262
1263 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags);
1264
1265 pCurHcb->HCS_Semaph = 1;
1266 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
1267 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1268 return (SCSI_RESET_SUCCESS | SCSI_RESET_HOST_RESET);
1269}
1270
1271#endif /* 0 */
1272 1050
1273/************************************************************************/ 1051/************************************************************************/
1274static void tul_exec_scb(HCS * pCurHcb, SCB * pCurScb) 1052static void initio_exec_scb(struct initio_host * host, struct scsi_ctrl_blk * scb)
1275{ 1053{
1276 ULONG flags; 1054 unsigned long flags;
1277 1055
1278 pCurScb->SCB_Mode = 0; 1056 scb->mode = 0;
1279 1057
1280 pCurScb->SCB_SGIdx = 0; 1058 scb->sgidx = 0;
1281 pCurScb->SCB_SGMax = pCurScb->SCB_SGLen; 1059 scb->sgmax = scb->sglen;
1282 1060
1283 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags); 1061 spin_lock_irqsave(&host->semaph_lock, flags);
1284 1062
1285 tul_append_pend_scb(pCurHcb, pCurScb); /* Append this SCB to Pending queue */ 1063 initio_append_pend_scb(host, scb); /* Append this SCB to Pending queue */
1286 1064
1287/* VVVVV 07/21/98 */ 1065/* VVVVV 07/21/98 */
1288 if (pCurHcb->HCS_Semaph == 1) { 1066 if (host->semaph == 1) {
1289 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F); 1067 /* Disable Jasmin SCSI Int */
1290 /* disable Jasmin SCSI Int */ 1068 outb(0x1F, host->addr + TUL_Mask);
1291 pCurHcb->HCS_Semaph = 0; 1069 host->semaph = 0;
1292 1070 spin_unlock_irqrestore(&host->semaph_lock, flags);
1293 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags);
1294 1071
1295 tulip_main(pCurHcb); 1072 tulip_main(host);
1296 1073
1297 spin_lock_irqsave(&(pCurHcb->HCS_SemaphLock), flags); 1074 spin_lock_irqsave(&host->semaph_lock, flags);
1298 1075 host->semaph = 1;
1299 pCurHcb->HCS_Semaph = 1; 1076 outb(0x0F, host->addr + TUL_Mask);
1300 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F);
1301 } 1077 }
1302 spin_unlock_irqrestore(&(pCurHcb->HCS_SemaphLock), flags); 1078 spin_unlock_irqrestore(&host->semaph_lock, flags);
1303 return; 1079 return;
1304} 1080}
1305 1081
1306/***************************************************************************/ 1082/***************************************************************************/
1307static int tul_isr(HCS * pCurHcb) 1083static int initio_isr(struct initio_host * host)
1308{ 1084{
1309 /* Enter critical section */ 1085 if (inb(host->addr + TUL_Int) & TSS_INT_PENDING) {
1310 1086 if (host->semaph == 1) {
1311 if (TUL_RD(pCurHcb->HCS_Base, TUL_Int) & TSS_INT_PENDING) { 1087 outb(0x1F, host->addr + TUL_Mask);
1312 if (pCurHcb->HCS_Semaph == 1) {
1313 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x1F);
1314 /* Disable Tulip SCSI Int */ 1088 /* Disable Tulip SCSI Int */
1315 pCurHcb->HCS_Semaph = 0; 1089 host->semaph = 0;
1316 1090
1317 tulip_main(pCurHcb); 1091 tulip_main(host);
1318 1092
1319 pCurHcb->HCS_Semaph = 1; 1093 host->semaph = 1;
1320 TUL_WR(pCurHcb->HCS_Base + TUL_Mask, 0x0F); 1094 outb(0x0F, host->addr + TUL_Mask);
1321 return (1); 1095 return 1;
1322 } 1096 }
1323 } 1097 }
1324 return (0); 1098 return 0;
1325} 1099}
1326 1100
1327/***************************************************************************/ 1101static int tulip_main(struct initio_host * host)
1328int tulip_main(HCS * pCurHcb)
1329{ 1102{
1330 SCB *pCurScb; 1103 struct scsi_ctrl_blk *scb;
1331 1104
1332 for (;;) { 1105 for (;;) {
1333 1106 tulip_scsi(host); /* Call tulip_scsi */
1334 tulip_scsi(pCurHcb); /* Call tulip_scsi */ 1107
1335 1108 /* Walk the list of completed SCBs */
1336 while ((pCurScb = tul_find_done_scb(pCurHcb)) != NULL) { /* find done entry */ 1109 while ((scb = initio_find_done_scb(host)) != NULL) { /* find done entry */
1337 if (pCurScb->SCB_TaStat == INI_QUEUE_FULL) { 1110 if (scb->tastat == INI_QUEUE_FULL) {
1338 pCurHcb->HCS_MaxTags[pCurScb->SCB_Target] = 1111 host->max_tags[scb->target] =
1339 pCurHcb->HCS_ActTags[pCurScb->SCB_Target] - 1; 1112 host->act_tags[scb->target] - 1;
1340 pCurScb->SCB_TaStat = 0; 1113 scb->tastat = 0;
1341 tul_append_pend_scb(pCurHcb, pCurScb); 1114 initio_append_pend_scb(host, scb);
1342 continue; 1115 continue;
1343 } 1116 }
1344 if (!(pCurScb->SCB_Mode & SCM_RSENS)) { /* not in auto req. sense mode */ 1117 if (!(scb->mode & SCM_RSENS)) { /* not in auto req. sense mode */
1345 if (pCurScb->SCB_TaStat == 2) { 1118 if (scb->tastat == 2) {
1346 1119
1347 /* clr sync. nego flag */ 1120 /* clr sync. nego flag */
1348 1121
1349 if (pCurScb->SCB_Flags & SCF_SENSE) { 1122 if (scb->flags & SCF_SENSE) {
1350 BYTE len; 1123 u8 len;
1351 len = pCurScb->SCB_SenseLen; 1124 len = scb->senselen;
1352 if (len == 0) 1125 if (len == 0)
1353 len = 1; 1126 len = 1;
1354 pCurScb->SCB_BufLen = pCurScb->SCB_SenseLen; 1127 scb->buflen = scb->senselen;
1355 pCurScb->SCB_BufPtr = pCurScb->SCB_SensePtr; 1128 scb->bufptr = scb->senseptr;
1356 pCurScb->SCB_Flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */ 1129 scb->flags &= ~(SCF_SG | SCF_DIR); /* for xfer_data_in */
1357/* pCurScb->SCB_Flags |= SCF_NO_DCHK; */ 1130 /* so, we won't report wrong direction in xfer_data_in,
1358 /* so, we won't report worng direction in xfer_data_in,
1359 and won't report HOST_DO_DU in state_6 */ 1131 and won't report HOST_DO_DU in state_6 */
1360 pCurScb->SCB_Mode = SCM_RSENS; 1132 scb->mode = SCM_RSENS;
1361 pCurScb->SCB_Ident &= 0xBF; /* Disable Disconnect */ 1133 scb->ident &= 0xBF; /* Disable Disconnect */
1362 pCurScb->SCB_TagMsg = 0; 1134 scb->tagmsg = 0;
1363 pCurScb->SCB_TaStat = 0; 1135 scb->tastat = 0;
1364 pCurScb->SCB_CDBLen = 6; 1136 scb->cdblen = 6;
1365 pCurScb->SCB_CDB[0] = SCSICMD_RequestSense; 1137 scb->cdb[0] = SCSICMD_RequestSense;
1366 pCurScb->SCB_CDB[1] = 0; 1138 scb->cdb[1] = 0;
1367 pCurScb->SCB_CDB[2] = 0; 1139 scb->cdb[2] = 0;
1368 pCurScb->SCB_CDB[3] = 0; 1140 scb->cdb[3] = 0;
1369 pCurScb->SCB_CDB[4] = len; 1141 scb->cdb[4] = len;
1370 pCurScb->SCB_CDB[5] = 0; 1142 scb->cdb[5] = 0;
1371 tul_push_pend_scb(pCurHcb, pCurScb); 1143 initio_push_pend_scb(host, scb);
1372 break; 1144 break;
1373 } 1145 }
1374 } 1146 }
1375 } else { /* in request sense mode */ 1147 } else { /* in request sense mode */
1376 1148
1377 if (pCurScb->SCB_TaStat == 2) { /* check contition status again after sending 1149 if (scb->tastat == 2) { /* check contition status again after sending
1378 requset sense cmd 0x3 */ 1150 requset sense cmd 0x3 */
1379 pCurScb->SCB_HaStat = HOST_BAD_PHAS; 1151 scb->hastat = HOST_BAD_PHAS;
1380 } 1152 }
1381 pCurScb->SCB_TaStat = 2; 1153 scb->tastat = 2;
1382 } 1154 }
1383 pCurScb->SCB_Flags |= SCF_DONE; 1155 scb->flags |= SCF_DONE;
1384 if (pCurScb->SCB_Flags & SCF_POST) { 1156 if (scb->flags & SCF_POST) {
1385 (*pCurScb->SCB_Post) ((BYTE *) pCurHcb, (BYTE *) pCurScb); 1157 /* FIXME: only one post method and lose casts */
1158 (*scb->post) ((u8 *) host, (u8 *) scb);
1386 } 1159 }
1387 } /* while */ 1160 } /* while */
1388
1389 /* find_active: */ 1161 /* find_active: */
1390 if (TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0) & TSS_INT_PENDING) 1162 if (inb(host->addr + TUL_SStatus0) & TSS_INT_PENDING)
1391 continue; 1163 continue;
1392 1164 if (host->active) /* return to OS and wait for xfer_done_ISR/Selected_ISR */
1393 if (pCurHcb->HCS_ActScb) { /* return to OS and wait for xfer_done_ISR/Selected_ISR */
1394 return 1; /* return to OS, enable interrupt */ 1165 return 1; /* return to OS, enable interrupt */
1395 }
1396 /* Check pending SCB */ 1166 /* Check pending SCB */
1397 if (tul_find_first_pend_scb(pCurHcb) == NULL) { 1167 if (initio_find_first_pend_scb(host) == NULL)
1398 return 1; /* return to OS, enable interrupt */ 1168 return 1; /* return to OS, enable interrupt */
1399 }
1400 } /* End of for loop */ 1169 } /* End of for loop */
1401 /* statement won't reach here */ 1170 /* statement won't reach here */
1402} 1171}
1403 1172
1404 1173static void tulip_scsi(struct initio_host * host)
1405
1406
1407/*+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ */
1408/***************************************************************************/
1409/***************************************************************************/
1410/***************************************************************************/
1411/***************************************************************************/
1412
1413/***************************************************************************/
1414void tulip_scsi(HCS * pCurHcb)
1415{ 1174{
1416 SCB *pCurScb; 1175 struct scsi_ctrl_blk *scb;
1417 TCS *pCurTcb; 1176 struct target_control *active_tc;
1418 1177
1419 /* make sure to service interrupt asap */ 1178 /* make sure to service interrupt asap */
1420 1179 if ((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING) {
1421 if ((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0)) & TSS_INT_PENDING) { 1180 host->phase = host->jsstatus0 & TSS_PH_MASK;
1422 1181 host->jsstatus1 = inb(host->addr + TUL_SStatus1);
1423 pCurHcb->HCS_Phase = pCurHcb->HCS_JSStatus0 & TSS_PH_MASK; 1182 host->jsint = inb(host->addr + TUL_SInt);
1424 pCurHcb->HCS_JSStatus1 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1); 1183 if (host->jsint & TSS_SCSIRST_INT) { /* SCSI bus reset detected */
1425 pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt); 1184 int_initio_scsi_rst(host);
1426 if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* SCSI bus reset detected */
1427 int_tul_scsi_rst(pCurHcb);
1428 return; 1185 return;
1429 } 1186 }
1430 if (pCurHcb->HCS_JSInt & TSS_RESEL_INT) { /* if selected/reselected interrupt */ 1187 if (host->jsint & TSS_RESEL_INT) { /* if selected/reselected interrupt */
1431 if (int_tul_resel(pCurHcb) == 0) 1188 if (int_initio_resel(host) == 0)
1432 tul_next_state(pCurHcb); 1189 initio_next_state(host);
1433 return; 1190 return;
1434 } 1191 }
1435 if (pCurHcb->HCS_JSInt & TSS_SEL_TIMEOUT) { 1192 if (host->jsint & TSS_SEL_TIMEOUT) {
1436 int_tul_busfree(pCurHcb); 1193 int_initio_busfree(host);
1437 return; 1194 return;
1438 } 1195 }
1439 if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */ 1196 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
1440 int_tul_busfree(pCurHcb); /* unexpected bus free or sel timeout */ 1197 int_initio_busfree(host); /* unexpected bus free or sel timeout */
1441 return; 1198 return;
1442 } 1199 }
1443 if (pCurHcb->HCS_JSInt & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */ 1200 if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV)) { /* func complete or Bus service */
1444 if ((pCurScb = pCurHcb->HCS_ActScb) != NULL) 1201 if ((scb = host->active) != NULL)
1445 tul_next_state(pCurHcb); 1202 initio_next_state(host);
1446 return; 1203 return;
1447 } 1204 }
1448 } 1205 }
1449 if (pCurHcb->HCS_ActScb != NULL) 1206 if (host->active != NULL)
1450 return; 1207 return;
1451 1208
1452 if ((pCurScb = tul_find_first_pend_scb(pCurHcb)) == NULL) 1209 if ((scb = initio_find_first_pend_scb(host)) == NULL)
1453 return; 1210 return;
1454 1211
1455 /* program HBA's SCSI ID & target SCSI ID */ 1212 /* program HBA's SCSI ID & target SCSI ID */
1456 TUL_WR(pCurHcb->HCS_Base + TUL_SScsiId, 1213 outb((host->scsi_id << 4) | (scb->target & 0x0F),
1457 (pCurHcb->HCS_SCSI_ID << 4) | (pCurScb->SCB_Target & 0x0F)); 1214 host->addr + TUL_SScsiId);
1458 if (pCurScb->SCB_Opcode == ExecSCSI) { 1215 if (scb->opcode == ExecSCSI) {
1459 pCurTcb = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target]; 1216 active_tc = &host->targets[scb->target];
1460 1217
1461 if (pCurScb->SCB_TagMsg) 1218 if (scb->tagmsg)
1462 pCurTcb->TCS_DrvFlags |= TCF_DRV_EN_TAG; 1219 active_tc->drv_flags |= TCF_DRV_EN_TAG;
1463 else 1220 else
1464 pCurTcb->TCS_DrvFlags &= ~TCF_DRV_EN_TAG; 1221 active_tc->drv_flags &= ~TCF_DRV_EN_TAG;
1465 1222
1466 TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurTcb->TCS_JS_Period); 1223 outb(active_tc->js_period, host->addr + TUL_SPeriod);
1467 if ((pCurTcb->TCS_Flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */ 1224 if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { /* do wdtr negotiation */
1468 tul_select_atn_stop(pCurHcb, pCurScb); 1225 initio_select_atn_stop(host, scb);
1469 } else { 1226 } else {
1470 if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */ 1227 if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync negotiation */
1471 tul_select_atn_stop(pCurHcb, pCurScb); 1228 initio_select_atn_stop(host, scb);
1472 } else { 1229 } else {
1473 if (pCurScb->SCB_TagMsg) 1230 if (scb->tagmsg)
1474 tul_select_atn3(pCurHcb, pCurScb); 1231 initio_select_atn3(host, scb);
1475 else 1232 else
1476 tul_select_atn(pCurHcb, pCurScb); 1233 initio_select_atn(host, scb);
1477 } 1234 }
1478 } 1235 }
1479 if (pCurScb->SCB_Flags & SCF_POLL) { 1236 if (scb->flags & SCF_POLL) {
1480 while (wait_tulip(pCurHcb) != -1) { 1237 while (wait_tulip(host) != -1) {
1481 if (tul_next_state(pCurHcb) == -1) 1238 if (initio_next_state(host) == -1)
1482 break; 1239 break;
1483 } 1240 }
1484 } 1241 }
1485 } else if (pCurScb->SCB_Opcode == BusDevRst) { 1242 } else if (scb->opcode == BusDevRst) {
1486 tul_select_atn_stop(pCurHcb, pCurScb); 1243 initio_select_atn_stop(host, scb);
1487 pCurScb->SCB_NxtStat = 8; 1244 scb->next_state = 8;
1488 if (pCurScb->SCB_Flags & SCF_POLL) { 1245 if (scb->flags & SCF_POLL) {
1489 while (wait_tulip(pCurHcb) != -1) { 1246 while (wait_tulip(host) != -1) {
1490 if (tul_next_state(pCurHcb) == -1) 1247 if (initio_next_state(host) == -1)
1491 break; 1248 break;
1492 } 1249 }
1493 } 1250 }
1494 } else if (pCurScb->SCB_Opcode == AbortCmd) { 1251 } else if (scb->opcode == AbortCmd) {
1495 if (tul_abort_srb(pCurHcb, pCurScb->SCB_Srb) != 0) { 1252 if (initio_abort_srb(host, scb->srb) != 0) {
1496 1253 initio_unlink_pend_scb(host, scb);
1497 1254 initio_release_scb(host, scb);
1498 tul_unlink_pend_scb(pCurHcb, pCurScb);
1499
1500 tul_release_scb(pCurHcb, pCurScb);
1501 } else { 1255 } else {
1502 pCurScb->SCB_Opcode = BusDevRst; 1256 scb->opcode = BusDevRst;
1503 tul_select_atn_stop(pCurHcb, pCurScb); 1257 initio_select_atn_stop(host, scb);
1504 pCurScb->SCB_NxtStat = 8; 1258 scb->next_state = 8;
1505 } 1259 }
1506
1507/* 08/03/98 */
1508 } else { 1260 } else {
1509 tul_unlink_pend_scb(pCurHcb, pCurScb); 1261 initio_unlink_pend_scb(host, scb);
1510 pCurScb->SCB_HaStat = 0x16; /* bad command */ 1262 scb->hastat = 0x16; /* bad command */
1511 tul_append_done_scb(pCurHcb, pCurScb); 1263 initio_append_done_scb(host, scb);
1512 } 1264 }
1513 return; 1265 return;
1514} 1266}
1515 1267
1268/**
1269 * initio_next_state - Next SCSI state
1270 * @host: InitIO host we are processing
1271 *
1272 * Progress the active command block along the state machine
1273 * until we hit a state which we must wait for activity to occur.
1274 *
1275 * Returns zero or a negative code.
1276 */
1516 1277
1517/***************************************************************************/ 1278static int initio_next_state(struct initio_host * host)
1518int tul_next_state(HCS * pCurHcb)
1519{ 1279{
1520 int next; 1280 int next;
1521 1281
1522 next = pCurHcb->HCS_ActScb->SCB_NxtStat; 1282 next = host->active->next_state;
1523 for (;;) { 1283 for (;;) {
1524 switch (next) { 1284 switch (next) {
1525 case 1: 1285 case 1:
1526 next = tul_state_1(pCurHcb); 1286 next = initio_state_1(host);
1527 break; 1287 break;
1528 case 2: 1288 case 2:
1529 next = tul_state_2(pCurHcb); 1289 next = initio_state_2(host);
1530 break; 1290 break;
1531 case 3: 1291 case 3:
1532 next = tul_state_3(pCurHcb); 1292 next = initio_state_3(host);
1533 break; 1293 break;
1534 case 4: 1294 case 4:
1535 next = tul_state_4(pCurHcb); 1295 next = initio_state_4(host);
1536 break; 1296 break;
1537 case 5: 1297 case 5:
1538 next = tul_state_5(pCurHcb); 1298 next = initio_state_5(host);
1539 break; 1299 break;
1540 case 6: 1300 case 6:
1541 next = tul_state_6(pCurHcb); 1301 next = initio_state_6(host);
1542 break; 1302 break;
1543 case 7: 1303 case 7:
1544 next = tul_state_7(pCurHcb); 1304 next = initio_state_7(host);
1545 break; 1305 break;
1546 case 8: 1306 case 8:
1547 return (tul_bus_device_reset(pCurHcb)); 1307 return initio_bus_device_reset(host);
1548 default: 1308 default:
1549 return (tul_bad_seq(pCurHcb)); 1309 return initio_bad_seq(host);
1550 } 1310 }
1551 if (next <= 0) 1311 if (next <= 0)
1552 return next; 1312 return next;
@@ -1554,338 +1314,363 @@ int tul_next_state(HCS * pCurHcb)
1554} 1314}
1555 1315
1556 1316
1557/***************************************************************************/ 1317/**
1558/* sTate after selection with attention & stop */ 1318 * initio_state_1 - SCSI state machine
1559int tul_state_1(HCS * pCurHcb) 1319 * @host: InitIO host we are controlling
1320 *
1321 * Perform SCSI state processing for Select/Attention/Stop
1322 */
1323
1324static int initio_state_1(struct initio_host * host)
1560{ 1325{
1561 SCB *pCurScb = pCurHcb->HCS_ActScb; 1326 struct scsi_ctrl_blk *scb = host->active;
1562 TCS *pCurTcb = pCurHcb->HCS_ActTcs; 1327 struct target_control *active_tc = host->active_tc;
1563#if DEBUG_STATE 1328#if DEBUG_STATE
1564 printk("-s1-"); 1329 printk("-s1-");
1565#endif 1330#endif
1566 1331
1567 tul_unlink_pend_scb(pCurHcb, pCurScb); 1332 /* Move the SCB from pending to busy */
1568 tul_append_busy_scb(pCurHcb, pCurScb); 1333 initio_unlink_pend_scb(host, scb);
1334 initio_append_busy_scb(host, scb);
1569 1335
1570 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0); 1336 outb(active_tc->sconfig0, host->addr + TUL_SConfig );
1571 /* ATN on */ 1337 /* ATN on */
1572 if (pCurHcb->HCS_Phase == MSG_OUT) { 1338 if (host->phase == MSG_OUT) {
1573 1339 outb(TSC_EN_BUS_IN | TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
1574 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, (TSC_EN_BUS_IN | TSC_HW_RESELECT)); 1340 outb(scb->ident, host->addr + TUL_SFifo);
1575 1341
1576 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident); 1342 if (scb->tagmsg) {
1577 1343 outb(scb->tagmsg, host->addr + TUL_SFifo);
1578 if (pCurScb->SCB_TagMsg) { 1344 outb(scb->tagid, host->addr + TUL_SFifo);
1579 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagMsg); 1345 }
1580 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagId); 1346 if ((active_tc->flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) {
1581 } 1347 active_tc->flags |= TCF_WDTR_DONE;
1582 if ((pCurTcb->TCS_Flags & (TCF_WDTR_DONE | TCF_NO_WDTR)) == 0) { 1348 outb(MSG_EXTEND, host->addr + TUL_SFifo);
1583 1349 outb(2, host->addr + TUL_SFifo); /* Extended msg length */
1584 pCurTcb->TCS_Flags |= TCF_WDTR_DONE; 1350 outb(3, host->addr + TUL_SFifo); /* Sync request */
1585 1351 outb(1, host->addr + TUL_SFifo); /* Start from 16 bits */
1586 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND); 1352 } else if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) {
1587 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 2); /* Extended msg length */ 1353 active_tc->flags |= TCF_SYNC_DONE;
1588 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); /* Sync request */ 1354 outb(MSG_EXTEND, host->addr + TUL_SFifo);
1589 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); /* Start from 16 bits */ 1355 outb(3, host->addr + TUL_SFifo); /* extended msg length */
1590 } else if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { 1356 outb(1, host->addr + TUL_SFifo); /* sync request */
1591 1357 outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
1592 pCurTcb->TCS_Flags |= TCF_SYNC_DONE; 1358 outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
1593 1359 }
1594 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND); 1360 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
1595 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); /* extended msg length */ 1361 if (wait_tulip(host) == -1)
1596 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); /* sync request */ 1362 return -1;
1597 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, tul_rate_tbl[pCurTcb->TCS_Flags & TCF_SCSI_RATE]); 1363 }
1598 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MAX_OFFSET); /* REQ/ACK offset */ 1364 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
1599 } 1365 outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
1600 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1366 /* Into before CDB xfer */
1601 if (wait_tulip(pCurHcb) == -1) 1367 return 3;
1602 return (-1); 1368}
1603 } 1369
1604 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1370
1605 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7))); 1371/**
1606 return (3); 1372 * initio_state_2 - SCSI state machine
1607} 1373 * @host: InitIO host we are controlling
1608 1374 *
1375 * state after selection with attention
1376 * state after selection with attention3
1377 */
1609 1378
1610/***************************************************************************/ 1379static int initio_state_2(struct initio_host * host)
1611/* state after selection with attention */
1612/* state after selection with attention3 */
1613int tul_state_2(HCS * pCurHcb)
1614{ 1380{
1615 SCB *pCurScb = pCurHcb->HCS_ActScb; 1381 struct scsi_ctrl_blk *scb = host->active;
1616 TCS *pCurTcb = pCurHcb->HCS_ActTcs; 1382 struct target_control *active_tc = host->active_tc;
1617#if DEBUG_STATE 1383#if DEBUG_STATE
1618 printk("-s2-"); 1384 printk("-s2-");
1619#endif 1385#endif
1620 1386
1621 tul_unlink_pend_scb(pCurHcb, pCurScb); 1387 initio_unlink_pend_scb(host, scb);
1622 tul_append_busy_scb(pCurHcb, pCurScb); 1388 initio_append_busy_scb(host, scb);
1623 1389
1624 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0); 1390 outb(active_tc->sconfig0, host->addr + TUL_SConfig);
1625 1391
1626 if (pCurHcb->HCS_JSStatus1 & TSS_CMD_PH_CMP) { 1392 if (host->jsstatus1 & TSS_CMD_PH_CMP)
1627 return (4); 1393 return 4;
1628 } 1394
1629 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1395 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
1630 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7))); 1396 outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)), host->addr + TUL_SSignal);
1631 return (3); 1397 /* Into before CDB xfer */
1398 return 3;
1632} 1399}
1633 1400
1634/***************************************************************************/ 1401/**
1635/* state before CDB xfer is done */ 1402 * initio_state_3 - SCSI state machine
1636int tul_state_3(HCS * pCurHcb) 1403 * @host: InitIO host we are controlling
1404 *
1405 * state before CDB xfer is done
1406 */
1407
1408static int initio_state_3(struct initio_host * host)
1637{ 1409{
1638 SCB *pCurScb = pCurHcb->HCS_ActScb; 1410 struct scsi_ctrl_blk *scb = host->active;
1639 TCS *pCurTcb = pCurHcb->HCS_ActTcs; 1411 struct target_control *active_tc = host->active_tc;
1640 int i; 1412 int i;
1641 1413
1642#if DEBUG_STATE 1414#if DEBUG_STATE
1643 printk("-s3-"); 1415 printk("-s3-");
1644#endif 1416#endif
1645 for (;;) { 1417 for (;;) {
1646 switch (pCurHcb->HCS_Phase) { 1418 switch (host->phase) {
1647 case CMD_OUT: /* Command out phase */ 1419 case CMD_OUT: /* Command out phase */
1648 for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++) 1420 for (i = 0; i < (int) scb->cdblen; i++)
1649 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]); 1421 outb(scb->cdb[i], host->addr + TUL_SFifo);
1650 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1422 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
1651 if (wait_tulip(pCurHcb) == -1) 1423 if (wait_tulip(host) == -1)
1652 return (-1); 1424 return -1;
1653 if (pCurHcb->HCS_Phase == CMD_OUT) { 1425 if (host->phase == CMD_OUT)
1654 return (tul_bad_seq(pCurHcb)); 1426 return initio_bad_seq(host);
1655 } 1427 return 4;
1656 return (4);
1657 1428
1658 case MSG_IN: /* Message in phase */ 1429 case MSG_IN: /* Message in phase */
1659 pCurScb->SCB_NxtStat = 3; 1430 scb->next_state = 3;
1660 if (tul_msgin(pCurHcb) == -1) 1431 if (initio_msgin(host) == -1)
1661 return (-1); 1432 return -1;
1662 break; 1433 break;
1663 1434
1664 case STATUS_IN: /* Status phase */ 1435 case STATUS_IN: /* Status phase */
1665 if (tul_status_msg(pCurHcb) == -1) 1436 if (initio_status_msg(host) == -1)
1666 return (-1); 1437 return -1;
1667 break; 1438 break;
1668 1439
1669 case MSG_OUT: /* Message out phase */ 1440 case MSG_OUT: /* Message out phase */
1670 if (pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) { 1441 if (active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) {
1671 1442 outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
1672 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); /* msg nop */ 1443 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
1673 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1444 if (wait_tulip(host) == -1)
1674 if (wait_tulip(pCurHcb) == -1) 1445 return -1;
1675 return (-1);
1676
1677 } else { 1446 } else {
1678 pCurTcb->TCS_Flags |= TCF_SYNC_DONE; 1447 active_tc->flags |= TCF_SYNC_DONE;
1679 1448
1680 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND); 1449 outb(MSG_EXTEND, host->addr + TUL_SFifo);
1681 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); /* ext. msg len */ 1450 outb(3, host->addr + TUL_SFifo); /* ext. msg len */
1682 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); /* sync request */ 1451 outb(1, host->addr + TUL_SFifo); /* sync request */
1683 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, tul_rate_tbl[pCurTcb->TCS_Flags & TCF_SCSI_RATE]); 1452 outb(initio_rate_tbl[active_tc->flags & TCF_SCSI_RATE], host->addr + TUL_SFifo);
1684 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MAX_OFFSET); /* REQ/ACK offset */ 1453 outb(MAX_OFFSET, host->addr + TUL_SFifo); /* REQ/ACK offset */
1685 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1454 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
1686 if (wait_tulip(pCurHcb) == -1) 1455 if (wait_tulip(host) == -1)
1687 return (-1); 1456 return -1;
1688 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1457 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
1689 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)); 1458 outb(inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7), host->addr + TUL_SSignal);
1690 1459
1691 } 1460 }
1692 break; 1461 break;
1693
1694 default: 1462 default:
1695 return (tul_bad_seq(pCurHcb)); 1463 return initio_bad_seq(host);
1696 } 1464 }
1697 } 1465 }
1698} 1466}
1699 1467
1468/**
1469 * initio_state_4 - SCSI state machine
1470 * @host: InitIO host we are controlling
1471 *
1472 * SCSI state machine. State 4
1473 */
1700 1474
1701/***************************************************************************/ 1475static int initio_state_4(struct initio_host * host)
1702int tul_state_4(HCS * pCurHcb)
1703{ 1476{
1704 SCB *pCurScb = pCurHcb->HCS_ActScb; 1477 struct scsi_ctrl_blk *scb = host->active;
1705 1478
1706#if DEBUG_STATE 1479#if DEBUG_STATE
1707 printk("-s4-"); 1480 printk("-s4-");
1708#endif 1481#endif
1709 if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_NO_XF) { 1482 if ((scb->flags & SCF_DIR) == SCF_NO_XF) {
1710 return (6); /* Go to state 6 */ 1483 return 6; /* Go to state 6 (After data) */
1711 } 1484 }
1712 for (;;) { 1485 for (;;) {
1713 if (pCurScb->SCB_BufLen == 0) 1486 if (scb->buflen == 0)
1714 return (6); /* Go to state 6 */ 1487 return 6;
1715 1488
1716 switch (pCurHcb->HCS_Phase) { 1489 switch (host->phase) {
1717 1490
1718 case STATUS_IN: /* Status phase */ 1491 case STATUS_IN: /* Status phase */
1719 if ((pCurScb->SCB_Flags & SCF_DIR) != 0) { /* if direction bit set then report data underrun */ 1492 if ((scb->flags & SCF_DIR) != 0) /* if direction bit set then report data underrun */
1720 pCurScb->SCB_HaStat = HOST_DO_DU; 1493 scb->hastat = HOST_DO_DU;
1721 } 1494 if ((initio_status_msg(host)) == -1)
1722 if ((tul_status_msg(pCurHcb)) == -1) 1495 return -1;
1723 return (-1);
1724 break; 1496 break;
1725 1497
1726 case MSG_IN: /* Message in phase */ 1498 case MSG_IN: /* Message in phase */
1727 pCurScb->SCB_NxtStat = 0x4; 1499 scb->next_state = 0x4;
1728 if (tul_msgin(pCurHcb) == -1) 1500 if (initio_msgin(host) == -1)
1729 return (-1); 1501 return -1;
1730 break; 1502 break;
1731 1503
1732 case MSG_OUT: /* Message out phase */ 1504 case MSG_OUT: /* Message out phase */
1733 if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) { 1505 if (host->jsstatus0 & TSS_PAR_ERROR) {
1734 pCurScb->SCB_BufLen = 0; 1506 scb->buflen = 0;
1735 pCurScb->SCB_HaStat = HOST_DO_DU; 1507 scb->hastat = HOST_DO_DU;
1736 if (tul_msgout_ide(pCurHcb) == -1) 1508 if (initio_msgout_ide(host) == -1)
1737 return (-1); 1509 return -1;
1738 return (6); /* Go to state 6 */ 1510 return 6;
1739 } else { 1511 } else {
1740 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); /* msg nop */ 1512 outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
1741 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1513 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
1742 if (wait_tulip(pCurHcb) == -1) 1514 if (wait_tulip(host) == -1)
1743 return (-1); 1515 return -1;
1744 } 1516 }
1745 break; 1517 break;
1746 1518
1747 case DATA_IN: /* Data in phase */ 1519 case DATA_IN: /* Data in phase */
1748 return (tul_xfer_data_in(pCurHcb)); 1520 return initio_xfer_data_in(host);
1749 1521
1750 case DATA_OUT: /* Data out phase */ 1522 case DATA_OUT: /* Data out phase */
1751 return (tul_xfer_data_out(pCurHcb)); 1523 return initio_xfer_data_out(host);
1752 1524
1753 default: 1525 default:
1754 return (tul_bad_seq(pCurHcb)); 1526 return initio_bad_seq(host);
1755 } 1527 }
1756 } 1528 }
1757} 1529}
1758 1530
1759 1531
1760/***************************************************************************/ 1532/**
1761/* state after dma xfer done or phase change before xfer done */ 1533 * initio_state_5 - SCSI state machine
1762int tul_state_5(HCS * pCurHcb) 1534 * @host: InitIO host we are controlling
1535 *
1536 * State after dma xfer done or phase change before xfer done
1537 */
1538
1539static int initio_state_5(struct initio_host * host)
1763{ 1540{
1764 SCB *pCurScb = pCurHcb->HCS_ActScb; 1541 struct scsi_ctrl_blk *scb = host->active;
1765 long cnt, xcnt; /* cannot use unsigned !! code: if (xcnt < 0) */ 1542 long cnt, xcnt; /* cannot use unsigned !! code: if (xcnt < 0) */
1766 1543
1767#if DEBUG_STATE 1544#if DEBUG_STATE
1768 printk("-s5-"); 1545 printk("-s5-");
1769#endif 1546#endif
1770/*------ get remaining count -------*/ 1547 /*------ get remaining count -------*/
1771 1548 cnt = inl(host->addr + TUL_SCnt0) & 0x0FFFFFF;
1772 cnt = TUL_RDLONG(pCurHcb->HCS_Base, TUL_SCnt0) & 0x0FFFFFF;
1773 1549
1774 if (TUL_RD(pCurHcb->HCS_Base, TUL_XCmd) & 0x20) { 1550 if (inb(host->addr + TUL_XCmd) & 0x20) {
1775 /* ----------------------- DATA_IN ----------------------------- */ 1551 /* ----------------------- DATA_IN ----------------------------- */
1776 /* check scsi parity error */ 1552 /* check scsi parity error */
1777 if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) { 1553 if (host->jsstatus0 & TSS_PAR_ERROR)
1778 pCurScb->SCB_HaStat = HOST_DO_DU; 1554 scb->hastat = HOST_DO_DU;
1779 } 1555 if (inb(host->addr + TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */
1780 if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* DMA xfer pending, Send STOP */
1781 /* tell Hardware scsi xfer has been terminated */ 1556 /* tell Hardware scsi xfer has been terminated */
1782 TUL_WR(pCurHcb->HCS_Base + TUL_XCtrl, TUL_RD(pCurHcb->HCS_Base, TUL_XCtrl) | 0x80); 1557 outb(inb(host->addr + TUL_XCtrl) | 0x80, host->addr + TUL_XCtrl);
1783 /* wait until DMA xfer not pending */ 1558 /* wait until DMA xfer not pending */
1784 while (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND); 1559 while (inb(host->addr + TUL_XStatus) & XPEND)
1560 cpu_relax();
1785 } 1561 }
1786 } else { 1562 } else {
1787/*-------- DATA OUT -----------*/ 1563 /*-------- DATA OUT -----------*/
1788 if ((TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1) & TSS_XFER_CMP) == 0) { 1564 if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0) {
1789 if (pCurHcb->HCS_ActTcs->TCS_JS_Period & TSC_WIDE_SCSI) 1565 if (host->active_tc->js_period & TSC_WIDE_SCSI)
1790 cnt += (TUL_RD(pCurHcb->HCS_Base, TUL_SFifoCnt) & 0x1F) << 1; 1566 cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F) << 1;
1791 else 1567 else
1792 cnt += (TUL_RD(pCurHcb->HCS_Base, TUL_SFifoCnt) & 0x1F); 1568 cnt += (inb(host->addr + TUL_SFifoCnt) & 0x1F);
1793 } 1569 }
1794 if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */ 1570 if (inb(host->addr + TUL_XStatus) & XPEND) { /* if DMA xfer is pending, abort DMA xfer */
1795 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT); 1571 outb(TAX_X_ABT, host->addr + TUL_XCmd);
1796 /* wait Abort DMA xfer done */ 1572 /* wait Abort DMA xfer done */
1797 while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & XABT) == 0); 1573 while ((inb(host->addr + TUL_Int) & XABT) == 0)
1574 cpu_relax();
1798 } 1575 }
1799 if ((cnt == 1) && (pCurHcb->HCS_Phase == DATA_OUT)) { 1576 if ((cnt == 1) && (host->phase == DATA_OUT)) {
1800 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1577 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
1801 if (wait_tulip(pCurHcb) == -1) { 1578 if (wait_tulip(host) == -1)
1802 return (-1); 1579 return -1;
1803 }
1804 cnt = 0; 1580 cnt = 0;
1805 } else { 1581 } else {
1806 if ((TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1) & TSS_XFER_CMP) == 0) 1582 if ((inb(host->addr + TUL_SStatus1) & TSS_XFER_CMP) == 0)
1807 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1583 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
1808 } 1584 }
1809 } 1585 }
1810
1811 if (cnt == 0) { 1586 if (cnt == 0) {
1812 pCurScb->SCB_BufLen = 0; 1587 scb->buflen = 0;
1813 return (6); /* Go to state 6 */ 1588 return 6; /* After Data */
1814 } 1589 }
1815 /* Update active data pointer */ 1590 /* Update active data pointer */
1816 xcnt = (long) pCurScb->SCB_BufLen - cnt; /* xcnt== bytes already xferred */ 1591 xcnt = (long) scb->buflen - cnt; /* xcnt== bytes already xferred */
1817 pCurScb->SCB_BufLen = (U32) cnt; /* cnt == bytes left to be xferred */ 1592 scb->buflen = (u32) cnt; /* cnt == bytes left to be xferred */
1818 if (pCurScb->SCB_Flags & SCF_SG) { 1593 if (scb->flags & SCF_SG) {
1819 register SG *sgp; 1594 struct sg_entry *sgp;
1820 ULONG i; 1595 unsigned long i;
1821 1596
1822 sgp = &pCurScb->SCB_SGList[pCurScb->SCB_SGIdx]; 1597 sgp = &scb->sglist[scb->sgidx];
1823 for (i = pCurScb->SCB_SGIdx; i < pCurScb->SCB_SGMax; sgp++, i++) { 1598 for (i = scb->sgidx; i < scb->sgmax; sgp++, i++) {
1824 xcnt -= (long) sgp->SG_Len; 1599 xcnt -= (long) sgp->len;
1825 if (xcnt < 0) { /* this sgp xfer half done */ 1600 if (xcnt < 0) { /* this sgp xfer half done */
1826 xcnt += (long) sgp->SG_Len; /* xcnt == bytes xferred in this sgp */ 1601 xcnt += (long) sgp->len; /* xcnt == bytes xferred in this sgp */
1827 sgp->SG_Ptr += (U32) xcnt; /* new ptr to be xfer */ 1602 sgp->data += (u32) xcnt; /* new ptr to be xfer */
1828 sgp->SG_Len -= (U32) xcnt; /* new len to be xfer */ 1603 sgp->len -= (u32) xcnt; /* new len to be xfer */
1829 pCurScb->SCB_BufPtr += ((U32) (i - pCurScb->SCB_SGIdx) << 3); 1604 scb->bufptr += ((u32) (i - scb->sgidx) << 3);
1830 /* new SG table ptr */ 1605 /* new SG table ptr */
1831 pCurScb->SCB_SGLen = (BYTE) (pCurScb->SCB_SGMax - i); 1606 scb->sglen = (u8) (scb->sgmax - i);
1832 /* new SG table len */ 1607 /* new SG table len */
1833 pCurScb->SCB_SGIdx = (WORD) i; 1608 scb->sgidx = (u16) i;
1834 /* for next disc and come in this loop */ 1609 /* for next disc and come in this loop */
1835 return (4); /* Go to state 4 */ 1610 return 4; /* Go to state 4 */
1836 } 1611 }
1837 /* else (xcnt >= 0 , i.e. this sgp already xferred */ 1612 /* else (xcnt >= 0 , i.e. this sgp already xferred */
1838 } /* for */ 1613 } /* for */
1839 return (6); /* Go to state 6 */ 1614 return 6; /* Go to state 6 */
1840 } else { 1615 } else {
1841 pCurScb->SCB_BufPtr += (U32) xcnt; 1616 scb->bufptr += (u32) xcnt;
1842 } 1617 }
1843 return (4); /* Go to state 4 */ 1618 return 4; /* Go to state 4 */
1844} 1619}
1845 1620
1846/***************************************************************************/ 1621/**
1847/* state after Data phase */ 1622 * initio_state_6 - SCSI state machine
1848int tul_state_6(HCS * pCurHcb) 1623 * @host: InitIO host we are controlling
1624 *
1625 * State after Data phase
1626 */
1627
1628static int initio_state_6(struct initio_host * host)
1849{ 1629{
1850 SCB *pCurScb = pCurHcb->HCS_ActScb; 1630 struct scsi_ctrl_blk *scb = host->active;
1851 1631
1852#if DEBUG_STATE 1632#if DEBUG_STATE
1853 printk("-s6-"); 1633 printk("-s6-");
1854#endif 1634#endif
1855 for (;;) { 1635 for (;;) {
1856 switch (pCurHcb->HCS_Phase) { 1636 switch (host->phase) {
1857 case STATUS_IN: /* Status phase */ 1637 case STATUS_IN: /* Status phase */
1858 if ((tul_status_msg(pCurHcb)) == -1) 1638 if ((initio_status_msg(host)) == -1)
1859 return (-1); 1639 return -1;
1860 break; 1640 break;
1861 1641
1862 case MSG_IN: /* Message in phase */ 1642 case MSG_IN: /* Message in phase */
1863 pCurScb->SCB_NxtStat = 6; 1643 scb->next_state = 6;
1864 if ((tul_msgin(pCurHcb)) == -1) 1644 if ((initio_msgin(host)) == -1)
1865 return (-1); 1645 return -1;
1866 break; 1646 break;
1867 1647
1868 case MSG_OUT: /* Message out phase */ 1648 case MSG_OUT: /* Message out phase */
1869 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); /* msg nop */ 1649 outb(MSG_NOP, host->addr + TUL_SFifo); /* msg nop */
1870 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1650 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
1871 if (wait_tulip(pCurHcb) == -1) 1651 if (wait_tulip(host) == -1)
1872 return (-1); 1652 return -1;
1873 break; 1653 break;
1874 1654
1875 case DATA_IN: /* Data in phase */ 1655 case DATA_IN: /* Data in phase */
1876 return (tul_xpad_in(pCurHcb)); 1656 return initio_xpad_in(host);
1877 1657
1878 case DATA_OUT: /* Data out phase */ 1658 case DATA_OUT: /* Data out phase */
1879 return (tul_xpad_out(pCurHcb)); 1659 return initio_xpad_out(host);
1880 1660
1881 default: 1661 default:
1882 return (tul_bad_seq(pCurHcb)); 1662 return initio_bad_seq(host);
1883 } 1663 }
1884 } 1664 }
1885} 1665}
1886 1666
1887/***************************************************************************/ 1667/**
1888int tul_state_7(HCS * pCurHcb) 1668 * initio_state_7 - SCSI state machine
1669 * @host: InitIO host we are controlling
1670 *
1671 */
1672
1673int initio_state_7(struct initio_host * host)
1889{ 1674{
1890 int cnt, i; 1675 int cnt, i;
1891 1676
@@ -1893,1139 +1678,1029 @@ int tul_state_7(HCS * pCurHcb)
1893 printk("-s7-"); 1678 printk("-s7-");
1894#endif 1679#endif
1895 /* flush SCSI FIFO */ 1680 /* flush SCSI FIFO */
1896 cnt = TUL_RD(pCurHcb->HCS_Base, TUL_SFifoCnt) & 0x1F; 1681 cnt = inb(host->addr + TUL_SFifoCnt) & 0x1F;
1897 if (cnt) { 1682 if (cnt) {
1898 for (i = 0; i < cnt; i++) 1683 for (i = 0; i < cnt; i++)
1899 TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); 1684 inb(host->addr + TUL_SFifo);
1900 } 1685 }
1901 switch (pCurHcb->HCS_Phase) { 1686 switch (host->phase) {
1902 case DATA_IN: /* Data in phase */ 1687 case DATA_IN: /* Data in phase */
1903 case DATA_OUT: /* Data out phase */ 1688 case DATA_OUT: /* Data out phase */
1904 return (tul_bad_seq(pCurHcb)); 1689 return initio_bad_seq(host);
1905 default: 1690 default:
1906 return (6); /* Go to state 6 */ 1691 return 6; /* Go to state 6 */
1907 } 1692 }
1908} 1693}
1909 1694
1910/***************************************************************************/ 1695/**
1911int tul_xfer_data_in(HCS * pCurHcb) 1696 * initio_xfer_data_in - Commence data input
1697 * @host: InitIO host in use
1698 *
1699 * Commence a block of data transfer. The transfer itself will
1700 * be managed by the controller and we will get a completion (or
1701 * failure) interrupt.
1702 */
1703static int initio_xfer_data_in(struct initio_host * host)
1912{ 1704{
1913 SCB *pCurScb = pCurHcb->HCS_ActScb; 1705 struct scsi_ctrl_blk *scb = host->active;
1914 1706
1915 if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_DOUT) { 1707 if ((scb->flags & SCF_DIR) == SCF_DOUT)
1916 return (6); /* wrong direction */ 1708 return 6; /* wrong direction */
1917 }
1918 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, pCurScb->SCB_BufLen);
1919 1709
1920 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_DMA_IN); /* 7/25/95 */ 1710 outl(scb->buflen, host->addr + TUL_SCnt0);
1711 outb(TSC_XF_DMA_IN, host->addr + TUL_SCmd); /* 7/25/95 */
1921 1712
1922 if (pCurScb->SCB_Flags & SCF_SG) { /* S/G xfer */ 1713 if (scb->flags & SCF_SG) { /* S/G xfer */
1923 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, ((ULONG) pCurScb->SCB_SGLen) << 3); 1714 outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
1924 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr); 1715 outl(scb->bufptr, host->addr + TUL_XAddH);
1925 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_SG_IN); 1716 outb(TAX_SG_IN, host->addr + TUL_XCmd);
1926 } else { 1717 } else {
1927 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, pCurScb->SCB_BufLen); 1718 outl(scb->buflen, host->addr + TUL_XCntH);
1928 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr); 1719 outl(scb->bufptr, host->addr + TUL_XAddH);
1929 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_IN); 1720 outb(TAX_X_IN, host->addr + TUL_XCmd);
1930 } 1721 }
1931 pCurScb->SCB_NxtStat = 0x5; 1722 scb->next_state = 0x5;
1932 return (0); /* return to OS, wait xfer done , let jas_isr come in */ 1723 return 0; /* return to OS, wait xfer done , let jas_isr come in */
1933} 1724}
1934 1725
1726/**
1727 * initio_xfer_data_out - Commence data output
1728 * @host: InitIO host in use
1729 *
1730 * Commence a block of data transfer. The transfer itself will
1731 * be managed by the controller and we will get a completion (or
1732 * failure) interrupt.
1733 */
1935 1734
1936/***************************************************************************/ 1735static int initio_xfer_data_out(struct initio_host * host)
1937int tul_xfer_data_out(HCS * pCurHcb)
1938{ 1736{
1939 SCB *pCurScb = pCurHcb->HCS_ActScb; 1737 struct scsi_ctrl_blk *scb = host->active;
1940 1738
1941 if ((pCurScb->SCB_Flags & SCF_DIR) == SCF_DIN) { 1739 if ((scb->flags & SCF_DIR) == SCF_DIN)
1942 return (6); /* wrong direction */ 1740 return 6; /* wrong direction */
1943 }
1944 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, pCurScb->SCB_BufLen);
1945 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_DMA_OUT);
1946 1741
1947 if (pCurScb->SCB_Flags & SCF_SG) { /* S/G xfer */ 1742 outl(scb->buflen, host->addr + TUL_SCnt0);
1948 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, ((ULONG) pCurScb->SCB_SGLen) << 3); 1743 outb(TSC_XF_DMA_OUT, host->addr + TUL_SCmd);
1949 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr); 1744
1950 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_SG_OUT); 1745 if (scb->flags & SCF_SG) { /* S/G xfer */
1746 outl(((u32) scb->sglen) << 3, host->addr + TUL_XCntH);
1747 outl(scb->bufptr, host->addr + TUL_XAddH);
1748 outb(TAX_SG_OUT, host->addr + TUL_XCmd);
1951 } else { 1749 } else {
1952 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XCntH, pCurScb->SCB_BufLen); 1750 outl(scb->buflen, host->addr + TUL_XCntH);
1953 TUL_WRLONG(pCurHcb->HCS_Base + TUL_XAddH, pCurScb->SCB_BufPtr); 1751 outl(scb->bufptr, host->addr + TUL_XAddH);
1954 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_OUT); 1752 outb(TAX_X_OUT, host->addr + TUL_XCmd);
1955 } 1753 }
1956 1754
1957 pCurScb->SCB_NxtStat = 0x5; 1755 scb->next_state = 0x5;
1958 return (0); /* return to OS, wait xfer done , let jas_isr come in */ 1756 return 0; /* return to OS, wait xfer done , let jas_isr come in */
1959} 1757}
1960 1758
1961 1759int initio_xpad_in(struct initio_host * host)
1962/***************************************************************************/
1963int tul_xpad_in(HCS * pCurHcb)
1964{ 1760{
1965 SCB *pCurScb = pCurHcb->HCS_ActScb; 1761 struct scsi_ctrl_blk *scb = host->active;
1966 TCS *pCurTcb = pCurHcb->HCS_ActTcs; 1762 struct target_control *active_tc = host->active_tc;
1967 1763
1968 if ((pCurScb->SCB_Flags & SCF_DIR) != SCF_NO_DCHK) { 1764 if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
1969 pCurScb->SCB_HaStat = HOST_DO_DU; /* over run */ 1765 scb->hastat = HOST_DO_DU; /* over run */
1970 }
1971 for (;;) { 1766 for (;;) {
1972 if (pCurTcb->TCS_JS_Period & TSC_WIDE_SCSI) 1767 if (active_tc->js_period & TSC_WIDE_SCSI)
1973 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 2); 1768 outl(2, host->addr + TUL_SCnt0);
1974 else 1769 else
1975 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 1770 outl(1, host->addr + TUL_SCnt0);
1976 1771
1977 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 1772 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
1978 if ((wait_tulip(pCurHcb)) == -1) { 1773 if (wait_tulip(host) == -1)
1979 return (-1); 1774 return -1;
1775 if (host->phase != DATA_IN) {
1776 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
1777 return 6;
1980 } 1778 }
1981 if (pCurHcb->HCS_Phase != DATA_IN) { 1779 inb(host->addr + TUL_SFifo);
1982 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO);
1983 return (6);
1984 }
1985 TUL_RD(pCurHcb->HCS_Base, TUL_SFifo);
1986 } 1780 }
1987} 1781}
1988 1782
1989int tul_xpad_out(HCS * pCurHcb) 1783int initio_xpad_out(struct initio_host * host)
1990{ 1784{
1991 SCB *pCurScb = pCurHcb->HCS_ActScb; 1785 struct scsi_ctrl_blk *scb = host->active;
1992 TCS *pCurTcb = pCurHcb->HCS_ActTcs; 1786 struct target_control *active_tc = host->active_tc;
1993 1787
1994 if ((pCurScb->SCB_Flags & SCF_DIR) != SCF_NO_DCHK) { 1788 if ((scb->flags & SCF_DIR) != SCF_NO_DCHK)
1995 pCurScb->SCB_HaStat = HOST_DO_DU; /* over run */ 1789 scb->hastat = HOST_DO_DU; /* over run */
1996 }
1997 for (;;) { 1790 for (;;) {
1998 if (pCurTcb->TCS_JS_Period & TSC_WIDE_SCSI) 1791 if (active_tc->js_period & TSC_WIDE_SCSI)
1999 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 2); 1792 outl(2, host->addr + TUL_SCnt0);
2000 else 1793 else
2001 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 1794 outl(1, host->addr + TUL_SCnt0);
2002 1795
2003 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 0); 1796 outb(0, host->addr + TUL_SFifo);
2004 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1797 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2005 if ((wait_tulip(pCurHcb)) == -1) { 1798 if ((wait_tulip(host)) == -1)
2006 return (-1); 1799 return -1;
2007 } 1800 if (host->phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */
2008 if (pCurHcb->HCS_Phase != DATA_OUT) { /* Disable wide CPU to allow read 16 bits */ 1801 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1);
2009 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); 1802 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2010 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1803 return 6;
2011 return (6);
2012 } 1804 }
2013 } 1805 }
2014} 1806}
2015 1807
2016 1808int initio_status_msg(struct initio_host * host)
2017/***************************************************************************/
2018int tul_status_msg(HCS * pCurHcb)
2019{ /* status & MSG_IN */ 1809{ /* status & MSG_IN */
2020 SCB *pCurScb = pCurHcb->HCS_ActScb; 1810 struct scsi_ctrl_blk *scb = host->active;
2021 BYTE msg; 1811 u8 msg;
1812
1813 outb(TSC_CMD_COMP, host->addr + TUL_SCmd);
1814 if (wait_tulip(host) == -1)
1815 return -1;
2022 1816
2023 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_CMD_COMP);
2024 if ((wait_tulip(pCurHcb)) == -1) {
2025 return (-1);
2026 }
2027 /* get status */ 1817 /* get status */
2028 pCurScb->SCB_TaStat = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); 1818 scb->tastat = inb(host->addr + TUL_SFifo);
2029 1819
2030 if (pCurHcb->HCS_Phase == MSG_OUT) { 1820 if (host->phase == MSG_OUT) {
2031 if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) { 1821 if (host->jsstatus0 & TSS_PAR_ERROR)
2032 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_PARITY); 1822 outb(MSG_PARITY, host->addr + TUL_SFifo);
2033 } else { 1823 else
2034 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_NOP); 1824 outb(MSG_NOP, host->addr + TUL_SFifo);
2035 } 1825 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2036 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1826 return wait_tulip(host);
2037 return (wait_tulip(pCurHcb)); 1827 }
2038 } 1828 if (host->phase == MSG_IN) {
2039 if (pCurHcb->HCS_Phase == MSG_IN) { 1829 msg = inb(host->addr + TUL_SFifo);
2040 msg = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); 1830 if (host->jsstatus0 & TSS_PAR_ERROR) { /* Parity error */
2041 if (pCurHcb->HCS_JSStatus0 & TSS_PAR_ERROR) { /* Parity error */ 1831 if ((initio_msgin_accept(host)) == -1)
2042 if ((tul_msgin_accept(pCurHcb)) == -1) 1832 return -1;
2043 return (-1); 1833 if (host->phase != MSG_OUT)
2044 if (pCurHcb->HCS_Phase != MSG_OUT) 1834 return initio_bad_seq(host);
2045 return (tul_bad_seq(pCurHcb)); 1835 outb(MSG_PARITY, host->addr + TUL_SFifo);
2046 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_PARITY); 1836 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2047 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 1837 return wait_tulip(host);
2048 return (wait_tulip(pCurHcb));
2049 } 1838 }
2050 if (msg == 0) { /* Command complete */ 1839 if (msg == 0) { /* Command complete */
2051 1840
2052 if ((pCurScb->SCB_TaStat & 0x18) == 0x10) { /* No link support */ 1841 if ((scb->tastat & 0x18) == 0x10) /* No link support */
2053 return (tul_bad_seq(pCurHcb)); 1842 return initio_bad_seq(host);
2054 } 1843 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2055 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1844 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
2056 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT); 1845 return initio_wait_done_disc(host);
2057 return tul_wait_done_disc(pCurHcb);
2058 1846
2059 } 1847 }
2060 if ((msg == MSG_LINK_COMP) || (msg == MSG_LINK_FLAG)) { 1848 if (msg == MSG_LINK_COMP || msg == MSG_LINK_FLAG) {
2061 if ((pCurScb->SCB_TaStat & 0x18) == 0x10) 1849 if ((scb->tastat & 0x18) == 0x10)
2062 return (tul_msgin_accept(pCurHcb)); 1850 return initio_msgin_accept(host);
2063 } 1851 }
2064 } 1852 }
2065 return (tul_bad_seq(pCurHcb)); 1853 return initio_bad_seq(host);
2066} 1854}
2067 1855
2068 1856
2069/***************************************************************************/
2070/* scsi bus free */ 1857/* scsi bus free */
2071int int_tul_busfree(HCS * pCurHcb) 1858int int_initio_busfree(struct initio_host * host)
2072{ 1859{
2073 SCB *pCurScb = pCurHcb->HCS_ActScb; 1860 struct scsi_ctrl_blk *scb = host->active;
2074 1861
2075 if (pCurScb != NULL) { 1862 if (scb != NULL) {
2076 if (pCurScb->SCB_Status & SCB_SELECT) { /* selection timeout */ 1863 if (scb->status & SCB_SELECT) { /* selection timeout */
2077 tul_unlink_pend_scb(pCurHcb, pCurScb); 1864 initio_unlink_pend_scb(host, scb);
2078 pCurScb->SCB_HaStat = HOST_SEL_TOUT; 1865 scb->hastat = HOST_SEL_TOUT;
2079 tul_append_done_scb(pCurHcb, pCurScb); 1866 initio_append_done_scb(host, scb);
2080 } else { /* Unexpected bus free */ 1867 } else { /* Unexpected bus free */
2081 tul_unlink_busy_scb(pCurHcb, pCurScb); 1868 initio_unlink_busy_scb(host, scb);
2082 pCurScb->SCB_HaStat = HOST_BUS_FREE; 1869 scb->hastat = HOST_BUS_FREE;
2083 tul_append_done_scb(pCurHcb, pCurScb); 1870 initio_append_done_scb(host, scb);
2084 } 1871 }
2085 pCurHcb->HCS_ActScb = NULL; 1872 host->active = NULL;
2086 pCurHcb->HCS_ActTcs = NULL; 1873 host->active_tc = NULL;
2087 } 1874 }
2088 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */ 1875 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2089 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT); 1876 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2090 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */ 1877 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2091 return (-1); 1878 return -1;
2092} 1879}
2093 1880
2094 1881
2095/***************************************************************************/ 1882/**
2096/* scsi bus reset */ 1883 * int_initio_scsi_rst - SCSI reset occurred
2097static int int_tul_scsi_rst(HCS * pCurHcb) 1884 * @host: Host seeing the reset
1885 *
1886 * A SCSI bus reset has occurred. Clean up any pending transfer
1887 * the hardware is doing by DMA and then abort all active and
1888 * disconnected commands. The mid layer should sort the rest out
1889 * for us
1890 */
1891
1892static int int_initio_scsi_rst(struct initio_host * host)
2098{ 1893{
2099 SCB *pCurScb; 1894 struct scsi_ctrl_blk *scb;
2100 int i; 1895 int i;
2101 1896
2102 /* if DMA xfer is pending, abort DMA xfer */ 1897 /* if DMA xfer is pending, abort DMA xfer */
2103 if (TUL_RD(pCurHcb->HCS_Base, TUL_XStatus) & 0x01) { 1898 if (inb(host->addr + TUL_XStatus) & 0x01) {
2104 TUL_WR(pCurHcb->HCS_Base + TUL_XCmd, TAX_X_ABT | TAX_X_CLR_FIFO); 1899 outb(TAX_X_ABT | TAX_X_CLR_FIFO, host->addr + TUL_XCmd);
2105 /* wait Abort DMA xfer done */ 1900 /* wait Abort DMA xfer done */
2106 while ((TUL_RD(pCurHcb->HCS_Base, TUL_Int) & 0x04) == 0); 1901 while ((inb(host->addr + TUL_Int) & 0x04) == 0)
2107 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 1902 cpu_relax();
1903 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2108 } 1904 }
2109 /* Abort all active & disconnected scb */ 1905 /* Abort all active & disconnected scb */
2110 while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) { 1906 while ((scb = initio_pop_busy_scb(host)) != NULL) {
2111 pCurScb->SCB_HaStat = HOST_BAD_PHAS; 1907 scb->hastat = HOST_BAD_PHAS;
2112 tul_append_done_scb(pCurHcb, pCurScb); 1908 initio_append_done_scb(host, scb);
2113 } 1909 }
2114 pCurHcb->HCS_ActScb = NULL; 1910 host->active = NULL;
2115 pCurHcb->HCS_ActTcs = NULL; 1911 host->active_tc = NULL;
2116 1912
2117 /* clr sync nego. done flag */ 1913 /* clr sync nego. done flag */
2118 for (i = 0; i < pCurHcb->HCS_MaxTar; i++) { 1914 for (i = 0; i < host->max_tar; i++)
2119 pCurHcb->HCS_Tcs[i].TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); 1915 host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
2120 } 1916 return -1;
2121 return (-1);
2122} 1917}
2123 1918
1919/**
1920 * int_initio_scsi_resel - Reselection occured
1921 * @host: InitIO host adapter
1922 *
1923 * A SCSI reselection event has been signalled and the interrupt
1924 * is now being processed. Work out which command block needs attention
1925 * and continue processing that command.
1926 */
2124 1927
2125/***************************************************************************/ 1928int int_initio_resel(struct initio_host * host)
2126/* scsi reselection */
2127int int_tul_resel(HCS * pCurHcb)
2128{ 1929{
2129 SCB *pCurScb; 1930 struct scsi_ctrl_blk *scb;
2130 TCS *pCurTcb; 1931 struct target_control *active_tc;
2131 BYTE tag, msg = 0; 1932 u8 tag, msg = 0;
2132 BYTE tar, lun; 1933 u8 tar, lun;
2133 1934
2134 if ((pCurScb = pCurHcb->HCS_ActScb) != NULL) { 1935 if ((scb = host->active) != NULL) {
2135 if (pCurScb->SCB_Status & SCB_SELECT) { /* if waiting for selection complete */ 1936 /* FIXME: Why check and not just clear ? */
2136 pCurScb->SCB_Status &= ~SCB_SELECT; 1937 if (scb->status & SCB_SELECT) /* if waiting for selection complete */
2137 } 1938 scb->status &= ~SCB_SELECT;
2138 pCurHcb->HCS_ActScb = NULL; 1939 host->active = NULL;
2139 } 1940 }
2140 /* --------- get target id---------------------- */ 1941 /* --------- get target id---------------------- */
2141 tar = TUL_RD(pCurHcb->HCS_Base, TUL_SBusId); 1942 tar = inb(host->addr + TUL_SBusId);
2142 /* ------ get LUN from Identify message----------- */ 1943 /* ------ get LUN from Identify message----------- */
2143 lun = TUL_RD(pCurHcb->HCS_Base, TUL_SIdent) & 0x0F; 1944 lun = inb(host->addr + TUL_SIdent) & 0x0F;
2144 /* 07/22/98 from 0x1F -> 0x0F */ 1945 /* 07/22/98 from 0x1F -> 0x0F */
2145 pCurTcb = &pCurHcb->HCS_Tcs[tar]; 1946 active_tc = &host->targets[tar];
2146 pCurHcb->HCS_ActTcs = pCurTcb; 1947 host->active_tc = active_tc;
2147 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurTcb->TCS_SConfig0); 1948 outb(active_tc->sconfig0, host->addr + TUL_SConfig);
2148 TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurTcb->TCS_JS_Period); 1949 outb(active_tc->js_period, host->addr + TUL_SPeriod);
2149
2150 1950
2151 /* ------------- tag queueing ? ------------------- */ 1951 /* ------------- tag queueing ? ------------------- */
2152 if (pCurTcb->TCS_DrvFlags & TCF_DRV_EN_TAG) { 1952 if (active_tc->drv_flags & TCF_DRV_EN_TAG) {
2153 if ((tul_msgin_accept(pCurHcb)) == -1) 1953 if ((initio_msgin_accept(host)) == -1)
2154 return (-1); 1954 return -1;
2155 if (pCurHcb->HCS_Phase != MSG_IN) 1955 if (host->phase != MSG_IN)
2156 goto no_tag; 1956 goto no_tag;
2157 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 1957 outl(1, host->addr + TUL_SCnt0);
2158 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 1958 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2159 if ((wait_tulip(pCurHcb)) == -1) 1959 if (wait_tulip(host) == -1)
2160 return (-1); 1960 return -1;
2161 msg = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* Read Tag Message */ 1961 msg = inb(host->addr + TUL_SFifo); /* Read Tag Message */
2162 1962
2163 if ((msg < MSG_STAG) || (msg > MSG_OTAG)) /* Is simple Tag */ 1963 if (msg < MSG_STAG || msg > MSG_OTAG) /* Is simple Tag */
2164 goto no_tag; 1964 goto no_tag;
2165 1965
2166 if ((tul_msgin_accept(pCurHcb)) == -1) 1966 if (initio_msgin_accept(host) == -1)
2167 return (-1); 1967 return -1;
2168 1968
2169 if (pCurHcb->HCS_Phase != MSG_IN) 1969 if (host->phase != MSG_IN)
2170 goto no_tag; 1970 goto no_tag;
2171 1971
2172 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 1972 outl(1, host->addr + TUL_SCnt0);
2173 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 1973 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2174 if ((wait_tulip(pCurHcb)) == -1) 1974 if (wait_tulip(host) == -1)
2175 return (-1); 1975 return -1;
2176 tag = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* Read Tag ID */ 1976 tag = inb(host->addr + TUL_SFifo); /* Read Tag ID */
2177 pCurScb = pCurHcb->HCS_Scb + tag; 1977 scb = host->scb + tag;
2178 if ((pCurScb->SCB_Target != tar) || (pCurScb->SCB_Lun != lun)) { 1978 if (scb->target != tar || scb->lun != lun) {
2179 return tul_msgout_abort_tag(pCurHcb); 1979 return initio_msgout_abort_tag(host);
2180 } 1980 }
2181 if (pCurScb->SCB_Status != SCB_BUSY) { /* 03/24/95 */ 1981 if (scb->status != SCB_BUSY) { /* 03/24/95 */
2182 return tul_msgout_abort_tag(pCurHcb); 1982 return initio_msgout_abort_tag(host);
2183 } 1983 }
2184 pCurHcb->HCS_ActScb = pCurScb; 1984 host->active = scb;
2185 if ((tul_msgin_accept(pCurHcb)) == -1) 1985 if ((initio_msgin_accept(host)) == -1)
2186 return (-1); 1986 return -1;
2187 } else { /* No tag */ 1987 } else { /* No tag */
2188 no_tag: 1988 no_tag:
2189 if ((pCurScb = tul_find_busy_scb(pCurHcb, tar | (lun << 8))) == NULL) { 1989 if ((scb = initio_find_busy_scb(host, tar | (lun << 8))) == NULL) {
2190 return tul_msgout_abort_targ(pCurHcb); 1990 return initio_msgout_abort_targ(host);
2191 } 1991 }
2192 pCurHcb->HCS_ActScb = pCurScb; 1992 host->active = scb;
2193 if (!(pCurTcb->TCS_DrvFlags & TCF_DRV_EN_TAG)) { 1993 if (!(active_tc->drv_flags & TCF_DRV_EN_TAG)) {
2194 if ((tul_msgin_accept(pCurHcb)) == -1) 1994 if ((initio_msgin_accept(host)) == -1)
2195 return (-1); 1995 return -1;
2196 } 1996 }
2197 } 1997 }
2198 return 0; 1998 return 0;
2199} 1999}
2200 2000
2001/**
2002 * int_initio_bad_seq - out of phase
2003 * @host: InitIO host flagging event
2004 *
2005 * We have ended up out of phase somehow. Reset the host controller
2006 * and throw all our toys out of the pram. Let the midlayer clean up
2007 */
2201 2008
2202/***************************************************************************/ 2009static int int_initio_bad_seq(struct initio_host * host)
2203static int int_tul_bad_seq(HCS * pCurHcb)
2204{ /* target wrong phase */ 2010{ /* target wrong phase */
2205 SCB *pCurScb; 2011 struct scsi_ctrl_blk *scb;
2206 int i; 2012 int i;
2207 2013
2208 tul_reset_scsi(pCurHcb, 10); 2014 initio_reset_scsi(host, 10);
2209 2015
2210 while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) { 2016 while ((scb = initio_pop_busy_scb(host)) != NULL) {
2211 pCurScb->SCB_HaStat = HOST_BAD_PHAS; 2017 scb->hastat = HOST_BAD_PHAS;
2212 tul_append_done_scb(pCurHcb, pCurScb); 2018 initio_append_done_scb(host, scb);
2213 }
2214 for (i = 0; i < pCurHcb->HCS_MaxTar; i++) {
2215 pCurHcb->HCS_Tcs[i].TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
2216 } 2019 }
2217 return (-1); 2020 for (i = 0; i < host->max_tar; i++)
2021 host->targets[i].flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
2022 return -1;
2218} 2023}
2219 2024
2220 2025
2221/***************************************************************************/ 2026/**
2222int tul_msgout_abort_targ(HCS * pCurHcb) 2027 * initio_msgout_abort_targ - abort a tag
2028 * @host: InitIO host
2029 *
2030 * Abort when the target/lun does not match or when our SCB is not
2031 * busy. Used by untagged commands.
2032 */
2033
2034static int initio_msgout_abort_targ(struct initio_host * host)
2223{ 2035{
2224 2036
2225 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2037 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2226 if (tul_msgin_accept(pCurHcb) == -1) 2038 if (initio_msgin_accept(host) == -1)
2227 return (-1); 2039 return -1;
2228 if (pCurHcb->HCS_Phase != MSG_OUT) 2040 if (host->phase != MSG_OUT)
2229 return (tul_bad_seq(pCurHcb)); 2041 return initio_bad_seq(host);
2230 2042
2231 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_ABORT); 2043 outb(MSG_ABORT, host->addr + TUL_SFifo);
2232 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2044 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2233 2045
2234 return tul_wait_disc(pCurHcb); 2046 return initio_wait_disc(host);
2235} 2047}
2236 2048
2237/***************************************************************************/ 2049/**
2238int tul_msgout_abort_tag(HCS * pCurHcb) 2050 * initio_msgout_abort_tag - abort a tag
2051 * @host: InitIO host
2052 *
2053 * Abort when the target/lun does not match or when our SCB is not
2054 * busy. Used for tagged commands.
2055 */
2056
2057static int initio_msgout_abort_tag(struct initio_host * host)
2239{ 2058{
2240 2059
2241 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2060 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2242 if (tul_msgin_accept(pCurHcb) == -1) 2061 if (initio_msgin_accept(host) == -1)
2243 return (-1); 2062 return -1;
2244 if (pCurHcb->HCS_Phase != MSG_OUT) 2063 if (host->phase != MSG_OUT)
2245 return (tul_bad_seq(pCurHcb)); 2064 return initio_bad_seq(host);
2246 2065
2247 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_ABORT_TAG); 2066 outb(MSG_ABORT_TAG, host->addr + TUL_SFifo);
2248 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2067 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2249 2068
2250 return tul_wait_disc(pCurHcb); 2069 return initio_wait_disc(host);
2251 2070
2252} 2071}
2253 2072
2254/***************************************************************************/ 2073/**
2255int tul_msgin(HCS * pCurHcb) 2074 * initio_msgin - Message in
2075 * @host: InitIO Host
2076 *
2077 * Process incoming message
2078 */
2079static int initio_msgin(struct initio_host * host)
2256{ 2080{
2257 TCS *pCurTcb; 2081 struct target_control *active_tc;
2258 2082
2259 for (;;) { 2083 for (;;) {
2084 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2260 2085
2261 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 2086 outl(1, host->addr + TUL_SCnt0);
2262 2087 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2263 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 2088 if (wait_tulip(host) == -1)
2264 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 2089 return -1;
2265 if ((wait_tulip(pCurHcb)) == -1)
2266 return (-1);
2267 2090
2268 switch (TUL_RD(pCurHcb->HCS_Base, TUL_SFifo)) { 2091 switch (inb(host->addr + TUL_SFifo)) {
2269 case MSG_DISC: /* Disconnect msg */ 2092 case MSG_DISC: /* Disconnect msg */
2270 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT); 2093 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
2271 2094 return initio_wait_disc(host);
2272 return tul_wait_disc(pCurHcb);
2273
2274 case MSG_SDP: 2095 case MSG_SDP:
2275 case MSG_RESTORE: 2096 case MSG_RESTORE:
2276 case MSG_NOP: 2097 case MSG_NOP:
2277 tul_msgin_accept(pCurHcb); 2098 initio_msgin_accept(host);
2278 break; 2099 break;
2279
2280 case MSG_REJ: /* Clear ATN first */ 2100 case MSG_REJ: /* Clear ATN first */
2281 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, 2101 outb((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)),
2282 (TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7))); 2102 host->addr + TUL_SSignal);
2283 pCurTcb = pCurHcb->HCS_ActTcs; 2103 active_tc = host->active_tc;
2284 if ((pCurTcb->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) { /* do sync nego */ 2104 if ((active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) /* do sync nego */
2285 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2105 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN),
2286 } 2106 host->addr + TUL_SSignal);
2287 tul_msgin_accept(pCurHcb); 2107 initio_msgin_accept(host);
2288 break; 2108 break;
2289
2290 case MSG_EXTEND: /* extended msg */ 2109 case MSG_EXTEND: /* extended msg */
2291 tul_msgin_extend(pCurHcb); 2110 initio_msgin_extend(host);
2292 break; 2111 break;
2293
2294 case MSG_IGNOREWIDE: 2112 case MSG_IGNOREWIDE:
2295 tul_msgin_accept(pCurHcb); 2113 initio_msgin_accept(host);
2296 break; 2114 break;
2297
2298 /* get */
2299 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN);
2300 if (wait_tulip(pCurHcb) == -1)
2301 return -1;
2302
2303 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 0); /* put pad */
2304 TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* get IGNORE field */
2305 TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); /* get pad */
2306
2307 tul_msgin_accept(pCurHcb);
2308 break;
2309
2310 case MSG_COMP: 2115 case MSG_COMP:
2311 { 2116 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2312 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 2117 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
2313 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT); 2118 return initio_wait_done_disc(host);
2314 return tul_wait_done_disc(pCurHcb);
2315 }
2316 default: 2119 default:
2317 tul_msgout_reject(pCurHcb); 2120 initio_msgout_reject(host);
2318 break; 2121 break;
2319 } 2122 }
2320 if (pCurHcb->HCS_Phase != MSG_IN) 2123 if (host->phase != MSG_IN)
2321 return (pCurHcb->HCS_Phase); 2124 return host->phase;
2322 } 2125 }
2323 /* statement won't reach here */ 2126 /* statement won't reach here */
2324} 2127}
2325 2128
2326 2129static int initio_msgout_reject(struct initio_host * host)
2327
2328
2329/***************************************************************************/
2330int tul_msgout_reject(HCS * pCurHcb)
2331{ 2130{
2131 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2332 2132
2333 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2133 if (initio_msgin_accept(host) == -1)
2334 2134 return -1;
2335 if ((tul_msgin_accept(pCurHcb)) == -1)
2336 return (-1);
2337 2135
2338 if (pCurHcb->HCS_Phase == MSG_OUT) { 2136 if (host->phase == MSG_OUT) {
2339 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_REJ); /* Msg reject */ 2137 outb(MSG_REJ, host->addr + TUL_SFifo); /* Msg reject */
2340 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2138 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2341 return (wait_tulip(pCurHcb)); 2139 return wait_tulip(host);
2342 } 2140 }
2343 return (pCurHcb->HCS_Phase); 2141 return host->phase;
2344} 2142}
2345 2143
2346 2144static int initio_msgout_ide(struct initio_host * host)
2347
2348/***************************************************************************/
2349int tul_msgout_ide(HCS * pCurHcb)
2350{ 2145{
2351 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_IDE); /* Initiator Detected Error */ 2146 outb(MSG_IDE, host->addr + TUL_SFifo); /* Initiator Detected Error */
2352 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2147 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2353 return (wait_tulip(pCurHcb)); 2148 return wait_tulip(host);
2354} 2149}
2355 2150
2356 2151static int initio_msgin_extend(struct initio_host * host)
2357/***************************************************************************/
2358int tul_msgin_extend(HCS * pCurHcb)
2359{ 2152{
2360 BYTE len, idx; 2153 u8 len, idx;
2361 2154
2362 if (tul_msgin_accept(pCurHcb) != MSG_IN) 2155 if (initio_msgin_accept(host) != MSG_IN)
2363 return (pCurHcb->HCS_Phase); 2156 return host->phase;
2364 2157
2365 /* Get extended msg length */ 2158 /* Get extended msg length */
2366 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 2159 outl(1, host->addr + TUL_SCnt0);
2367 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 2160 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2368 if (wait_tulip(pCurHcb) == -1) 2161 if (wait_tulip(host) == -1)
2369 return (-1); 2162 return -1;
2370 2163
2371 len = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); 2164 len = inb(host->addr + TUL_SFifo);
2372 pCurHcb->HCS_Msg[0] = len; 2165 host->msg[0] = len;
2373 for (idx = 1; len != 0; len--) { 2166 for (idx = 1; len != 0; len--) {
2374 2167
2375 if ((tul_msgin_accept(pCurHcb)) != MSG_IN) 2168 if ((initio_msgin_accept(host)) != MSG_IN)
2376 return (pCurHcb->HCS_Phase); 2169 return host->phase;
2377 TUL_WRLONG(pCurHcb->HCS_Base + TUL_SCnt0, 1); 2170 outl(1, host->addr + TUL_SCnt0);
2378 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_IN); 2171 outb(TSC_XF_FIFO_IN, host->addr + TUL_SCmd);
2379 if (wait_tulip(pCurHcb) == -1) 2172 if (wait_tulip(host) == -1)
2380 return (-1); 2173 return -1;
2381 pCurHcb->HCS_Msg[idx++] = TUL_RD(pCurHcb->HCS_Base, TUL_SFifo); 2174 host->msg[idx++] = inb(host->addr + TUL_SFifo);
2382 } 2175 }
2383 if (pCurHcb->HCS_Msg[1] == 1) { /* if it's synchronous data transfer request */ 2176 if (host->msg[1] == 1) { /* if it's synchronous data transfer request */
2384 if (pCurHcb->HCS_Msg[0] != 3) /* if length is not right */ 2177 u8 r;
2385 return (tul_msgout_reject(pCurHcb)); 2178 if (host->msg[0] != 3) /* if length is not right */
2386 if (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */ 2179 return initio_msgout_reject(host);
2387 pCurHcb->HCS_Msg[3] = 0; 2180 if (host->active_tc->flags & TCF_NO_SYNC_NEGO) { /* Set OFFSET=0 to do async, nego back */
2181 host->msg[3] = 0;
2388 } else { 2182 } else {
2389 if ((tul_msgin_sync(pCurHcb) == 0) && 2183 if (initio_msgin_sync(host) == 0 &&
2390 (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_SYNC_DONE)) { 2184 (host->active_tc->flags & TCF_SYNC_DONE)) {
2391 tul_sync_done(pCurHcb); 2185 initio_sync_done(host);
2392 return (tul_msgin_accept(pCurHcb)); 2186 return initio_msgin_accept(host);
2393 } 2187 }
2394 } 2188 }
2395 2189
2396 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2190 r = inb(host->addr + TUL_SSignal);
2397 if ((tul_msgin_accept(pCurHcb)) != MSG_OUT) 2191 outb((r & (TSC_SET_ACK | 7)) | TSC_SET_ATN,
2398 return (pCurHcb->HCS_Phase); 2192 host->addr + TUL_SSignal);
2193 if (initio_msgin_accept(host) != MSG_OUT)
2194 return host->phase;
2399 /* sync msg out */ 2195 /* sync msg out */
2400 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); 2196 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0);
2401 2197
2402 tul_sync_done(pCurHcb); 2198 initio_sync_done(host);
2403 2199
2404 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND); 2200 outb(MSG_EXTEND, host->addr + TUL_SFifo);
2405 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); 2201 outb(3, host->addr + TUL_SFifo);
2406 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 1); 2202 outb(1, host->addr + TUL_SFifo);
2407 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[2]); 2203 outb(host->msg[2], host->addr + TUL_SFifo);
2408 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[3]); 2204 outb(host->msg[3], host->addr + TUL_SFifo);
2409 2205 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2410 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2206 return wait_tulip(host);
2411 return (wait_tulip(pCurHcb));
2412 } 2207 }
2413 if ((pCurHcb->HCS_Msg[0] != 2) || (pCurHcb->HCS_Msg[1] != 3)) 2208 if (host->msg[0] != 2 || host->msg[1] != 3)
2414 return (tul_msgout_reject(pCurHcb)); 2209 return initio_msgout_reject(host);
2415 /* if it's WIDE DATA XFER REQ */ 2210 /* if it's WIDE DATA XFER REQ */
2416 if (pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_WDTR) { 2211 if (host->active_tc->flags & TCF_NO_WDTR) {
2417 pCurHcb->HCS_Msg[2] = 0; 2212 host->msg[2] = 0;
2418 } else { 2213 } else {
2419 if (pCurHcb->HCS_Msg[2] > 2) /* > 32 bits */ 2214 if (host->msg[2] > 2) /* > 32 bits */
2420 return (tul_msgout_reject(pCurHcb)); 2215 return initio_msgout_reject(host);
2421 if (pCurHcb->HCS_Msg[2] == 2) { /* == 32 */ 2216 if (host->msg[2] == 2) { /* == 32 */
2422 pCurHcb->HCS_Msg[2] = 1; 2217 host->msg[2] = 1;
2423 } else { 2218 } else {
2424 if ((pCurHcb->HCS_ActTcs->TCS_Flags & TCF_NO_WDTR) == 0) { 2219 if ((host->active_tc->flags & TCF_NO_WDTR) == 0) {
2425 wdtr_done(pCurHcb); 2220 wdtr_done(host);
2426 if ((pCurHcb->HCS_ActTcs->TCS_Flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0) 2221 if ((host->active_tc->flags & (TCF_SYNC_DONE | TCF_NO_SYNC_NEGO)) == 0)
2427 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2222 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2428 return (tul_msgin_accept(pCurHcb)); 2223 return initio_msgin_accept(host);
2429 } 2224 }
2430 } 2225 }
2431 } 2226 }
2432 TUL_WR(pCurHcb->HCS_Base + TUL_SSignal, ((TUL_RD(pCurHcb->HCS_Base, TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN)); 2227 outb(((inb(host->addr + TUL_SSignal) & (TSC_SET_ACK | 7)) | TSC_SET_ATN), host->addr + TUL_SSignal);
2433 2228
2434 if (tul_msgin_accept(pCurHcb) != MSG_OUT) 2229 if (initio_msgin_accept(host) != MSG_OUT)
2435 return (pCurHcb->HCS_Phase); 2230 return host->phase;
2436 /* WDTR msg out */ 2231 /* WDTR msg out */
2437 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_EXTEND); 2232 outb(MSG_EXTEND, host->addr + TUL_SFifo);
2438 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 2); 2233 outb(2, host->addr + TUL_SFifo);
2439 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, 3); 2234 outb(3, host->addr + TUL_SFifo);
2440 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurHcb->HCS_Msg[2]); 2235 outb(host->msg[2], host->addr + TUL_SFifo);
2441 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2236 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2442 return (wait_tulip(pCurHcb)); 2237 return wait_tulip(host);
2443} 2238}
2444 2239
2445/***************************************************************************/ 2240static int initio_msgin_sync(struct initio_host * host)
2446int tul_msgin_sync(HCS * pCurHcb)
2447{ 2241{
2448 char default_period; 2242 char default_period;
2449 2243
2450 default_period = tul_rate_tbl[pCurHcb->HCS_ActTcs->TCS_Flags & TCF_SCSI_RATE]; 2244 default_period = initio_rate_tbl[host->active_tc->flags & TCF_SCSI_RATE];
2451 if (pCurHcb->HCS_Msg[3] > MAX_OFFSET) { 2245 if (host->msg[3] > MAX_OFFSET) {
2452 pCurHcb->HCS_Msg[3] = MAX_OFFSET; 2246 host->msg[3] = MAX_OFFSET;
2453 if (pCurHcb->HCS_Msg[2] < default_period) { 2247 if (host->msg[2] < default_period) {
2454 pCurHcb->HCS_Msg[2] = default_period; 2248 host->msg[2] = default_period;
2455 return 1; 2249 return 1;
2456 } 2250 }
2457 if (pCurHcb->HCS_Msg[2] >= 59) { /* Change to async */ 2251 if (host->msg[2] >= 59) /* Change to async */
2458 pCurHcb->HCS_Msg[3] = 0; 2252 host->msg[3] = 0;
2459 }
2460 return 1; 2253 return 1;
2461 } 2254 }
2462 /* offset requests asynchronous transfers ? */ 2255 /* offset requests asynchronous transfers ? */
2463 if (pCurHcb->HCS_Msg[3] == 0) { 2256 if (host->msg[3] == 0) {
2464 return 0; 2257 return 0;
2465 } 2258 }
2466 if (pCurHcb->HCS_Msg[2] < default_period) { 2259 if (host->msg[2] < default_period) {
2467 pCurHcb->HCS_Msg[2] = default_period; 2260 host->msg[2] = default_period;
2468 return 1; 2261 return 1;
2469 } 2262 }
2470 if (pCurHcb->HCS_Msg[2] >= 59) { 2263 if (host->msg[2] >= 59) {
2471 pCurHcb->HCS_Msg[3] = 0; 2264 host->msg[3] = 0;
2472 return 1; 2265 return 1;
2473 } 2266 }
2474 return 0; 2267 return 0;
2475} 2268}
2476 2269
2477 2270static int wdtr_done(struct initio_host * host)
2478/***************************************************************************/
2479int wdtr_done(HCS * pCurHcb)
2480{ 2271{
2481 pCurHcb->HCS_ActTcs->TCS_Flags &= ~TCF_SYNC_DONE; 2272 host->active_tc->flags &= ~TCF_SYNC_DONE;
2482 pCurHcb->HCS_ActTcs->TCS_Flags |= TCF_WDTR_DONE; 2273 host->active_tc->flags |= TCF_WDTR_DONE;
2483 2274
2484 pCurHcb->HCS_ActTcs->TCS_JS_Period = 0; 2275 host->active_tc->js_period = 0;
2485 if (pCurHcb->HCS_Msg[2]) { /* if 16 bit */ 2276 if (host->msg[2]) /* if 16 bit */
2486 pCurHcb->HCS_ActTcs->TCS_JS_Period |= TSC_WIDE_SCSI; 2277 host->active_tc->js_period |= TSC_WIDE_SCSI;
2487 } 2278 host->active_tc->sconfig0 &= ~TSC_ALT_PERIOD;
2488 pCurHcb->HCS_ActTcs->TCS_SConfig0 &= ~TSC_ALT_PERIOD; 2279 outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
2489 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_ActTcs->TCS_SConfig0); 2280 outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
2490 TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurHcb->HCS_ActTcs->TCS_JS_Period);
2491 2281
2492 return 1; 2282 return 1;
2493} 2283}
2494 2284
2495/***************************************************************************/ 2285static int initio_sync_done(struct initio_host * host)
2496int tul_sync_done(HCS * pCurHcb)
2497{ 2286{
2498 int i; 2287 int i;
2499 2288
2500 pCurHcb->HCS_ActTcs->TCS_Flags |= TCF_SYNC_DONE; 2289 host->active_tc->flags |= TCF_SYNC_DONE;
2501 2290
2502 if (pCurHcb->HCS_Msg[3]) { 2291 if (host->msg[3]) {
2503 pCurHcb->HCS_ActTcs->TCS_JS_Period |= pCurHcb->HCS_Msg[3]; 2292 host->active_tc->js_period |= host->msg[3];
2504 for (i = 0; i < 8; i++) { 2293 for (i = 0; i < 8; i++) {
2505 if (tul_rate_tbl[i] >= pCurHcb->HCS_Msg[2]) /* pick the big one */ 2294 if (initio_rate_tbl[i] >= host->msg[2]) /* pick the big one */
2506 break; 2295 break;
2507 } 2296 }
2508 pCurHcb->HCS_ActTcs->TCS_JS_Period |= (i << 4); 2297 host->active_tc->js_period |= (i << 4);
2509 pCurHcb->HCS_ActTcs->TCS_SConfig0 |= TSC_ALT_PERIOD; 2298 host->active_tc->sconfig0 |= TSC_ALT_PERIOD;
2510 } 2299 }
2511 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, pCurHcb->HCS_ActTcs->TCS_SConfig0); 2300 outb(host->active_tc->sconfig0, host->addr + TUL_SConfig);
2512 TUL_WR(pCurHcb->HCS_Base + TUL_SPeriod, pCurHcb->HCS_ActTcs->TCS_JS_Period); 2301 outb(host->active_tc->js_period, host->addr + TUL_SPeriod);
2513 2302
2514 return (-1); 2303 return -1;
2515} 2304}
2516 2305
2517 2306
2518int tul_post_scsi_rst(HCS * pCurHcb) 2307static int initio_post_scsi_rst(struct initio_host * host)
2519{ 2308{
2520 SCB *pCurScb; 2309 struct scsi_ctrl_blk *scb;
2521 TCS *pCurTcb; 2310 struct target_control *active_tc;
2522 int i; 2311 int i;
2523 2312
2524 pCurHcb->HCS_ActScb = NULL; 2313 host->active = NULL;
2525 pCurHcb->HCS_ActTcs = NULL; 2314 host->active_tc = NULL;
2526 pCurHcb->HCS_Flags = 0; 2315 host->flags = 0;
2527 2316
2528 while ((pCurScb = tul_pop_busy_scb(pCurHcb)) != NULL) { 2317 while ((scb = initio_pop_busy_scb(host)) != NULL) {
2529 pCurScb->SCB_HaStat = HOST_BAD_PHAS; 2318 scb->hastat = HOST_BAD_PHAS;
2530 tul_append_done_scb(pCurHcb, pCurScb); 2319 initio_append_done_scb(host, scb);
2531 } 2320 }
2532 /* clear sync done flag */ 2321 /* clear sync done flag */
2533 pCurTcb = &pCurHcb->HCS_Tcs[0]; 2322 active_tc = &host->targets[0];
2534 for (i = 0; i < pCurHcb->HCS_MaxTar; pCurTcb++, i++) { 2323 for (i = 0; i < host->max_tar; active_tc++, i++) {
2535 pCurTcb->TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE); 2324 active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE);
2536 /* Initialize the sync. xfer register values to an asyn xfer */ 2325 /* Initialize the sync. xfer register values to an asyn xfer */
2537 pCurTcb->TCS_JS_Period = 0; 2326 active_tc->js_period = 0;
2538 pCurTcb->TCS_SConfig0 = pCurHcb->HCS_SConf1; 2327 active_tc->sconfig0 = host->sconf1;
2539 pCurHcb->HCS_ActTags[0] = 0; /* 07/22/98 */ 2328 host->act_tags[0] = 0; /* 07/22/98 */
2540 pCurHcb->HCS_Tcs[i].TCS_Flags &= ~TCF_BUSY; /* 07/22/98 */ 2329 host->targets[i].flags &= ~TCF_BUSY; /* 07/22/98 */
2541 } /* for */ 2330 } /* for */
2542 2331
2543 return (-1); 2332 return -1;
2544} 2333}
2545 2334
2546/***************************************************************************/ 2335static void initio_select_atn_stop(struct initio_host * host, struct scsi_ctrl_blk * scb)
2547void tul_select_atn_stop(HCS * pCurHcb, SCB * pCurScb)
2548{ 2336{
2549 pCurScb->SCB_Status |= SCB_SELECT; 2337 scb->status |= SCB_SELECT;
2550 pCurScb->SCB_NxtStat = 0x1; 2338 scb->next_state = 0x1;
2551 pCurHcb->HCS_ActScb = pCurScb; 2339 host->active = scb;
2552 pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target]; 2340 host->active_tc = &host->targets[scb->target];
2553 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SELATNSTOP); 2341 outb(TSC_SELATNSTOP, host->addr + TUL_SCmd);
2554 return;
2555} 2342}
2556 2343
2557 2344
2558/***************************************************************************/ 2345static void initio_select_atn(struct initio_host * host, struct scsi_ctrl_blk * scb)
2559void tul_select_atn(HCS * pCurHcb, SCB * pCurScb)
2560{ 2346{
2561 int i; 2347 int i;
2562 2348
2563 pCurScb->SCB_Status |= SCB_SELECT; 2349 scb->status |= SCB_SELECT;
2564 pCurScb->SCB_NxtStat = 0x2; 2350 scb->next_state = 0x2;
2565 2351
2566 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident); 2352 outb(scb->ident, host->addr + TUL_SFifo);
2567 for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++) 2353 for (i = 0; i < (int) scb->cdblen; i++)
2568 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]); 2354 outb(scb->cdb[i], host->addr + TUL_SFifo);
2569 pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target]; 2355 host->active_tc = &host->targets[scb->target];
2570 pCurHcb->HCS_ActScb = pCurScb; 2356 host->active = scb;
2571 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SEL_ATN); 2357 outb(TSC_SEL_ATN, host->addr + TUL_SCmd);
2572 return;
2573} 2358}
2574 2359
2575/***************************************************************************/ 2360static void initio_select_atn3(struct initio_host * host, struct scsi_ctrl_blk * scb)
2576void tul_select_atn3(HCS * pCurHcb, SCB * pCurScb)
2577{ 2361{
2578 int i; 2362 int i;
2579 2363
2580 pCurScb->SCB_Status |= SCB_SELECT; 2364 scb->status |= SCB_SELECT;
2581 pCurScb->SCB_NxtStat = 0x2; 2365 scb->next_state = 0x2;
2582 2366
2583 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_Ident); 2367 outb(scb->ident, host->addr + TUL_SFifo);
2584 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagMsg); 2368 outb(scb->tagmsg, host->addr + TUL_SFifo);
2585 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_TagId); 2369 outb(scb->tagid, host->addr + TUL_SFifo);
2586 for (i = 0; i < (int) pCurScb->SCB_CDBLen; i++) 2370 for (i = 0; i < scb->cdblen; i++)
2587 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, pCurScb->SCB_CDB[i]); 2371 outb(scb->cdb[i], host->addr + TUL_SFifo);
2588 pCurHcb->HCS_ActTcs = &pCurHcb->HCS_Tcs[pCurScb->SCB_Target]; 2372 host->active_tc = &host->targets[scb->target];
2589 pCurHcb->HCS_ActScb = pCurScb; 2373 host->active = scb;
2590 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_SEL_ATN3); 2374 outb(TSC_SEL_ATN3, host->addr + TUL_SCmd);
2591 return;
2592} 2375}
2593 2376
2594/***************************************************************************/ 2377/**
2595/* SCSI Bus Device Reset */ 2378 * initio_bus_device_reset - SCSI Bus Device Reset
2596int tul_bus_device_reset(HCS * pCurHcb) 2379 * @host: InitIO host to reset
2380 *
2381 * Perform a device reset and abort all pending SCBs for the
2382 * victim device
2383 */
2384int initio_bus_device_reset(struct initio_host * host)
2597{ 2385{
2598 SCB *pCurScb = pCurHcb->HCS_ActScb; 2386 struct scsi_ctrl_blk *scb = host->active;
2599 TCS *pCurTcb = pCurHcb->HCS_ActTcs; 2387 struct target_control *active_tc = host->active_tc;
2600 SCB *pTmpScb, *pPrevScb; 2388 struct scsi_ctrl_blk *tmp, *prev;
2601 BYTE tar; 2389 u8 tar;
2602 2390
2603 if (pCurHcb->HCS_Phase != MSG_OUT) { 2391 if (host->phase != MSG_OUT)
2604 return (int_tul_bad_seq(pCurHcb)); /* Unexpected phase */ 2392 return int_initio_bad_seq(host); /* Unexpected phase */
2605 } 2393
2606 tul_unlink_pend_scb(pCurHcb, pCurScb); 2394 initio_unlink_pend_scb(host, scb);
2607 tul_release_scb(pCurHcb, pCurScb); 2395 initio_release_scb(host, scb);
2608 2396
2609 2397
2610 tar = pCurScb->SCB_Target; /* target */ 2398 tar = scb->target; /* target */
2611 pCurTcb->TCS_Flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY); 2399 active_tc->flags &= ~(TCF_SYNC_DONE | TCF_WDTR_DONE | TCF_BUSY);
2612 /* clr sync. nego & WDTR flags 07/22/98 */ 2400 /* clr sync. nego & WDTR flags 07/22/98 */
2613 2401
2614 /* abort all SCB with same target */ 2402 /* abort all SCB with same target */
2615 pPrevScb = pTmpScb = pCurHcb->HCS_FirstBusy; /* Check Busy queue */ 2403 prev = tmp = host->first_busy; /* Check Busy queue */
2616 while (pTmpScb != NULL) { 2404 while (tmp != NULL) {
2617 2405 if (tmp->target == tar) {
2618 if (pTmpScb->SCB_Target == tar) {
2619 /* unlink it */ 2406 /* unlink it */
2620 if (pTmpScb == pCurHcb->HCS_FirstBusy) { 2407 if (tmp == host->first_busy) {
2621 if ((pCurHcb->HCS_FirstBusy = pTmpScb->SCB_NxtScb) == NULL) 2408 if ((host->first_busy = tmp->next) == NULL)
2622 pCurHcb->HCS_LastBusy = NULL; 2409 host->last_busy = NULL;
2623 } else { 2410 } else {
2624 pPrevScb->SCB_NxtScb = pTmpScb->SCB_NxtScb; 2411 prev->next = tmp->next;
2625 if (pTmpScb == pCurHcb->HCS_LastBusy) 2412 if (tmp == host->last_busy)
2626 pCurHcb->HCS_LastBusy = pPrevScb; 2413 host->last_busy = prev;
2627 } 2414 }
2628 pTmpScb->SCB_HaStat = HOST_ABORTED; 2415 tmp->hastat = HOST_ABORTED;
2629 tul_append_done_scb(pCurHcb, pTmpScb); 2416 initio_append_done_scb(host, tmp);
2630 } 2417 }
2631 /* Previous haven't change */ 2418 /* Previous haven't change */
2632 else { 2419 else {
2633 pPrevScb = pTmpScb; 2420 prev = tmp;
2634 } 2421 }
2635 pTmpScb = pTmpScb->SCB_NxtScb; 2422 tmp = tmp->next;
2636 } 2423 }
2637 2424 outb(MSG_DEVRST, host->addr + TUL_SFifo);
2638 TUL_WR(pCurHcb->HCS_Base + TUL_SFifo, MSG_DEVRST); 2425 outb(TSC_XF_FIFO_OUT, host->addr + TUL_SCmd);
2639 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_XF_FIFO_OUT); 2426 return initio_wait_disc(host);
2640
2641 return tul_wait_disc(pCurHcb);
2642 2427
2643} 2428}
2644 2429
2645/***************************************************************************/ 2430static int initio_msgin_accept(struct initio_host * host)
2646int tul_msgin_accept(HCS * pCurHcb)
2647{ 2431{
2648 TUL_WR(pCurHcb->HCS_Base + TUL_SCmd, TSC_MSG_ACCEPT); 2432 outb(TSC_MSG_ACCEPT, host->addr + TUL_SCmd);
2649 return (wait_tulip(pCurHcb)); 2433 return wait_tulip(host);
2650} 2434}
2651 2435
2652/***************************************************************************/ 2436static int wait_tulip(struct initio_host * host)
2653int wait_tulip(HCS * pCurHcb)
2654{ 2437{
2655 2438
2656 while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0)) 2439 while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
2657 & TSS_INT_PENDING)); 2440 & TSS_INT_PENDING))
2441 cpu_relax();
2658 2442
2659 pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt); 2443 host->jsint = inb(host->addr + TUL_SInt);
2660 pCurHcb->HCS_Phase = pCurHcb->HCS_JSStatus0 & TSS_PH_MASK; 2444 host->phase = host->jsstatus0 & TSS_PH_MASK;
2661 pCurHcb->HCS_JSStatus1 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus1); 2445 host->jsstatus1 = inb(host->addr + TUL_SStatus1);
2662 2446
2663 if (pCurHcb->HCS_JSInt & TSS_RESEL_INT) { /* if SCSI bus reset detected */ 2447 if (host->jsint & TSS_RESEL_INT) /* if SCSI bus reset detected */
2664 return (int_tul_resel(pCurHcb)); 2448 return int_initio_resel(host);
2665 } 2449 if (host->jsint & TSS_SEL_TIMEOUT) /* if selected/reselected timeout interrupt */
2666 if (pCurHcb->HCS_JSInt & TSS_SEL_TIMEOUT) { /* if selected/reselected timeout interrupt */ 2450 return int_initio_busfree(host);
2667 return (int_tul_busfree(pCurHcb)); 2451 if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
2668 } 2452 return int_initio_scsi_rst(host);
2669 if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */ 2453
2670 return (int_tul_scsi_rst(pCurHcb)); 2454 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
2671 } 2455 if (host->flags & HCF_EXPECT_DONE_DISC) {
2672 if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */ 2456 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2673 if (pCurHcb->HCS_Flags & HCF_EXPECT_DONE_DISC) { 2457 initio_unlink_busy_scb(host, host->active);
2674 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */ 2458 host->active->hastat = 0;
2675 tul_unlink_busy_scb(pCurHcb, pCurHcb->HCS_ActScb); 2459 initio_append_done_scb(host, host->active);
2676 pCurHcb->HCS_ActScb->SCB_HaStat = 0; 2460 host->active = NULL;
2677 tul_append_done_scb(pCurHcb, pCurHcb->HCS_ActScb); 2461 host->active_tc = NULL;
2678 pCurHcb->HCS_ActScb = NULL; 2462 host->flags &= ~HCF_EXPECT_DONE_DISC;
2679 pCurHcb->HCS_ActTcs = NULL; 2463 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2680 pCurHcb->HCS_Flags &= ~HCF_EXPECT_DONE_DISC; 2464 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2681 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT); 2465 return -1;
2682 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
2683 return (-1);
2684 } 2466 }
2685 if (pCurHcb->HCS_Flags & HCF_EXPECT_DISC) { 2467 if (host->flags & HCF_EXPECT_DISC) {
2686 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */ 2468 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2687 pCurHcb->HCS_ActScb = NULL; 2469 host->active = NULL;
2688 pCurHcb->HCS_ActTcs = NULL; 2470 host->active_tc = NULL;
2689 pCurHcb->HCS_Flags &= ~HCF_EXPECT_DISC; 2471 host->flags &= ~HCF_EXPECT_DISC;
2690 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT); 2472 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2691 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */ 2473 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2692 return (-1); 2474 return -1;
2693 } 2475 }
2694 return (int_tul_busfree(pCurHcb)); 2476 return int_initio_busfree(host);
2695 }
2696 if (pCurHcb->HCS_JSInt & (TSS_FUNC_COMP | TSS_BUS_SERV)) {
2697 return (pCurHcb->HCS_Phase);
2698 } 2477 }
2699 return (pCurHcb->HCS_Phase); 2478 /* The old code really does the below. Can probably be removed */
2479 if (host->jsint & (TSS_FUNC_COMP | TSS_BUS_SERV))
2480 return host->phase;
2481 return host->phase;
2700} 2482}
2701/***************************************************************************/
2702int tul_wait_disc(HCS * pCurHcb)
2703{
2704
2705 while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0))
2706 & TSS_INT_PENDING));
2707 2483
2484static int initio_wait_disc(struct initio_host * host)
2485{
2486 while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0)) & TSS_INT_PENDING))
2487 cpu_relax();
2708 2488
2709 pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt); 2489 host->jsint = inb(host->addr + TUL_SInt);
2710 2490
2711 if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */ 2491 if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
2712 return (int_tul_scsi_rst(pCurHcb)); 2492 return int_initio_scsi_rst(host);
2713 } 2493 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
2714 if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */ 2494 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2715 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */ 2495 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2716 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT); 2496 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2717 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */ 2497 host->active = NULL;
2718 pCurHcb->HCS_ActScb = NULL; 2498 return -1;
2719 return (-1);
2720 } 2499 }
2721 return (tul_bad_seq(pCurHcb)); 2500 return initio_bad_seq(host);
2722} 2501}
2723 2502
2724/***************************************************************************/ 2503static int initio_wait_done_disc(struct initio_host * host)
2725int tul_wait_done_disc(HCS * pCurHcb)
2726{ 2504{
2505 while (!((host->jsstatus0 = inb(host->addr + TUL_SStatus0))
2506 & TSS_INT_PENDING))
2507 cpu_relax();
2727 2508
2509 host->jsint = inb(host->addr + TUL_SInt);
2728 2510
2729 while (!((pCurHcb->HCS_JSStatus0 = TUL_RD(pCurHcb->HCS_Base, TUL_SStatus0)) 2511 if (host->jsint & TSS_SCSIRST_INT) /* if SCSI bus reset detected */
2730 & TSS_INT_PENDING)); 2512 return int_initio_scsi_rst(host);
2731 2513 if (host->jsint & TSS_DISC_INT) { /* BUS disconnection */
2732 pCurHcb->HCS_JSInt = TUL_RD(pCurHcb->HCS_Base, TUL_SInt); 2514 outb(TSC_FLUSH_FIFO, host->addr + TUL_SCtrl0); /* Flush SCSI FIFO */
2733 2515 outb(TSC_INITDEFAULT, host->addr + TUL_SConfig);
2734 2516 outb(TSC_HW_RESELECT, host->addr + TUL_SCtrl1); /* Enable HW reselect */
2735 if (pCurHcb->HCS_JSInt & TSS_SCSIRST_INT) { /* if SCSI bus reset detected */ 2517 initio_unlink_busy_scb(host, host->active);
2736 return (int_tul_scsi_rst(pCurHcb));
2737 }
2738 if (pCurHcb->HCS_JSInt & TSS_DISC_INT) { /* BUS disconnection */
2739 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl0, TSC_FLUSH_FIFO); /* Flush SCSI FIFO */
2740 TUL_WR(pCurHcb->HCS_Base + TUL_SConfig, TSC_INITDEFAULT);
2741 TUL_WR(pCurHcb->HCS_Base + TUL_SCtrl1, TSC_HW_RESELECT); /* Enable HW reselect */
2742 tul_unlink_busy_scb(pCurHcb, pCurHcb->HCS_ActScb);
2743 2518
2744 tul_append_done_scb(pCurHcb, pCurHcb->HCS_ActScb); 2519 initio_append_done_scb(host, host->active);
2745 pCurHcb->HCS_ActScb = NULL; 2520 host->active = NULL;
2746 return (-1); 2521 return -1;
2747 } 2522 }
2748 return (tul_bad_seq(pCurHcb)); 2523 return initio_bad_seq(host);
2749} 2524}
2750 2525
2526/**
2527 * i91u_intr - IRQ handler
2528 * @irqno: IRQ number
2529 * @dev_id: IRQ identifier
2530 *
2531 * Take the relevant locks and then invoke the actual isr processing
2532 * code under the lock.
2533 */
2534
2751static irqreturn_t i91u_intr(int irqno, void *dev_id) 2535static irqreturn_t i91u_intr(int irqno, void *dev_id)
2752{ 2536{
2753 struct Scsi_Host *dev = dev_id; 2537 struct Scsi_Host *dev = dev_id;
2754 unsigned long flags; 2538 unsigned long flags;
2539 int r;
2755 2540
2756 spin_lock_irqsave(dev->host_lock, flags); 2541 spin_lock_irqsave(dev->host_lock, flags);
2757 tul_isr((HCS *)dev->base); 2542 r = initio_isr((struct initio_host *)dev->hostdata);
2758 spin_unlock_irqrestore(dev->host_lock, flags); 2543 spin_unlock_irqrestore(dev->host_lock, flags);
2759 return IRQ_HANDLED; 2544 if (r)
2760} 2545 return IRQ_HANDLED;
2761 2546 else
2762static int tul_NewReturnNumberOfAdapters(void) 2547 return IRQ_NONE;
2763{
2764 struct pci_dev *pDev = NULL; /* Start from none */
2765 int iAdapters = 0;
2766 long dRegValue;
2767 WORD wBIOS;
2768 int i = 0;
2769
2770 init_i91uAdapter_table();
2771
2772 for (i = 0; i < ARRAY_SIZE(i91u_pci_devices); i++)
2773 {
2774 while ((pDev = pci_find_device(i91u_pci_devices[i].vendor, i91u_pci_devices[i].device, pDev)) != NULL) {
2775 if (pci_enable_device(pDev))
2776 continue;
2777 pci_read_config_dword(pDev, 0x44, (u32 *) & dRegValue);
2778 wBIOS = (UWORD) (dRegValue & 0xFF);
2779 if (((dRegValue & 0xFF00) >> 8) == 0xFF)
2780 dRegValue = 0;
2781 wBIOS = (wBIOS << 8) + ((UWORD) ((dRegValue & 0xFF00) >> 8));
2782 if (pci_set_dma_mask(pDev, DMA_32BIT_MASK)) {
2783 printk(KERN_WARNING
2784 "i91u: Could not set 32 bit DMA mask\n");
2785 continue;
2786 }
2787
2788 if (Addi91u_into_Adapter_table(wBIOS,
2789 (pDev->resource[0].start),
2790 pDev->irq,
2791 pDev->bus->number,
2792 (pDev->devfn >> 3)
2793 ) == 0)
2794 iAdapters++;
2795 }
2796 }
2797
2798 return (iAdapters);
2799} 2548}
2800 2549
2801static int i91u_detect(struct scsi_host_template * tpnt)
2802{
2803 HCS *pHCB;
2804 struct Scsi_Host *hreg;
2805 unsigned long i; /* 01/14/98 */
2806 int ok = 0, iAdapters;
2807 ULONG dBiosAdr;
2808 BYTE *pbBiosAdr;
2809
2810 /* Get total number of adapters in the motherboard */
2811 iAdapters = tul_NewReturnNumberOfAdapters();
2812 if (iAdapters == 0) /* If no tulip founded, return */
2813 return (0);
2814
2815 tul_num_ch = (iAdapters > tul_num_ch) ? tul_num_ch : iAdapters;
2816 /* Update actually channel number */
2817 if (tul_tag_enable) { /* 1.01i */
2818 tul_num_scb = MAX_TARGETS * i91u_MAXQUEUE;
2819 } else {
2820 tul_num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */
2821 } /* Update actually SCBs per adapter */
2822
2823 /* Get total memory needed for HCS */
2824 i = tul_num_ch * sizeof(HCS);
2825 memset((unsigned char *) &tul_hcs[0], 0, i); /* Initialize tul_hcs 0 */
2826 /* Get total memory needed for SCB */
2827
2828 for (; tul_num_scb >= MAX_TARGETS + 3; tul_num_scb--) {
2829 i = tul_num_ch * tul_num_scb * sizeof(SCB);
2830 if ((tul_scb = kmalloc(i, GFP_ATOMIC | GFP_DMA)) != NULL)
2831 break;
2832 }
2833 if (tul_scb == NULL) {
2834 printk("i91u: SCB memory allocation error\n");
2835 return (0);
2836 }
2837 memset((unsigned char *) tul_scb, 0, i);
2838 2550
2839 for (i = 0, pHCB = &tul_hcs[0]; /* Get pointer for control block */ 2551/**
2840 i < tul_num_ch; 2552 * initio_build_scb - Build the mappings and SCB
2841 i++, pHCB++) { 2553 * @host: InitIO host taking the command
2842 get_tulipPCIConfig(pHCB, i); 2554 * @cblk: Firmware command block
2843 2555 * @cmnd: SCSI midlayer command block
2844 dBiosAdr = pHCB->HCS_BIOS; 2556 *
2845 dBiosAdr = (dBiosAdr << 4); 2557 * Translate the abstract SCSI command into a firmware command block
2846 2558 * suitable for feeding to the InitIO host controller. This also requires
2847 pbBiosAdr = phys_to_virt(dBiosAdr); 2559 * we build the scatter gather lists and ensure they are mapped properly.
2848 2560 */
2849 init_tulip(pHCB, tul_scb + (i * tul_num_scb), tul_num_scb, pbBiosAdr, 10);
2850 request_region(pHCB->HCS_Base, 256, "i91u"); /* Register */
2851
2852 pHCB->HCS_Index = i; /* 7/29/98 */
2853 hreg = scsi_register(tpnt, sizeof(HCS));
2854 if(hreg == NULL) {
2855 release_region(pHCB->HCS_Base, 256);
2856 return 0;
2857 }
2858 hreg->io_port = pHCB->HCS_Base;
2859 hreg->n_io_port = 0xff;
2860 hreg->can_queue = tul_num_scb; /* 03/05/98 */
2861 hreg->unique_id = pHCB->HCS_Base;
2862 hreg->max_id = pHCB->HCS_MaxTar;
2863 hreg->max_lun = 32; /* 10/21/97 */
2864 hreg->irq = pHCB->HCS_Intr;
2865 hreg->this_id = pHCB->HCS_SCSI_ID; /* Assign HCS index */
2866 hreg->base = (unsigned long)pHCB;
2867 hreg->sg_tablesize = TOTAL_SG_ENTRY; /* Maximun support is 32 */
2868
2869 /* Initial tulip chip */
2870 ok = request_irq(pHCB->HCS_Intr, i91u_intr, IRQF_DISABLED | IRQF_SHARED, "i91u", hreg);
2871 if (ok < 0) {
2872 printk(KERN_WARNING "i91u: unable to request IRQ %d\n\n", pHCB->HCS_Intr);
2873 return 0;
2874 }
2875 }
2876
2877 tpnt->this_id = -1;
2878 tpnt->can_queue = 1;
2879
2880 return 1;
2881}
2882 2561
2883static void i91uBuildSCB(HCS * pHCB, SCB * pSCB, struct scsi_cmnd * SCpnt) 2562static void initio_build_scb(struct initio_host * host, struct scsi_ctrl_blk * cblk, struct scsi_cmnd * cmnd)
2884{ /* Create corresponding SCB */ 2563{ /* Create corresponding SCB */
2885 struct scatterlist *pSrbSG; 2564 struct scatterlist *sglist;
2886 SG *pSG; /* Pointer to SG list */ 2565 struct sg_entry *sg; /* Pointer to SG list */
2887 int i; 2566 int i, nseg;
2888 long TotalLen; 2567 long total_len;
2889 dma_addr_t dma_addr; 2568 dma_addr_t dma_addr;
2890 2569
2891 pSCB->SCB_Post = i91uSCBPost; /* i91u's callback routine */ 2570 /* Fill in the command headers */
2892 pSCB->SCB_Srb = SCpnt; 2571 cblk->post = i91uSCBPost; /* i91u's callback routine */
2893 pSCB->SCB_Opcode = ExecSCSI; 2572 cblk->srb = cmnd;
2894 pSCB->SCB_Flags = SCF_POST; /* After SCSI done, call post routine */ 2573 cblk->opcode = ExecSCSI;
2895 pSCB->SCB_Target = SCpnt->device->id; 2574 cblk->flags = SCF_POST; /* After SCSI done, call post routine */
2896 pSCB->SCB_Lun = SCpnt->device->lun; 2575 cblk->target = cmnd->device->id;
2897 pSCB->SCB_Ident = SCpnt->device->lun | DISC_ALLOW; 2576 cblk->lun = cmnd->device->lun;
2898 2577 cblk->ident = cmnd->device->lun | DISC_ALLOW;
2899 pSCB->SCB_Flags |= SCF_SENSE; /* Turn on auto request sense */
2900 dma_addr = dma_map_single(&pHCB->pci_dev->dev, SCpnt->sense_buffer,
2901 SENSE_SIZE, DMA_FROM_DEVICE);
2902 pSCB->SCB_SensePtr = cpu_to_le32((u32)dma_addr);
2903 pSCB->SCB_SenseLen = cpu_to_le32(SENSE_SIZE);
2904 SCpnt->SCp.ptr = (char *)(unsigned long)dma_addr;
2905 2578
2906 pSCB->SCB_CDBLen = SCpnt->cmd_len; 2579 cblk->flags |= SCF_SENSE; /* Turn on auto request sense */
2907 pSCB->SCB_HaStat = 0;
2908 pSCB->SCB_TaStat = 0;
2909 memcpy(&pSCB->SCB_CDB[0], &SCpnt->cmnd, SCpnt->cmd_len);
2910 2580
2911 if (SCpnt->device->tagged_supported) { /* Tag Support */ 2581 /* Map the sense buffer into bus memory */
2912 pSCB->SCB_TagMsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */ 2582 dma_addr = dma_map_single(&host->pci_dev->dev, cmnd->sense_buffer,
2583 SENSE_SIZE, DMA_FROM_DEVICE);
2584 cblk->senseptr = cpu_to_le32((u32)dma_addr);
2585 cblk->senselen = cpu_to_le32(SENSE_SIZE);
2586 cmnd->SCp.ptr = (char *)(unsigned long)dma_addr;
2587 cblk->cdblen = cmnd->cmd_len;
2588
2589 /* Clear the returned status */
2590 cblk->hastat = 0;
2591 cblk->tastat = 0;
2592 /* Command the command */
2593 memcpy(&cblk->cdb[0], &cmnd->cmnd, cmnd->cmd_len);
2594
2595 /* Set up tags */
2596 if (cmnd->device->tagged_supported) { /* Tag Support */
2597 cblk->tagmsg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
2913 } else { 2598 } else {
2914 pSCB->SCB_TagMsg = 0; /* No tag support */ 2599 cblk->tagmsg = 0; /* No tag support */
2915 } 2600 }
2601
2916 /* todo handle map_sg error */ 2602 /* todo handle map_sg error */
2917 if (SCpnt->use_sg) { 2603 nseg = scsi_dma_map(cmnd);
2918 dma_addr = dma_map_single(&pHCB->pci_dev->dev, &pSCB->SCB_SGList[0], 2604 BUG_ON(nseg < 0);
2919 sizeof(struct SG_Struc) * TOTAL_SG_ENTRY, 2605 if (nseg) {
2606 dma_addr = dma_map_single(&host->pci_dev->dev, &cblk->sglist[0],
2607 sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
2920 DMA_BIDIRECTIONAL); 2608 DMA_BIDIRECTIONAL);
2921 pSCB->SCB_BufPtr = cpu_to_le32((u32)dma_addr); 2609 cblk->bufptr = cpu_to_le32((u32)dma_addr);
2922 SCpnt->SCp.dma_handle = dma_addr; 2610 cmnd->SCp.dma_handle = dma_addr;
2923 2611
2924 pSrbSG = (struct scatterlist *) SCpnt->request_buffer; 2612
2925 pSCB->SCB_SGLen = dma_map_sg(&pHCB->pci_dev->dev, pSrbSG, 2613 cblk->flags |= SCF_SG; /* Turn on SG list flag */
2926 SCpnt->use_sg, SCpnt->sc_data_direction); 2614 total_len = 0;
2927 2615 sg = &cblk->sglist[0];
2928 pSCB->SCB_Flags |= SCF_SG; /* Turn on SG list flag */ 2616 scsi_for_each_sg(cmnd, sglist, cblk->sglen, i) {
2929 for (i = 0, TotalLen = 0, pSG = &pSCB->SCB_SGList[0]; /* 1.01g */ 2617 sg->data = cpu_to_le32((u32)sg_dma_address(sglist));
2930 i < pSCB->SCB_SGLen; i++, pSG++, pSrbSG++) { 2618 total_len += sg->len = cpu_to_le32((u32)sg_dma_len(sglist));
2931 pSG->SG_Ptr = cpu_to_le32((u32)sg_dma_address(pSrbSG));
2932 TotalLen += pSG->SG_Len = cpu_to_le32((u32)sg_dma_len(pSrbSG));
2933 } 2619 }
2934 2620
2935 pSCB->SCB_BufLen = (SCpnt->request_bufflen > TotalLen) ? 2621 cblk->buflen = (scsi_bufflen(cmnd) > total_len) ?
2936 TotalLen : SCpnt->request_bufflen; 2622 total_len : scsi_bufflen(cmnd);
2937 } else if (SCpnt->request_bufflen) { /* Non SG */ 2623 } else { /* No data transfer required */
2938 dma_addr = dma_map_single(&pHCB->pci_dev->dev, SCpnt->request_buffer, 2624 cblk->buflen = 0;
2939 SCpnt->request_bufflen, 2625 cblk->sglen = 0;
2940 SCpnt->sc_data_direction);
2941 SCpnt->SCp.dma_handle = dma_addr;
2942 pSCB->SCB_BufPtr = cpu_to_le32((u32)dma_addr);
2943 pSCB->SCB_BufLen = cpu_to_le32((u32)SCpnt->request_bufflen);
2944 pSCB->SCB_SGLen = 0;
2945 } else {
2946 pSCB->SCB_BufLen = 0;
2947 pSCB->SCB_SGLen = 0;
2948 } 2626 }
2949} 2627}
2950 2628
2629/**
2630 * i91u_queuecommand - Queue a new command if possible
2631 * @cmd: SCSI command block from the mid layer
2632 * @done: Completion handler
2633 *
2634 * Attempts to queue a new command with the host adapter. Will return
2635 * zero if successful or indicate a host busy condition if not (which
2636 * will cause the mid layer to call us again later with the command)
2637 */
2638
2951static int i91u_queuecommand(struct scsi_cmnd *cmd, 2639static int i91u_queuecommand(struct scsi_cmnd *cmd,
2952 void (*done)(struct scsi_cmnd *)) 2640 void (*done)(struct scsi_cmnd *))
2953{ 2641{
2954 HCS *pHCB = (HCS *) cmd->device->host->base; 2642 struct initio_host *host = (struct initio_host *) cmd->device->host->hostdata;
2955 register SCB *pSCB; 2643 struct scsi_ctrl_blk *cmnd;
2956 2644
2957 cmd->scsi_done = done; 2645 cmd->scsi_done = done;
2958 2646
2959 pSCB = tul_alloc_scb(pHCB); 2647 cmnd = initio_alloc_scb(host);
2960 if (!pSCB) 2648 if (!cmnd)
2961 return SCSI_MLQUEUE_HOST_BUSY; 2649 return SCSI_MLQUEUE_HOST_BUSY;
2962 2650
2963 i91uBuildSCB(pHCB, pSCB, cmd); 2651 initio_build_scb(host, cmnd, cmd);
2964 tul_exec_scb(pHCB, pSCB); 2652 initio_exec_scb(host, cmnd);
2965 return 0; 2653 return 0;
2966} 2654}
2967 2655
2968#if 0 /* no new EH yet */ 2656/**
2969/* 2657 * i91u_bus_reset - reset the SCSI bus
2970 * Abort a queued command 2658 * @cmnd: Command block we want to trigger the reset for
2971 * (commands that are on the bus can't be aborted easily) 2659 *
2972 */ 2660 * Initiate a SCSI bus reset sequence
2973static int i91u_abort(struct scsi_cmnd * SCpnt)
2974{
2975 HCS *pHCB;
2976
2977 pHCB = (HCS *) SCpnt->device->host->base;
2978 return tul_abort_srb(pHCB, SCpnt);
2979}
2980
2981/*
2982 * Reset registers, reset a hanging bus and
2983 * kill active and disconnected commands for target w/o soft reset
2984 */ 2661 */
2985static int i91u_reset(struct scsi_cmnd * SCpnt, unsigned int reset_flags)
2986{ /* I need Host Control Block Information */
2987 HCS *pHCB;
2988
2989 pHCB = (HCS *) SCpnt->device->host->base;
2990 2662
2991 if (reset_flags & (SCSI_RESET_SUGGEST_BUS_RESET | SCSI_RESET_SUGGEST_HOST_RESET)) 2663static int i91u_bus_reset(struct scsi_cmnd * cmnd)
2992 return tul_reset_scsi_bus(pHCB);
2993 else
2994 return tul_device_reset(pHCB, SCpnt, SCpnt->device->id, reset_flags);
2995}
2996#endif
2997
2998static int i91u_bus_reset(struct scsi_cmnd * SCpnt)
2999{ 2664{
3000 HCS *pHCB; 2665 struct initio_host *host;
3001 2666
3002 pHCB = (HCS *) SCpnt->device->host->base; 2667 host = (struct initio_host *) cmnd->device->host->hostdata;
3003 2668
3004 spin_lock_irq(SCpnt->device->host->host_lock); 2669 spin_lock_irq(cmnd->device->host->host_lock);
3005 tul_reset_scsi(pHCB, 0); 2670 initio_reset_scsi(host, 0);
3006 spin_unlock_irq(SCpnt->device->host->host_lock); 2671 spin_unlock_irq(cmnd->device->host->host_lock);
3007 2672
3008 return SUCCESS; 2673 return SUCCESS;
3009} 2674}
3010 2675
3011/* 2676/**
3012 * Return the "logical geometry" 2677 * i91u_biospararm - return the "logical geometry
2678 * @sdev: SCSI device
2679 * @dev; Matching block device
2680 * @capacity: Sector size of drive
2681 * @info_array: Return space for BIOS geometry
2682 *
2683 * Map the device geometry in a manner compatible with the host
2684 * controller BIOS behaviour.
2685 *
2686 * FIXME: limited to 2^32 sector devices.
3013 */ 2687 */
2688
3014static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev, 2689static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
3015 sector_t capacity, int *info_array) 2690 sector_t capacity, int *info_array)
3016{ 2691{
3017 HCS *pHcb; /* Point to Host adapter control block */ 2692 struct initio_host *host; /* Point to Host adapter control block */
3018 TCS *pTcb; 2693 struct target_control *tc;
3019 2694
3020 pHcb = (HCS *) sdev->host->base; 2695 host = (struct initio_host *) sdev->host->hostdata;
3021 pTcb = &pHcb->HCS_Tcs[sdev->id]; 2696 tc = &host->targets[sdev->id];
3022 2697
3023 if (pTcb->TCS_DrvHead) { 2698 if (tc->heads) {
3024 info_array[0] = pTcb->TCS_DrvHead; 2699 info_array[0] = tc->heads;
3025 info_array[1] = pTcb->TCS_DrvSector; 2700 info_array[1] = tc->sectors;
3026 info_array[2] = (unsigned long)capacity / pTcb->TCS_DrvHead / pTcb->TCS_DrvSector; 2701 info_array[2] = (unsigned long)capacity / tc->heads / tc->sectors;
3027 } else { 2702 } else {
3028 if (pTcb->TCS_DrvFlags & TCF_DRV_255_63) { 2703 if (tc->drv_flags & TCF_DRV_255_63) {
3029 info_array[0] = 255; 2704 info_array[0] = 255;
3030 info_array[1] = 63; 2705 info_array[1] = 63;
3031 info_array[2] = (unsigned long)capacity / 255 / 63; 2706 info_array[2] = (unsigned long)capacity / 255 / 63;
@@ -3047,7 +2722,16 @@ static int i91u_biosparam(struct scsi_device *sdev, struct block_device *dev,
3047 return 0; 2722 return 0;
3048} 2723}
3049 2724
3050static void i91u_unmap_cmnd(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd) 2725/**
2726 * i91u_unmap_scb - Unmap a command
2727 * @pci_dev: PCI device the command is for
2728 * @cmnd: The command itself
2729 *
2730 * Unmap any PCI mapping/IOMMU resources allocated when the command
2731 * was mapped originally as part of initio_build_scb
2732 */
2733
2734static void i91u_unmap_scb(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
3051{ 2735{
3052 /* auto sense buffer */ 2736 /* auto sense buffer */
3053 if (cmnd->SCp.ptr) { 2737 if (cmnd->SCp.ptr) {
@@ -3058,65 +2742,63 @@ static void i91u_unmap_cmnd(struct pci_dev *pci_dev, struct scsi_cmnd *cmnd)
3058 } 2742 }
3059 2743
3060 /* request buffer */ 2744 /* request buffer */
3061 if (cmnd->use_sg) { 2745 if (scsi_sg_count(cmnd)) {
3062 dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle, 2746 dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
3063 sizeof(struct SG_Struc) * TOTAL_SG_ENTRY, 2747 sizeof(struct sg_entry) * TOTAL_SG_ENTRY,
3064 DMA_BIDIRECTIONAL); 2748 DMA_BIDIRECTIONAL);
3065 2749
3066 dma_unmap_sg(&pci_dev->dev, cmnd->request_buffer, 2750 scsi_dma_unmap(cmnd);
3067 cmnd->use_sg,
3068 cmnd->sc_data_direction);
3069 } else if (cmnd->request_bufflen) {
3070 dma_unmap_single(&pci_dev->dev, cmnd->SCp.dma_handle,
3071 cmnd->request_bufflen,
3072 cmnd->sc_data_direction);
3073 } 2751 }
3074} 2752}
3075 2753
3076/***************************************************************************** 2754/**
3077 Function name : i91uSCBPost 2755 * i91uSCBPost - SCSI callback
3078 Description : This is callback routine be called when tulip finish one 2756 * @host: Pointer to host adapter control block.
3079 SCSI command. 2757 * @cmnd: Pointer to SCSI control block.
3080 Input : pHCB - Pointer to host adapter control block. 2758 *
3081 pSCB - Pointer to SCSI control block. 2759 * This is callback routine be called when tulip finish one
3082 Output : None. 2760 * SCSI command.
3083 Return : None. 2761 */
3084*****************************************************************************/
3085static void i91uSCBPost(BYTE * pHcb, BYTE * pScb)
3086{
3087 struct scsi_cmnd *pSRB; /* Pointer to SCSI request block */
3088 HCS *pHCB;
3089 SCB *pSCB;
3090 2762
3091 pHCB = (HCS *) pHcb; 2763static void i91uSCBPost(u8 * host_mem, u8 * cblk_mem)
3092 pSCB = (SCB *) pScb; 2764{
3093 if ((pSRB = pSCB->SCB_Srb) == 0) { 2765 struct scsi_cmnd *cmnd; /* Pointer to SCSI request block */
3094 printk("i91uSCBPost: SRB pointer is empty\n"); 2766 struct initio_host *host;
2767 struct scsi_ctrl_blk *cblk;
3095 2768
3096 tul_release_scb(pHCB, pSCB); /* Release SCB for current channel */ 2769 host = (struct initio_host *) host_mem;
2770 cblk = (struct scsi_ctrl_blk *) cblk_mem;
2771 if ((cmnd = cblk->srb) == NULL) {
2772 printk(KERN_ERR "i91uSCBPost: SRB pointer is empty\n");
2773 WARN_ON(1);
2774 initio_release_scb(host, cblk); /* Release SCB for current channel */
3097 return; 2775 return;
3098 } 2776 }
3099 switch (pSCB->SCB_HaStat) { 2777
2778 /*
2779 * Remap the firmware error status into a mid layer one
2780 */
2781 switch (cblk->hastat) {
3100 case 0x0: 2782 case 0x0:
3101 case 0xa: /* Linked command complete without error and linked normally */ 2783 case 0xa: /* Linked command complete without error and linked normally */
3102 case 0xb: /* Linked command complete without error interrupt generated */ 2784 case 0xb: /* Linked command complete without error interrupt generated */
3103 pSCB->SCB_HaStat = 0; 2785 cblk->hastat = 0;
3104 break; 2786 break;
3105 2787
3106 case 0x11: /* Selection time out-The initiator selection or target 2788 case 0x11: /* Selection time out-The initiator selection or target
3107 reselection was not complete within the SCSI Time out period */ 2789 reselection was not complete within the SCSI Time out period */
3108 pSCB->SCB_HaStat = DID_TIME_OUT; 2790 cblk->hastat = DID_TIME_OUT;
3109 break; 2791 break;
3110 2792
3111 case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus 2793 case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
3112 phase sequence was requested by the target. The host adapter 2794 phase sequence was requested by the target. The host adapter
3113 will generate a SCSI Reset Condition, notifying the host with 2795 will generate a SCSI Reset Condition, notifying the host with
3114 a SCRD interrupt */ 2796 a SCRD interrupt */
3115 pSCB->SCB_HaStat = DID_RESET; 2797 cblk->hastat = DID_RESET;
3116 break; 2798 break;
3117 2799
3118 case 0x1a: /* SCB Aborted. 07/21/98 */ 2800 case 0x1a: /* SCB Aborted. 07/21/98 */
3119 pSCB->SCB_HaStat = DID_ABORT; 2801 cblk->hastat = DID_ABORT;
3120 break; 2802 break;
3121 2803
3122 case 0x12: /* Data overrun/underrun-The target attempted to transfer more data 2804 case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
@@ -3126,49 +2808,196 @@ static void i91uSCBPost(BYTE * pHcb, BYTE * pScb)
3126 case 0x16: /* Invalid SCB Operation Code. */ 2808 case 0x16: /* Invalid SCB Operation Code. */
3127 2809
3128 default: 2810 default:
3129 printk("ini9100u: %x %x\n", pSCB->SCB_HaStat, pSCB->SCB_TaStat); 2811 printk("ini9100u: %x %x\n", cblk->hastat, cblk->tastat);
3130 pSCB->SCB_HaStat = DID_ERROR; /* Couldn't find any better */ 2812 cblk->hastat = DID_ERROR; /* Couldn't find any better */
3131 break; 2813 break;
3132 } 2814 }
3133 2815
3134 pSRB->result = pSCB->SCB_TaStat | (pSCB->SCB_HaStat << 16); 2816 cmnd->result = cblk->tastat | (cblk->hastat << 16);
2817 WARN_ON(cmnd == NULL);
2818 i91u_unmap_scb(host->pci_dev, cmnd);
2819 cmnd->scsi_done(cmnd); /* Notify system DONE */
2820 initio_release_scb(host, cblk); /* Release SCB for current channel */
2821}
2822
2823static struct scsi_host_template initio_template = {
2824 .proc_name = "INI9100U",
2825 .name = "Initio INI-9X00U/UW SCSI device driver",
2826 .queuecommand = i91u_queuecommand,
2827 .eh_bus_reset_handler = i91u_bus_reset,
2828 .bios_param = i91u_biosparam,
2829 .can_queue = MAX_TARGETS * i91u_MAXQUEUE,
2830 .this_id = 1,
2831 .sg_tablesize = SG_ALL,
2832 .cmd_per_lun = 1,
2833 .use_clustering = ENABLE_CLUSTERING,
2834};
2835
2836static int initio_probe_one(struct pci_dev *pdev,
2837 const struct pci_device_id *id)
2838{
2839 struct Scsi_Host *shost;
2840 struct initio_host *host;
2841 u32 reg;
2842 u16 bios_seg;
2843 struct scsi_ctrl_blk *scb, *tmp, *prev = NULL /* silence gcc */;
2844 int num_scb, i, error;
2845
2846 error = pci_enable_device(pdev);
2847 if (error)
2848 return error;
2849
2850 pci_read_config_dword(pdev, 0x44, (u32 *) & reg);
2851 bios_seg = (u16) (reg & 0xFF);
2852 if (((reg & 0xFF00) >> 8) == 0xFF)
2853 reg = 0;
2854 bios_seg = (bios_seg << 8) + ((u16) ((reg & 0xFF00) >> 8));
2855
2856 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
2857 printk(KERN_WARNING "i91u: Could not set 32 bit DMA mask\n");
2858 error = -ENODEV;
2859 goto out_disable_device;
2860 }
2861 shost = scsi_host_alloc(&initio_template, sizeof(struct initio_host));
2862 if (!shost) {
2863 printk(KERN_WARNING "initio: Could not allocate host structure.\n");
2864 error = -ENOMEM;
2865 goto out_disable_device;
2866 }
2867 host = (struct initio_host *)shost->hostdata;
2868 memset(host, 0, sizeof(struct initio_host));
3135 2869
3136 if (pSRB == NULL) { 2870 if (!request_region(host->addr, 256, "i91u")) {
3137 printk("pSRB is NULL\n"); 2871 printk(KERN_WARNING "initio: I/O port range 0x%x is busy.\n", host->addr);
2872 error = -ENODEV;
2873 goto out_host_put;
3138 } 2874 }
3139 2875
3140 i91u_unmap_cmnd(pHCB->pci_dev, pSRB); 2876 if (initio_tag_enable) /* 1.01i */
3141 pSRB->scsi_done(pSRB); /* Notify system DONE */ 2877 num_scb = MAX_TARGETS * i91u_MAXQUEUE;
2878 else
2879 num_scb = MAX_TARGETS + 3; /* 1-tape, 1-CD_ROM, 1- extra */
3142 2880
3143 tul_release_scb(pHCB, pSCB); /* Release SCB for current channel */ 2881 for (; num_scb >= MAX_TARGETS + 3; num_scb--) {
3144} 2882 i = num_scb * sizeof(struct scsi_ctrl_blk);
2883 if ((scb = kzalloc(i, GFP_DMA)) != NULL)
2884 break;
2885 }
2886
2887 if (!scb) {
2888 printk(KERN_WARNING "initio: Cannot allocate SCB array.\n");
2889 error = -ENOMEM;
2890 goto out_release_region;
2891 }
3145 2892
3146/* 2893 host->num_scbs = num_scb;
3147 * Release ressources 2894 host->scb = scb;
2895 host->next_pending = scb;
2896 host->next_avail = scb;
2897 for (i = 0, tmp = scb; i < num_scb; i++, tmp++) {
2898 tmp->tagid = i;
2899 if (i != 0)
2900 prev->next = tmp;
2901 prev = tmp;
2902 }
2903 prev->next = NULL;
2904 host->scb_end = tmp;
2905 host->first_avail = scb;
2906 host->last_avail = prev;
2907
2908 initio_init(host, phys_to_virt(bios_seg << 4));
2909
2910 host->jsstatus0 = 0;
2911
2912 shost->io_port = host->addr;
2913 shost->n_io_port = 0xff;
2914 shost->can_queue = num_scb; /* 03/05/98 */
2915 shost->unique_id = host->addr;
2916 shost->max_id = host->max_tar;
2917 shost->max_lun = 32; /* 10/21/97 */
2918 shost->irq = pdev->irq;
2919 shost->this_id = host->scsi_id; /* Assign HCS index */
2920 shost->base = host->addr;
2921 shost->sg_tablesize = TOTAL_SG_ENTRY;
2922
2923 error = request_irq(pdev->irq, i91u_intr, IRQF_DISABLED|IRQF_SHARED, "i91u", shost);
2924 if (error < 0) {
2925 printk(KERN_WARNING "initio: Unable to request IRQ %d\n", pdev->irq);
2926 goto out_free_scbs;
2927 }
2928
2929 pci_set_drvdata(pdev, shost);
2930 host->pci_dev = pdev;
2931
2932 error = scsi_add_host(shost, &pdev->dev);
2933 if (error)
2934 goto out_free_irq;
2935 scsi_scan_host(shost);
2936 return 0;
2937out_free_irq:
2938 free_irq(pdev->irq, shost);
2939out_free_scbs:
2940 kfree(host->scb);
2941out_release_region:
2942 release_region(host->addr, 256);
2943out_host_put:
2944 scsi_host_put(shost);
2945out_disable_device:
2946 pci_disable_device(pdev);
2947 return error;
2948}
2949
2950/**
2951 * initio_remove_one - control shutdown
2952 * @pdev: PCI device being released
2953 *
2954 * Release the resources assigned to this adapter after it has
2955 * finished being used.
3148 */ 2956 */
3149static int i91u_release(struct Scsi_Host *hreg) 2957
2958static void initio_remove_one(struct pci_dev *pdev)
3150{ 2959{
3151 free_irq(hreg->irq, hreg); 2960 struct Scsi_Host *host = pci_get_drvdata(pdev);
3152 release_region(hreg->io_port, 256); 2961 struct initio_host *s = (struct initio_host *)host->hostdata;
3153 return 0; 2962 scsi_remove_host(host);
2963 free_irq(pdev->irq, host);
2964 release_region(s->addr, 256);
2965 scsi_host_put(host);
2966 pci_disable_device(pdev);
3154} 2967}
3155MODULE_LICENSE("Dual BSD/GPL"); 2968
3156 2969MODULE_LICENSE("GPL");
3157static struct scsi_host_template driver_template = { 2970
3158 .proc_name = "INI9100U", 2971static struct pci_device_id initio_pci_tbl[] = {
3159 .name = i91u_REVID, 2972 {PCI_VENDOR_ID_INIT, 0x9500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3160 .detect = i91u_detect, 2973 {PCI_VENDOR_ID_INIT, 0x9400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3161 .release = i91u_release, 2974 {PCI_VENDOR_ID_INIT, 0x9401, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3162 .queuecommand = i91u_queuecommand, 2975 {PCI_VENDOR_ID_INIT, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3163// .abort = i91u_abort, 2976 {PCI_VENDOR_ID_DOMEX, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
3164// .reset = i91u_reset, 2977 {0,}
3165 .eh_bus_reset_handler = i91u_bus_reset, 2978};
3166 .bios_param = i91u_biosparam, 2979MODULE_DEVICE_TABLE(pci, initio_pci_tbl);
3167 .can_queue = 1, 2980
3168 .this_id = 1, 2981static struct pci_driver initio_pci_driver = {
3169 .sg_tablesize = SG_ALL, 2982 .name = "initio",
3170 .cmd_per_lun = 1, 2983 .id_table = initio_pci_tbl,
3171 .use_clustering = ENABLE_CLUSTERING, 2984 .probe = initio_probe_one,
2985 .remove = __devexit_p(initio_remove_one),
3172}; 2986};
3173#include "scsi_module.c"
3174 2987
2988static int __init initio_init_driver(void)
2989{
2990 return pci_register_driver(&initio_pci_driver);
2991}
2992
2993static void __exit initio_exit_driver(void)
2994{
2995 pci_unregister_driver(&initio_pci_driver);
2996}
2997
2998MODULE_DESCRIPTION("Initio INI-9X00U/UW SCSI device driver");
2999MODULE_AUTHOR("Initio Corporation");
3000MODULE_LICENSE("GPL");
3001
3002module_init(initio_init_driver);
3003module_exit(initio_exit_driver);
diff --git a/drivers/scsi/initio.h b/drivers/scsi/initio.h
index acb67a4af2cc..cb48efa81fe2 100644
--- a/drivers/scsi/initio.h
+++ b/drivers/scsi/initio.h
@@ -4,6 +4,8 @@
4 * Copyright (c) 1994-1998 Initio Corporation 4 * Copyright (c) 1994-1998 Initio Corporation
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Cleanups (c) Copyright 2007 Red Hat <alan@redhat.com>
8 *
7 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2, or (at your option) 11 * the Free Software Foundation; either version 2, or (at your option)
@@ -18,27 +20,6 @@
18 * along with this program; see the file COPYING. If not, write to 20 * along with this program; see the file COPYING. If not, write to
19 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 21 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
20 * 22 *
21 * --------------------------------------------------------------------------
22 *
23 * Redistribution and use in source and binary forms, with or without
24 * modification, are permitted provided that the following conditions
25 * are met:
26 * 1. Redistributions of source code must retain the above copyright
27 * notice, this list of conditions, and the following disclaimer,
28 * without modification, immediately at the beginning of the file.
29 * 2. Redistributions in binary form must reproduce the above copyright
30 * notice, this list of conditions and the following disclaimer in the
31 * documentation and/or other materials provided with the distribution.
32 * 3. The name of the author may not be used to endorse or promote products
33 * derived from this software without specific prior written permission.
34 *
35 * Where this Software is combined with software released under the terms of
36 * the GNU General Public License ("GPL") and the terms of the GPL would require the
37 * combined work to also be released under the terms of the GPL, the terms
38 * and conditions of this License will apply in addition to those of the
39 * GPL with the exception of any terms or conditions of this License that
40 * conflict with, or are expressly prohibited by, the GPL.
41 *
42 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 23 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
43 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
44 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
@@ -56,17 +37,6 @@
56 37
57#include <linux/types.h> 38#include <linux/types.h>
58 39
59#define ULONG unsigned long
60#define USHORT unsigned short
61#define UCHAR unsigned char
62#define BYTE unsigned char
63#define WORD unsigned short
64#define DWORD unsigned long
65#define UBYTE unsigned char
66#define UWORD unsigned short
67#define UDWORD unsigned long
68#define U32 u32
69
70#define TOTAL_SG_ENTRY 32 40#define TOTAL_SG_ENTRY 32
71#define MAX_SUPPORTED_ADAPTERS 8 41#define MAX_SUPPORTED_ADAPTERS 8
72#define MAX_OFFSET 15 42#define MAX_OFFSET 15
@@ -368,55 +338,55 @@ typedef struct {
368/************************************************************************/ 338/************************************************************************/
369/* Scatter-Gather Element Structure */ 339/* Scatter-Gather Element Structure */
370/************************************************************************/ 340/************************************************************************/
371typedef struct SG_Struc { 341struct sg_entry {
372 U32 SG_Ptr; /* Data Pointer */ 342 u32 data; /* Data Pointer */
373 U32 SG_Len; /* Data Length */ 343 u32 len; /* Data Length */
374} SG; 344};
375 345
376/*********************************************************************** 346/***********************************************************************
377 SCSI Control Block 347 SCSI Control Block
378************************************************************************/ 348************************************************************************/
379typedef struct Scsi_Ctrl_Blk { 349struct scsi_ctrl_blk {
380 struct Scsi_Ctrl_Blk *SCB_NxtScb; 350 struct scsi_ctrl_blk *next;
381 UBYTE SCB_Status; /*4 */ 351 u8 status; /*4 */
382 UBYTE SCB_NxtStat; /*5 */ 352 u8 next_state; /*5 */
383 UBYTE SCB_Mode; /*6 */ 353 u8 mode; /*6 */
384 UBYTE SCB_Msgin; /*7 SCB_Res0 */ 354 u8 msgin; /*7 SCB_Res0 */
385 UWORD SCB_SGIdx; /*8 */ 355 u16 sgidx; /*8 */
386 UWORD SCB_SGMax; /*A */ 356 u16 sgmax; /*A */
387#ifdef ALPHA 357#ifdef ALPHA
388 U32 SCB_Reserved[2]; /*C */ 358 u32 reserved[2]; /*C */
389#else 359#else
390 U32 SCB_Reserved[3]; /*C */ 360 u32 reserved[3]; /*C */
391#endif 361#endif
392 362
393 U32 SCB_XferLen; /*18 Current xfer len */ 363 u32 xferlen; /*18 Current xfer len */
394 U32 SCB_TotXLen; /*1C Total xfer len */ 364 u32 totxlen; /*1C Total xfer len */
395 U32 SCB_PAddr; /*20 SCB phy. Addr. */ 365 u32 paddr; /*20 SCB phy. Addr. */
396 366
397 UBYTE SCB_Opcode; /*24 SCB command code */ 367 u8 opcode; /*24 SCB command code */
398 UBYTE SCB_Flags; /*25 SCB Flags */ 368 u8 flags; /*25 SCB Flags */
399 UBYTE SCB_Target; /*26 Target Id */ 369 u8 target; /*26 Target Id */
400 UBYTE SCB_Lun; /*27 Lun */ 370 u8 lun; /*27 Lun */
401 U32 SCB_BufPtr; /*28 Data Buffer Pointer */ 371 u32 bufptr; /*28 Data Buffer Pointer */
402 U32 SCB_BufLen; /*2C Data Allocation Length */ 372 u32 buflen; /*2C Data Allocation Length */
403 UBYTE SCB_SGLen; /*30 SG list # */ 373 u8 sglen; /*30 SG list # */
404 UBYTE SCB_SenseLen; /*31 Sense Allocation Length */ 374 u8 senselen; /*31 Sense Allocation Length */
405 UBYTE SCB_HaStat; /*32 */ 375 u8 hastat; /*32 */
406 UBYTE SCB_TaStat; /*33 */ 376 u8 tastat; /*33 */
407 UBYTE SCB_CDBLen; /*34 CDB Length */ 377 u8 cdblen; /*34 CDB Length */
408 UBYTE SCB_Ident; /*35 Identify */ 378 u8 ident; /*35 Identify */
409 UBYTE SCB_TagMsg; /*36 Tag Message */ 379 u8 tagmsg; /*36 Tag Message */
410 UBYTE SCB_TagId; /*37 Queue Tag */ 380 u8 tagid; /*37 Queue Tag */
411 UBYTE SCB_CDB[12]; /*38 */ 381 u8 cdb[12]; /*38 */
412 U32 SCB_SGPAddr; /*44 SG List/Sense Buf phy. Addr. */ 382 u32 sgpaddr; /*44 SG List/Sense Buf phy. Addr. */
413 U32 SCB_SensePtr; /*48 Sense data pointer */ 383 u32 senseptr; /*48 Sense data pointer */
414 void (*SCB_Post) (BYTE *, BYTE *); /*4C POST routine */ 384 void (*post) (u8 *, u8 *); /*4C POST routine */
415 struct scsi_cmnd *SCB_Srb; /*50 SRB Pointer */ 385 struct scsi_cmnd *srb; /*50 SRB Pointer */
416 SG SCB_SGList[TOTAL_SG_ENTRY]; /*54 Start of SG list */ 386 struct sg_entry sglist[TOTAL_SG_ENTRY]; /*54 Start of SG list */
417} SCB; 387};
418 388
419/* Bit Definition for SCB_Status */ 389/* Bit Definition for status */
420#define SCB_RENT 0x01 390#define SCB_RENT 0x01
421#define SCB_PEND 0x02 391#define SCB_PEND 0x02
422#define SCB_CONTIG 0x04 /* Contigent Allegiance */ 392#define SCB_CONTIG 0x04 /* Contigent Allegiance */
@@ -425,17 +395,17 @@ typedef struct Scsi_Ctrl_Blk {
425#define SCB_DONE 0x20 395#define SCB_DONE 0x20
426 396
427 397
428/* Opcodes of SCB_Opcode */ 398/* Opcodes for opcode */
429#define ExecSCSI 0x1 399#define ExecSCSI 0x1
430#define BusDevRst 0x2 400#define BusDevRst 0x2
431#define AbortCmd 0x3 401#define AbortCmd 0x3
432 402
433 403
434/* Bit Definition for SCB_Mode */ 404/* Bit Definition for mode */
435#define SCM_RSENS 0x01 /* request sense mode */ 405#define SCM_RSENS 0x01 /* request sense mode */
436 406
437 407
438/* Bit Definition for SCB_Flags */ 408/* Bit Definition for flags */
439#define SCF_DONE 0x01 409#define SCF_DONE 0x01
440#define SCF_POST 0x02 410#define SCF_POST 0x02
441#define SCF_SENSE 0x04 411#define SCF_SENSE 0x04
@@ -492,15 +462,14 @@ typedef struct Scsi_Ctrl_Blk {
492 Target Device Control Structure 462 Target Device Control Structure
493**********************************************************************/ 463**********************************************************************/
494 464
495typedef struct Tar_Ctrl_Struc { 465struct target_control {
496 UWORD TCS_Flags; /* 0 */ 466 u16 flags;
497 UBYTE TCS_JS_Period; /* 2 */ 467 u8 js_period;
498 UBYTE TCS_SConfig0; /* 3 */ 468 u8 sconfig0;
499 469 u16 drv_flags;
500 UWORD TCS_DrvFlags; /* 4 */ 470 u8 heads;
501 UBYTE TCS_DrvHead; /* 6 */ 471 u8 sectors;
502 UBYTE TCS_DrvSector; /* 7 */ 472};
503} TCS;
504 473
505/*********************************************************************** 474/***********************************************************************
506 Target Device Control Structure 475 Target Device Control Structure
@@ -523,62 +492,53 @@ typedef struct Tar_Ctrl_Struc {
523#define TCF_DRV_EN_TAG 0x0800 492#define TCF_DRV_EN_TAG 0x0800
524#define TCF_DRV_255_63 0x0400 493#define TCF_DRV_255_63 0x0400
525 494
526typedef struct I91u_Adpt_Struc {
527 UWORD ADPT_BIOS; /* 0 */
528 UWORD ADPT_BASE; /* 1 */
529 UBYTE ADPT_Bus; /* 2 */
530 UBYTE ADPT_Device; /* 3 */
531 UBYTE ADPT_INTR; /* 4 */
532} INI_ADPT_STRUCT;
533
534
535/*********************************************************************** 495/***********************************************************************
536 Host Adapter Control Structure 496 Host Adapter Control Structure
537************************************************************************/ 497************************************************************************/
538typedef struct Ha_Ctrl_Struc { 498struct initio_host {
539 UWORD HCS_Base; /* 00 */ 499 u16 addr; /* 00 */
540 UWORD HCS_BIOS; /* 02 */ 500 u16 bios_addr; /* 02 */
541 UBYTE HCS_Intr; /* 04 */ 501 u8 irq; /* 04 */
542 UBYTE HCS_SCSI_ID; /* 05 */ 502 u8 scsi_id; /* 05 */
543 UBYTE HCS_MaxTar; /* 06 */ 503 u8 max_tar; /* 06 */
544 UBYTE HCS_NumScbs; /* 07 */ 504 u8 num_scbs; /* 07 */
545 505
546 UBYTE HCS_Flags; /* 08 */ 506 u8 flags; /* 08 */
547 UBYTE HCS_Index; /* 09 */ 507 u8 index; /* 09 */
548 UBYTE HCS_HaId; /* 0A */ 508 u8 ha_id; /* 0A */
549 UBYTE HCS_Config; /* 0B */ 509 u8 config; /* 0B */
550 UWORD HCS_IdMask; /* 0C */ 510 u16 idmask; /* 0C */
551 UBYTE HCS_Semaph; /* 0E */ 511 u8 semaph; /* 0E */
552 UBYTE HCS_Phase; /* 0F */ 512 u8 phase; /* 0F */
553 UBYTE HCS_JSStatus0; /* 10 */ 513 u8 jsstatus0; /* 10 */
554 UBYTE HCS_JSInt; /* 11 */ 514 u8 jsint; /* 11 */
555 UBYTE HCS_JSStatus1; /* 12 */ 515 u8 jsstatus1; /* 12 */
556 UBYTE HCS_SConf1; /* 13 */ 516 u8 sconf1; /* 13 */
557 517
558 UBYTE HCS_Msg[8]; /* 14 */ 518 u8 msg[8]; /* 14 */
559 SCB *HCS_NxtAvail; /* 1C */ 519 struct scsi_ctrl_blk *next_avail; /* 1C */
560 SCB *HCS_Scb; /* 20 */ 520 struct scsi_ctrl_blk *scb; /* 20 */
561 SCB *HCS_ScbEnd; /* 24 */ 521 struct scsi_ctrl_blk *scb_end; /* 24 */ /*UNUSED*/
562 SCB *HCS_NxtPend; /* 28 */ 522 struct scsi_ctrl_blk *next_pending; /* 28 */
563 SCB *HCS_NxtContig; /* 2C */ 523 struct scsi_ctrl_blk *next_contig; /* 2C */ /*UNUSED*/
564 SCB *HCS_ActScb; /* 30 */ 524 struct scsi_ctrl_blk *active; /* 30 */
565 TCS *HCS_ActTcs; /* 34 */ 525 struct target_control *active_tc; /* 34 */
566 526
567 SCB *HCS_FirstAvail; /* 38 */ 527 struct scsi_ctrl_blk *first_avail; /* 38 */
568 SCB *HCS_LastAvail; /* 3C */ 528 struct scsi_ctrl_blk *last_avail; /* 3C */
569 SCB *HCS_FirstPend; /* 40 */ 529 struct scsi_ctrl_blk *first_pending; /* 40 */
570 SCB *HCS_LastPend; /* 44 */ 530 struct scsi_ctrl_blk *last_pending; /* 44 */
571 SCB *HCS_FirstBusy; /* 48 */ 531 struct scsi_ctrl_blk *first_busy; /* 48 */
572 SCB *HCS_LastBusy; /* 4C */ 532 struct scsi_ctrl_blk *last_busy; /* 4C */
573 SCB *HCS_FirstDone; /* 50 */ 533 struct scsi_ctrl_blk *first_done; /* 50 */
574 SCB *HCS_LastDone; /* 54 */ 534 struct scsi_ctrl_blk *last_done; /* 54 */
575 UBYTE HCS_MaxTags[16]; /* 58 */ 535 u8 max_tags[16]; /* 58 */
576 UBYTE HCS_ActTags[16]; /* 68 */ 536 u8 act_tags[16]; /* 68 */
577 TCS HCS_Tcs[MAX_TARGETS]; /* 78 */ 537 struct target_control targets[MAX_TARGETS]; /* 78 */
578 spinlock_t HCS_AvailLock; 538 spinlock_t avail_lock;
579 spinlock_t HCS_SemaphLock; 539 spinlock_t semaph_lock;
580 struct pci_dev *pci_dev; 540 struct pci_dev *pci_dev;
581} HCS; 541};
582 542
583/* Bit Definition for HCB_Config */ 543/* Bit Definition for HCB_Config */
584#define HCC_SCSI_RESET 0x01 544#define HCC_SCSI_RESET 0x01
@@ -599,47 +559,47 @@ typedef struct Ha_Ctrl_Struc {
599*******************************************************************/ 559*******************************************************************/
600 560
601typedef struct _NVRAM_SCSI { /* SCSI channel configuration */ 561typedef struct _NVRAM_SCSI { /* SCSI channel configuration */
602 UCHAR NVM_ChSCSIID; /* 0Ch -> Channel SCSI ID */ 562 u8 NVM_ChSCSIID; /* 0Ch -> Channel SCSI ID */
603 UCHAR NVM_ChConfig1; /* 0Dh -> Channel config 1 */ 563 u8 NVM_ChConfig1; /* 0Dh -> Channel config 1 */
604 UCHAR NVM_ChConfig2; /* 0Eh -> Channel config 2 */ 564 u8 NVM_ChConfig2; /* 0Eh -> Channel config 2 */
605 UCHAR NVM_NumOfTarg; /* 0Fh -> Number of SCSI target */ 565 u8 NVM_NumOfTarg; /* 0Fh -> Number of SCSI target */
606 /* SCSI target configuration */ 566 /* SCSI target configuration */
607 UCHAR NVM_Targ0Config; /* 10h -> Target 0 configuration */ 567 u8 NVM_Targ0Config; /* 10h -> Target 0 configuration */
608 UCHAR NVM_Targ1Config; /* 11h -> Target 1 configuration */ 568 u8 NVM_Targ1Config; /* 11h -> Target 1 configuration */
609 UCHAR NVM_Targ2Config; /* 12h -> Target 2 configuration */ 569 u8 NVM_Targ2Config; /* 12h -> Target 2 configuration */
610 UCHAR NVM_Targ3Config; /* 13h -> Target 3 configuration */ 570 u8 NVM_Targ3Config; /* 13h -> Target 3 configuration */
611 UCHAR NVM_Targ4Config; /* 14h -> Target 4 configuration */ 571 u8 NVM_Targ4Config; /* 14h -> Target 4 configuration */
612 UCHAR NVM_Targ5Config; /* 15h -> Target 5 configuration */ 572 u8 NVM_Targ5Config; /* 15h -> Target 5 configuration */
613 UCHAR NVM_Targ6Config; /* 16h -> Target 6 configuration */ 573 u8 NVM_Targ6Config; /* 16h -> Target 6 configuration */
614 UCHAR NVM_Targ7Config; /* 17h -> Target 7 configuration */ 574 u8 NVM_Targ7Config; /* 17h -> Target 7 configuration */
615 UCHAR NVM_Targ8Config; /* 18h -> Target 8 configuration */ 575 u8 NVM_Targ8Config; /* 18h -> Target 8 configuration */
616 UCHAR NVM_Targ9Config; /* 19h -> Target 9 configuration */ 576 u8 NVM_Targ9Config; /* 19h -> Target 9 configuration */
617 UCHAR NVM_TargAConfig; /* 1Ah -> Target A configuration */ 577 u8 NVM_TargAConfig; /* 1Ah -> Target A configuration */
618 UCHAR NVM_TargBConfig; /* 1Bh -> Target B configuration */ 578 u8 NVM_TargBConfig; /* 1Bh -> Target B configuration */
619 UCHAR NVM_TargCConfig; /* 1Ch -> Target C configuration */ 579 u8 NVM_TargCConfig; /* 1Ch -> Target C configuration */
620 UCHAR NVM_TargDConfig; /* 1Dh -> Target D configuration */ 580 u8 NVM_TargDConfig; /* 1Dh -> Target D configuration */
621 UCHAR NVM_TargEConfig; /* 1Eh -> Target E configuration */ 581 u8 NVM_TargEConfig; /* 1Eh -> Target E configuration */
622 UCHAR NVM_TargFConfig; /* 1Fh -> Target F configuration */ 582 u8 NVM_TargFConfig; /* 1Fh -> Target F configuration */
623} NVRAM_SCSI; 583} NVRAM_SCSI;
624 584
625typedef struct _NVRAM { 585typedef struct _NVRAM {
626/*----------header ---------------*/ 586/*----------header ---------------*/
627 USHORT NVM_Signature; /* 0,1: Signature */ 587 u16 NVM_Signature; /* 0,1: Signature */
628 UCHAR NVM_Size; /* 2: Size of data structure */ 588 u8 NVM_Size; /* 2: Size of data structure */
629 UCHAR NVM_Revision; /* 3: Revision of data structure */ 589 u8 NVM_Revision; /* 3: Revision of data structure */
630 /* ----Host Adapter Structure ---- */ 590 /* ----Host Adapter Structure ---- */
631 UCHAR NVM_ModelByte0; /* 4: Model number (byte 0) */ 591 u8 NVM_ModelByte0; /* 4: Model number (byte 0) */
632 UCHAR NVM_ModelByte1; /* 5: Model number (byte 1) */ 592 u8 NVM_ModelByte1; /* 5: Model number (byte 1) */
633 UCHAR NVM_ModelInfo; /* 6: Model information */ 593 u8 NVM_ModelInfo; /* 6: Model information */
634 UCHAR NVM_NumOfCh; /* 7: Number of SCSI channel */ 594 u8 NVM_NumOfCh; /* 7: Number of SCSI channel */
635 UCHAR NVM_BIOSConfig1; /* 8: BIOS configuration 1 */ 595 u8 NVM_BIOSConfig1; /* 8: BIOS configuration 1 */
636 UCHAR NVM_BIOSConfig2; /* 9: BIOS configuration 2 */ 596 u8 NVM_BIOSConfig2; /* 9: BIOS configuration 2 */
637 UCHAR NVM_HAConfig1; /* A: Hoat adapter configuration 1 */ 597 u8 NVM_HAConfig1; /* A: Hoat adapter configuration 1 */
638 UCHAR NVM_HAConfig2; /* B: Hoat adapter configuration 2 */ 598 u8 NVM_HAConfig2; /* B: Hoat adapter configuration 2 */
639 NVRAM_SCSI NVM_SCSIInfo[2]; 599 NVRAM_SCSI NVM_SCSIInfo[2];
640 UCHAR NVM_reserved[10]; 600 u8 NVM_reserved[10];
641 /* ---------- CheckSum ---------- */ 601 /* ---------- CheckSum ---------- */
642 USHORT NVM_CheckSum; /* 0x3E, 0x3F: Checksum of NVRam */ 602 u16 NVM_CheckSum; /* 0x3E, 0x3F: Checksum of NVRam */
643} NVRAM, *PNVRAM; 603} NVRAM, *PNVRAM;
644 604
645/* Bios Configuration for nvram->BIOSConfig1 */ 605/* Bios Configuration for nvram->BIOSConfig1 */
@@ -681,19 +641,6 @@ typedef struct _NVRAM {
681#define DISC_ALLOW 0xC0 /* Disconnect is allowed */ 641#define DISC_ALLOW 0xC0 /* Disconnect is allowed */
682#define SCSICMD_RequestSense 0x03 642#define SCSICMD_RequestSense 0x03
683 643
684typedef struct _HCSinfo {
685 ULONG base;
686 UCHAR vec;
687 UCHAR bios; /* High byte of BIOS address */
688 USHORT BaseAndBios; /* high byte: pHcsInfo->bios,low byte:pHcsInfo->base */
689} HCSINFO;
690
691#define TUL_RD(x,y) (UCHAR)(inb( (int)((ULONG)(x+y)) ))
692#define TUL_RDLONG(x,y) (ULONG)(inl((int)((ULONG)(x+y)) ))
693#define TUL_WR( adr,data) outb( (UCHAR)(data), (int)(adr))
694#define TUL_WRSHORT(adr,data) outw( (UWORD)(data), (int)(adr))
695#define TUL_WRLONG( adr,data) outl( (ULONG)(data), (int)(adr))
696
697#define SCSI_ABORT_SNOOZE 0 644#define SCSI_ABORT_SNOOZE 0
698#define SCSI_ABORT_SUCCESS 1 645#define SCSI_ABORT_SUCCESS 1
699#define SCSI_ABORT_PENDING 2 646#define SCSI_ABORT_PENDING 2
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index fa6ff295e568..072f57715658 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -540,32 +540,6 @@ struct ipr_cmnd *ipr_get_free_ipr_cmnd(struct ipr_ioa_cfg *ioa_cfg)
540} 540}
541 541
542/** 542/**
543 * ipr_unmap_sglist - Unmap scatterlist if mapped
544 * @ioa_cfg: ioa config struct
545 * @ipr_cmd: ipr command struct
546 *
547 * Return value:
548 * nothing
549 **/
550static void ipr_unmap_sglist(struct ipr_ioa_cfg *ioa_cfg,
551 struct ipr_cmnd *ipr_cmd)
552{
553 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
554
555 if (ipr_cmd->dma_use_sg) {
556 if (scsi_cmd->use_sg > 0) {
557 pci_unmap_sg(ioa_cfg->pdev, scsi_cmd->request_buffer,
558 scsi_cmd->use_sg,
559 scsi_cmd->sc_data_direction);
560 } else {
561 pci_unmap_single(ioa_cfg->pdev, ipr_cmd->dma_handle,
562 scsi_cmd->request_bufflen,
563 scsi_cmd->sc_data_direction);
564 }
565 }
566}
567
568/**
569 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts 543 * ipr_mask_and_clear_interrupts - Mask all and clear specified interrupts
570 * @ioa_cfg: ioa config struct 544 * @ioa_cfg: ioa config struct
571 * @clr_ints: interrupts to clear 545 * @clr_ints: interrupts to clear
@@ -677,7 +651,7 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
677 651
678 scsi_cmd->result |= (DID_ERROR << 16); 652 scsi_cmd->result |= (DID_ERROR << 16);
679 653
680 ipr_unmap_sglist(ioa_cfg, ipr_cmd); 654 scsi_dma_unmap(ipr_cmd->scsi_cmd);
681 scsi_cmd->scsi_done(scsi_cmd); 655 scsi_cmd->scsi_done(scsi_cmd);
682 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 656 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
683} 657}
@@ -4292,93 +4266,55 @@ static irqreturn_t ipr_isr(int irq, void *devp)
4292static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg, 4266static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
4293 struct ipr_cmnd *ipr_cmd) 4267 struct ipr_cmnd *ipr_cmd)
4294{ 4268{
4295 int i; 4269 int i, nseg;
4296 struct scatterlist *sglist; 4270 struct scatterlist *sg;
4297 u32 length; 4271 u32 length;
4298 u32 ioadl_flags = 0; 4272 u32 ioadl_flags = 0;
4299 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 4273 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4300 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb; 4274 struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
4301 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl; 4275 struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
4302 4276
4303 length = scsi_cmd->request_bufflen; 4277 length = scsi_bufflen(scsi_cmd);
4304 4278 if (!length)
4305 if (length == 0)
4306 return 0; 4279 return 0;
4307 4280
4308 if (scsi_cmd->use_sg) { 4281 nseg = scsi_dma_map(scsi_cmd);
4309 ipr_cmd->dma_use_sg = pci_map_sg(ioa_cfg->pdev, 4282 if (nseg < 0) {
4310 scsi_cmd->request_buffer, 4283 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
4311 scsi_cmd->use_sg, 4284 return -1;
4312 scsi_cmd->sc_data_direction); 4285 }
4313
4314 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4315 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4316 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4317 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4318 ioarcb->write_ioadl_len =
4319 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4320 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4321 ioadl_flags = IPR_IOADL_FLAGS_READ;
4322 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4323 ioarcb->read_ioadl_len =
4324 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4325 }
4326
4327 sglist = scsi_cmd->request_buffer;
4328 4286
4329 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) { 4287 ipr_cmd->dma_use_sg = nseg;
4330 ioadl = ioarcb->add_data.u.ioadl;
4331 ioarcb->write_ioadl_addr =
4332 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4333 offsetof(struct ipr_ioarcb, add_data));
4334 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4335 }
4336 4288
4337 for (i = 0; i < ipr_cmd->dma_use_sg; i++) { 4289 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4338 ioadl[i].flags_and_data_len = 4290 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4339 cpu_to_be32(ioadl_flags | sg_dma_len(&sglist[i])); 4291 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4340 ioadl[i].address = 4292 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4341 cpu_to_be32(sg_dma_address(&sglist[i])); 4293 ioarcb->write_ioadl_len =
4342 } 4294 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4295 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4296 ioadl_flags = IPR_IOADL_FLAGS_READ;
4297 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4298 ioarcb->read_ioadl_len =
4299 cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
4300 }
4343 4301
4344 if (likely(ipr_cmd->dma_use_sg)) { 4302 if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
4345 ioadl[i-1].flags_and_data_len |= 4303 ioadl = ioarcb->add_data.u.ioadl;
4346 cpu_to_be32(IPR_IOADL_FLAGS_LAST); 4304 ioarcb->write_ioadl_addr =
4347 return 0; 4305 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4348 } else 4306 offsetof(struct ipr_ioarcb, add_data));
4349 dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n"); 4307 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4350 } else { 4308 }
4351 if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
4352 ioadl_flags = IPR_IOADL_FLAGS_WRITE;
4353 ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
4354 ioarcb->write_data_transfer_length = cpu_to_be32(length);
4355 ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4356 } else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
4357 ioadl_flags = IPR_IOADL_FLAGS_READ;
4358 ioarcb->read_data_transfer_length = cpu_to_be32(length);
4359 ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
4360 }
4361 4309
4362 ipr_cmd->dma_handle = pci_map_single(ioa_cfg->pdev, 4310 scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
4363 scsi_cmd->request_buffer, length, 4311 ioadl[i].flags_and_data_len =
4364 scsi_cmd->sc_data_direction); 4312 cpu_to_be32(ioadl_flags | sg_dma_len(sg));
4365 4313 ioadl[i].address = cpu_to_be32(sg_dma_address(sg));
4366 if (likely(!pci_dma_mapping_error(ipr_cmd->dma_handle))) {
4367 ioadl = ioarcb->add_data.u.ioadl;
4368 ioarcb->write_ioadl_addr =
4369 cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
4370 offsetof(struct ipr_ioarcb, add_data));
4371 ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
4372 ipr_cmd->dma_use_sg = 1;
4373 ioadl[0].flags_and_data_len =
4374 cpu_to_be32(ioadl_flags | length | IPR_IOADL_FLAGS_LAST);
4375 ioadl[0].address = cpu_to_be32(ipr_cmd->dma_handle);
4376 return 0;
4377 } else
4378 dev_err(&ioa_cfg->pdev->dev, "pci_map_single failed!\n");
4379 } 4314 }
4380 4315
4381 return -1; 4316 ioadl[i-1].flags_and_data_len |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
4317 return 0;
4382} 4318}
4383 4319
4384/** 4320/**
@@ -4441,7 +4377,7 @@ static void ipr_erp_done(struct ipr_cmnd *ipr_cmd)
4441 res->needs_sync_complete = 1; 4377 res->needs_sync_complete = 1;
4442 res->in_erp = 0; 4378 res->in_erp = 0;
4443 } 4379 }
4444 ipr_unmap_sglist(ioa_cfg, ipr_cmd); 4380 scsi_dma_unmap(ipr_cmd->scsi_cmd);
4445 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 4381 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4446 scsi_cmd->scsi_done(scsi_cmd); 4382 scsi_cmd->scsi_done(scsi_cmd);
4447} 4383}
@@ -4819,7 +4755,7 @@ static void ipr_erp_start(struct ipr_ioa_cfg *ioa_cfg,
4819 break; 4755 break;
4820 } 4756 }
4821 4757
4822 ipr_unmap_sglist(ioa_cfg, ipr_cmd); 4758 scsi_dma_unmap(ipr_cmd->scsi_cmd);
4823 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 4759 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4824 scsi_cmd->scsi_done(scsi_cmd); 4760 scsi_cmd->scsi_done(scsi_cmd);
4825} 4761}
@@ -4840,10 +4776,10 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
4840 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 4776 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
4841 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc); 4777 u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
4842 4778
4843 scsi_cmd->resid = be32_to_cpu(ipr_cmd->ioasa.residual_data_len); 4779 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
4844 4780
4845 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 4781 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
4846 ipr_unmap_sglist(ioa_cfg, ipr_cmd); 4782 scsi_dma_unmap(ipr_cmd->scsi_cmd);
4847 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q); 4783 list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
4848 scsi_cmd->scsi_done(scsi_cmd); 4784 scsi_cmd->scsi_done(scsi_cmd);
4849 } else 4785 } else
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index 8b704f73055a..84f4f5d06f9d 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -211,19 +211,6 @@ module_param(ips, charp, 0);
211#warning "This driver has only been tested on the x86/ia64/x86_64 platforms" 211#warning "This driver has only been tested on the x86/ia64/x86_64 platforms"
212#endif 212#endif
213 213
214#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,0)
215#include <linux/blk.h>
216#include "sd.h"
217#define IPS_LOCK_SAVE(lock,flags) spin_lock_irqsave(&io_request_lock,flags)
218#define IPS_UNLOCK_RESTORE(lock,flags) spin_unlock_irqrestore(&io_request_lock,flags)
219#ifndef __devexit_p
220#define __devexit_p(x) x
221#endif
222#else
223#define IPS_LOCK_SAVE(lock,flags) do{spin_lock(lock);(void)flags;}while(0)
224#define IPS_UNLOCK_RESTORE(lock,flags) do{spin_unlock(lock);(void)flags;}while(0)
225#endif
226
227#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \ 214#define IPS_DMA_DIR(scb) ((!scb->scsi_cmd || ips_is_passthru(scb->scsi_cmd) || \
228 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \ 215 DMA_NONE == scb->scsi_cmd->sc_data_direction) ? \
229 PCI_DMA_BIDIRECTIONAL : \ 216 PCI_DMA_BIDIRECTIONAL : \
@@ -381,24 +368,13 @@ static struct scsi_host_template ips_driver_template = {
381 .eh_abort_handler = ips_eh_abort, 368 .eh_abort_handler = ips_eh_abort,
382 .eh_host_reset_handler = ips_eh_reset, 369 .eh_host_reset_handler = ips_eh_reset,
383 .proc_name = "ips", 370 .proc_name = "ips",
384#if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)
385 .proc_info = ips_proc_info, 371 .proc_info = ips_proc_info,
386 .slave_configure = ips_slave_configure, 372 .slave_configure = ips_slave_configure,
387#else
388 .proc_info = ips_proc24_info,
389 .select_queue_depths = ips_select_queue_depth,
390#endif
391 .bios_param = ips_biosparam, 373 .bios_param = ips_biosparam,
392 .this_id = -1, 374 .this_id = -1,
393 .sg_tablesize = IPS_MAX_SG, 375 .sg_tablesize = IPS_MAX_SG,
394 .cmd_per_lun = 3, 376 .cmd_per_lun = 3,
395 .use_clustering = ENABLE_CLUSTERING, 377 .use_clustering = ENABLE_CLUSTERING,
396#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
397 .use_new_eh_code = 1,
398#endif
399#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) && LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
400 .highmem_io = 1,
401#endif
402}; 378};
403 379
404 380
@@ -731,7 +707,7 @@ ips_release(struct Scsi_Host *sh)
731 /* free IRQ */ 707 /* free IRQ */
732 free_irq(ha->irq, ha); 708 free_irq(ha->irq, ha);
733 709
734 IPS_REMOVE_HOST(sh); 710 scsi_remove_host(sh);
735 scsi_host_put(sh); 711 scsi_host_put(sh);
736 712
737 ips_released_controllers++; 713 ips_released_controllers++;
@@ -813,7 +789,6 @@ int ips_eh_abort(struct scsi_cmnd *SC)
813 ips_ha_t *ha; 789 ips_ha_t *ha;
814 ips_copp_wait_item_t *item; 790 ips_copp_wait_item_t *item;
815 int ret; 791 int ret;
816 unsigned long cpu_flags;
817 struct Scsi_Host *host; 792 struct Scsi_Host *host;
818 793
819 METHOD_TRACE("ips_eh_abort", 1); 794 METHOD_TRACE("ips_eh_abort", 1);
@@ -830,7 +805,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
830 if (!ha->active) 805 if (!ha->active)
831 return (FAILED); 806 return (FAILED);
832 807
833 IPS_LOCK_SAVE(host->host_lock, cpu_flags); 808 spin_lock(host->host_lock);
834 809
835 /* See if the command is on the copp queue */ 810 /* See if the command is on the copp queue */
836 item = ha->copp_waitlist.head; 811 item = ha->copp_waitlist.head;
@@ -851,7 +826,7 @@ int ips_eh_abort(struct scsi_cmnd *SC)
851 ret = (FAILED); 826 ret = (FAILED);
852 } 827 }
853 828
854 IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); 829 spin_unlock(host->host_lock);
855 return ret; 830 return ret;
856} 831}
857 832
@@ -1176,18 +1151,10 @@ static int ips_queue(struct scsi_cmnd *SC, void (*done) (struct scsi_cmnd *))
1176/* Set bios geometry for the controller */ 1151/* Set bios geometry for the controller */
1177/* */ 1152/* */
1178/****************************************************************************/ 1153/****************************************************************************/
1179static int 1154static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1180#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) 1155 sector_t capacity, int geom[])
1181ips_biosparam(Disk * disk, kdev_t dev, int geom[])
1182{
1183 ips_ha_t *ha = (ips_ha_t *) disk->device->host->hostdata;
1184 unsigned long capacity = disk->capacity;
1185#else
1186ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1187 sector_t capacity, int geom[])
1188{ 1156{
1189 ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata; 1157 ips_ha_t *ha = (ips_ha_t *) sdev->host->hostdata;
1190#endif
1191 int heads; 1158 int heads;
1192 int sectors; 1159 int sectors;
1193 int cylinders; 1160 int cylinders;
@@ -1225,70 +1192,6 @@ ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1225 return (0); 1192 return (0);
1226} 1193}
1227 1194
1228#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
1229
1230/* ips_proc24_info is a wrapper around ips_proc_info *
1231 * for compatibility with the 2.4 scsi parameters */
1232static int
1233ips_proc24_info(char *buffer, char **start, off_t offset, int length,
1234 int hostno, int func)
1235{
1236 int i;
1237
1238 for (i = 0; i < ips_next_controller; i++) {
1239 if (ips_sh[i] && ips_sh[i]->host_no == hostno) {
1240 return ips_proc_info(ips_sh[i], buffer, start,
1241 offset, length, func);
1242 }
1243 }
1244 return -EINVAL;
1245}
1246
1247/****************************************************************************/
1248/* */
1249/* Routine Name: ips_select_queue_depth */
1250/* */
1251/* Routine Description: */
1252/* */
1253/* Select queue depths for the devices on the contoller */
1254/* */
1255/****************************************************************************/
1256static void
1257ips_select_queue_depth(struct Scsi_Host *host, struct scsi_device * scsi_devs)
1258{
1259 struct scsi_device *device;
1260 ips_ha_t *ha;
1261 int count = 0;
1262 int min;
1263
1264 ha = IPS_HA(host);
1265 min = ha->max_cmds / 4;
1266
1267 for (device = scsi_devs; device; device = device->next) {
1268 if (device->host == host) {
1269 if ((device->channel == 0) && (device->type == 0))
1270 count++;
1271 }
1272 }
1273
1274 for (device = scsi_devs; device; device = device->next) {
1275 if (device->host == host) {
1276 if ((device->channel == 0) && (device->type == 0)) {
1277 device->queue_depth =
1278 (ha->max_cmds - 1) / count;
1279 if (device->queue_depth < min)
1280 device->queue_depth = min;
1281 } else {
1282 device->queue_depth = 2;
1283 }
1284
1285 if (device->queue_depth < 2)
1286 device->queue_depth = 2;
1287 }
1288 }
1289}
1290
1291#else
1292/****************************************************************************/ 1195/****************************************************************************/
1293/* */ 1196/* */
1294/* Routine Name: ips_slave_configure */ 1197/* Routine Name: ips_slave_configure */
@@ -1316,7 +1219,6 @@ ips_slave_configure(struct scsi_device * SDptr)
1316 SDptr->skip_ms_page_3f = 1; 1219 SDptr->skip_ms_page_3f = 1;
1317 return 0; 1220 return 0;
1318} 1221}
1319#endif
1320 1222
1321/****************************************************************************/ 1223/****************************************************************************/
1322/* */ 1224/* */
@@ -1331,7 +1233,6 @@ static irqreturn_t
1331do_ipsintr(int irq, void *dev_id) 1233do_ipsintr(int irq, void *dev_id)
1332{ 1234{
1333 ips_ha_t *ha; 1235 ips_ha_t *ha;
1334 unsigned long cpu_flags;
1335 struct Scsi_Host *host; 1236 struct Scsi_Host *host;
1336 int irqstatus; 1237 int irqstatus;
1337 1238
@@ -1347,16 +1248,16 @@ do_ipsintr(int irq, void *dev_id)
1347 return IRQ_HANDLED; 1248 return IRQ_HANDLED;
1348 } 1249 }
1349 1250
1350 IPS_LOCK_SAVE(host->host_lock, cpu_flags); 1251 spin_lock(host->host_lock);
1351 1252
1352 if (!ha->active) { 1253 if (!ha->active) {
1353 IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); 1254 spin_unlock(host->host_lock);
1354 return IRQ_HANDLED; 1255 return IRQ_HANDLED;
1355 } 1256 }
1356 1257
1357 irqstatus = (*ha->func.intr) (ha); 1258 irqstatus = (*ha->func.intr) (ha);
1358 1259
1359 IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); 1260 spin_unlock(host->host_lock);
1360 1261
1361 /* start the next command */ 1262 /* start the next command */
1362 ips_next(ha, IPS_INTR_ON); 1263 ips_next(ha, IPS_INTR_ON);
@@ -2730,7 +2631,6 @@ ips_next(ips_ha_t * ha, int intr)
2730 struct scsi_cmnd *q; 2631 struct scsi_cmnd *q;
2731 ips_copp_wait_item_t *item; 2632 ips_copp_wait_item_t *item;
2732 int ret; 2633 int ret;
2733 unsigned long cpu_flags = 0;
2734 struct Scsi_Host *host; 2634 struct Scsi_Host *host;
2735 METHOD_TRACE("ips_next", 1); 2635 METHOD_TRACE("ips_next", 1);
2736 2636
@@ -2742,7 +2642,7 @@ ips_next(ips_ha_t * ha, int intr)
2742 * this command won't time out 2642 * this command won't time out
2743 */ 2643 */
2744 if (intr == IPS_INTR_ON) 2644 if (intr == IPS_INTR_ON)
2745 IPS_LOCK_SAVE(host->host_lock, cpu_flags); 2645 spin_lock(host->host_lock);
2746 2646
2747 if ((ha->subsys->param[3] & 0x300000) 2647 if ((ha->subsys->param[3] & 0x300000)
2748 && (ha->scb_activelist.count == 0)) { 2648 && (ha->scb_activelist.count == 0)) {
@@ -2769,14 +2669,14 @@ ips_next(ips_ha_t * ha, int intr)
2769 item = ips_removeq_copp_head(&ha->copp_waitlist); 2669 item = ips_removeq_copp_head(&ha->copp_waitlist);
2770 ha->num_ioctl++; 2670 ha->num_ioctl++;
2771 if (intr == IPS_INTR_ON) 2671 if (intr == IPS_INTR_ON)
2772 IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); 2672 spin_unlock(host->host_lock);
2773 scb->scsi_cmd = item->scsi_cmd; 2673 scb->scsi_cmd = item->scsi_cmd;
2774 kfree(item); 2674 kfree(item);
2775 2675
2776 ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr); 2676 ret = ips_make_passthru(ha, scb->scsi_cmd, scb, intr);
2777 2677
2778 if (intr == IPS_INTR_ON) 2678 if (intr == IPS_INTR_ON)
2779 IPS_LOCK_SAVE(host->host_lock, cpu_flags); 2679 spin_lock(host->host_lock);
2780 switch (ret) { 2680 switch (ret) {
2781 case IPS_FAILURE: 2681 case IPS_FAILURE:
2782 if (scb->scsi_cmd) { 2682 if (scb->scsi_cmd) {
@@ -2846,7 +2746,7 @@ ips_next(ips_ha_t * ha, int intr)
2846 SC = ips_removeq_wait(&ha->scb_waitlist, q); 2746 SC = ips_removeq_wait(&ha->scb_waitlist, q);
2847 2747
2848 if (intr == IPS_INTR_ON) 2748 if (intr == IPS_INTR_ON)
2849 IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); /* Unlock HA after command is taken off queue */ 2749 spin_unlock(host->host_lock); /* Unlock HA after command is taken off queue */
2850 2750
2851 SC->result = DID_OK; 2751 SC->result = DID_OK;
2852 SC->host_scribble = NULL; 2752 SC->host_scribble = NULL;
@@ -2919,7 +2819,7 @@ ips_next(ips_ha_t * ha, int intr)
2919 scb->dcdb.transfer_length = 0; 2819 scb->dcdb.transfer_length = 0;
2920 } 2820 }
2921 if (intr == IPS_INTR_ON) 2821 if (intr == IPS_INTR_ON)
2922 IPS_LOCK_SAVE(host->host_lock, cpu_flags); 2822 spin_lock(host->host_lock);
2923 2823
2924 ret = ips_send_cmd(ha, scb); 2824 ret = ips_send_cmd(ha, scb);
2925 2825
@@ -2958,7 +2858,7 @@ ips_next(ips_ha_t * ha, int intr)
2958 } /* end while */ 2858 } /* end while */
2959 2859
2960 if (intr == IPS_INTR_ON) 2860 if (intr == IPS_INTR_ON)
2961 IPS_UNLOCK_RESTORE(host->host_lock, cpu_flags); 2861 spin_unlock(host->host_lock);
2962} 2862}
2963 2863
2964/****************************************************************************/ 2864/****************************************************************************/
@@ -7004,7 +6904,6 @@ ips_register_scsi(int index)
7004 kfree(oldha); 6904 kfree(oldha);
7005 ips_sh[index] = sh; 6905 ips_sh[index] = sh;
7006 ips_ha[index] = ha; 6906 ips_ha[index] = ha;
7007 IPS_SCSI_SET_DEVICE(sh, ha);
7008 6907
7009 /* Store away needed values for later use */ 6908 /* Store away needed values for later use */
7010 sh->io_port = ha->io_addr; 6909 sh->io_port = ha->io_addr;
@@ -7016,17 +6915,16 @@ ips_register_scsi(int index)
7016 sh->cmd_per_lun = sh->hostt->cmd_per_lun; 6915 sh->cmd_per_lun = sh->hostt->cmd_per_lun;
7017 sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma; 6916 sh->unchecked_isa_dma = sh->hostt->unchecked_isa_dma;
7018 sh->use_clustering = sh->hostt->use_clustering; 6917 sh->use_clustering = sh->hostt->use_clustering;
7019
7020#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,7)
7021 sh->max_sectors = 128; 6918 sh->max_sectors = 128;
7022#endif
7023 6919
7024 sh->max_id = ha->ntargets; 6920 sh->max_id = ha->ntargets;
7025 sh->max_lun = ha->nlun; 6921 sh->max_lun = ha->nlun;
7026 sh->max_channel = ha->nbus - 1; 6922 sh->max_channel = ha->nbus - 1;
7027 sh->can_queue = ha->max_cmds - 1; 6923 sh->can_queue = ha->max_cmds - 1;
7028 6924
7029 IPS_ADD_HOST(sh, NULL); 6925 scsi_add_host(sh, NULL);
6926 scsi_scan_host(sh);
6927
7030 return 0; 6928 return 0;
7031} 6929}
7032 6930
@@ -7069,7 +6967,7 @@ ips_module_init(void)
7069 return -ENODEV; 6967 return -ENODEV;
7070 ips_driver_template.module = THIS_MODULE; 6968 ips_driver_template.module = THIS_MODULE;
7071 ips_order_controllers(); 6969 ips_order_controllers();
7072 if (IPS_REGISTER_HOSTS(&ips_driver_template)) { 6970 if (!ips_detect(&ips_driver_template)) {
7073 pci_unregister_driver(&ips_pci_driver); 6971 pci_unregister_driver(&ips_pci_driver);
7074 return -ENODEV; 6972 return -ENODEV;
7075 } 6973 }
@@ -7087,7 +6985,6 @@ ips_module_init(void)
7087static void __exit 6985static void __exit
7088ips_module_exit(void) 6986ips_module_exit(void)
7089{ 6987{
7090 IPS_UNREGISTER_HOSTS(&ips_driver_template);
7091 pci_unregister_driver(&ips_pci_driver); 6988 pci_unregister_driver(&ips_pci_driver);
7092 unregister_reboot_notifier(&ips_notifier); 6989 unregister_reboot_notifier(&ips_notifier);
7093} 6990}
@@ -7443,15 +7340,9 @@ ips_init_phase2(int index)
7443 return SUCCESS; 7340 return SUCCESS;
7444} 7341}
7445 7342
7446#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,9)
7447MODULE_LICENSE("GPL"); 7343MODULE_LICENSE("GPL");
7448#endif
7449
7450MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING); 7344MODULE_DESCRIPTION("IBM ServeRAID Adapter Driver " IPS_VER_STRING);
7451
7452#ifdef MODULE_VERSION
7453MODULE_VERSION(IPS_VER_STRING); 7345MODULE_VERSION(IPS_VER_STRING);
7454#endif
7455 7346
7456 7347
7457/* 7348/*
diff --git a/drivers/scsi/ips.h b/drivers/scsi/ips.h
index b726dcc424b1..24123d537c58 100644
--- a/drivers/scsi/ips.h
+++ b/drivers/scsi/ips.h
@@ -58,10 +58,6 @@
58 /* 58 /*
59 * Some handy macros 59 * Some handy macros
60 */ 60 */
61 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,4,20) || defined CONFIG_HIGHIO
62 #define IPS_HIGHIO
63 #endif
64
65 #define IPS_HA(x) ((ips_ha_t *) x->hostdata) 61 #define IPS_HA(x) ((ips_ha_t *) x->hostdata)
66 #define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs) 62 #define IPS_COMMAND_ID(ha, scb) (int) (scb - ha->scbs)
67 #define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \ 63 #define IPS_IS_TROMBONE(ha) (((ha->device_id == IPS_DEVICEID_COPPERHEAD) && \
@@ -84,38 +80,8 @@
84 #define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \ 80 #define IPS_SGLIST_SIZE(ha) (IPS_USE_ENH_SGLIST(ha) ? \
85 sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST)) 81 sizeof(IPS_ENH_SG_LIST) : sizeof(IPS_STD_SG_LIST))
86 82
87 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,4,4) 83 #define IPS_PRINTK(level, pcidev, format, arg...) \
88 #define pci_set_dma_mask(dev,mask) ( mask > 0xffffffff ? 1:0 )
89 #define scsi_set_pci_device(sh,dev) (0)
90 #endif
91
92 #ifndef IRQ_NONE
93 typedef void irqreturn_t;
94 #define IRQ_NONE
95 #define IRQ_HANDLED
96 #define IRQ_RETVAL(x)
97 #endif
98
99 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
100 #define IPS_REGISTER_HOSTS(SHT) scsi_register_module(MODULE_SCSI_HA,SHT)
101 #define IPS_UNREGISTER_HOSTS(SHT) scsi_unregister_module(MODULE_SCSI_HA,SHT)
102 #define IPS_ADD_HOST(shost,device)
103 #define IPS_REMOVE_HOST(shost)
104 #define IPS_SCSI_SET_DEVICE(sh,ha) scsi_set_pci_device(sh, (ha)->pcidev)
105 #define IPS_PRINTK(level, pcidev, format, arg...) \
106 printk(level "%s %s:" format , "ips" , \
107 (pcidev)->slot_name , ## arg)
108 #define scsi_host_alloc(sh,size) scsi_register(sh,size)
109 #define scsi_host_put(sh) scsi_unregister(sh)
110 #else
111 #define IPS_REGISTER_HOSTS(SHT) (!ips_detect(SHT))
112 #define IPS_UNREGISTER_HOSTS(SHT)
113 #define IPS_ADD_HOST(shost,device) do { scsi_add_host(shost,device); scsi_scan_host(shost); } while (0)
114 #define IPS_REMOVE_HOST(shost) scsi_remove_host(shost)
115 #define IPS_SCSI_SET_DEVICE(sh,ha) do { } while (0)
116 #define IPS_PRINTK(level, pcidev, format, arg...) \
117 dev_printk(level , &((pcidev)->dev) , format , ## arg) 84 dev_printk(level , &((pcidev)->dev) , format , ## arg)
118 #endif
119 85
120 #define MDELAY(n) \ 86 #define MDELAY(n) \
121 do { \ 87 do { \
@@ -134,7 +100,7 @@
134 #define pci_dma_hi32(a) ((a >> 16) >> 16) 100 #define pci_dma_hi32(a) ((a >> 16) >> 16)
135 #define pci_dma_lo32(a) (a & 0xffffffff) 101 #define pci_dma_lo32(a) (a & 0xffffffff)
136 102
137 #if (BITS_PER_LONG > 32) || (defined CONFIG_HIGHMEM64G && defined IPS_HIGHIO) 103 #if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
138 #define IPS_ENABLE_DMA64 (1) 104 #define IPS_ENABLE_DMA64 (1)
139 #else 105 #else
140 #define IPS_ENABLE_DMA64 (0) 106 #define IPS_ENABLE_DMA64 (0)
@@ -451,16 +417,10 @@
451 /* 417 /*
452 * Scsi_Host Template 418 * Scsi_Host Template
453 */ 419 */
454#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0)
455 static int ips_proc24_info(char *, char **, off_t, int, int, int);
456 static void ips_select_queue_depth(struct Scsi_Host *, struct scsi_device *);
457 static int ips_biosparam(Disk *disk, kdev_t dev, int geom[]);
458#else
459 static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int); 420 static int ips_proc_info(struct Scsi_Host *, char *, char **, off_t, int, int);
460 static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev, 421 static int ips_biosparam(struct scsi_device *sdev, struct block_device *bdev,
461 sector_t capacity, int geom[]); 422 sector_t capacity, int geom[]);
462 static int ips_slave_configure(struct scsi_device *SDptr); 423 static int ips_slave_configure(struct scsi_device *SDptr);
463#endif
464 424
465/* 425/*
466 * Raid Command Formats 426 * Raid Command Formats
diff --git a/drivers/scsi/jazz_esp.c b/drivers/scsi/jazz_esp.c
index 81e497d9eae0..0140766c0286 100644
--- a/drivers/scsi/jazz_esp.c
+++ b/drivers/scsi/jazz_esp.c
@@ -1,6 +1,6 @@
1/* jazz_esp.c: ESP front-end for MIPS JAZZ systems. 1/* jazz_esp.c: ESP front-end for MIPS JAZZ systems.
2 * 2 *
3 * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende) 3 * Copyright (C) 2007 Thomas Bogendörfer (tsbogend@alpha.frankende)
4 */ 4 */
5 5
6#include <linux/kernel.h> 6#include <linux/kernel.h>
diff --git a/drivers/scsi/libsas/sas_scsi_host.c b/drivers/scsi/libsas/sas_scsi_host.c
index b4b52694497c..d70ddfda93fc 100644
--- a/drivers/scsi/libsas/sas_scsi_host.c
+++ b/drivers/scsi/libsas/sas_scsi_host.c
@@ -76,8 +76,8 @@ static void sas_scsi_task_done(struct sas_task *task)
76 hs = DID_NO_CONNECT; 76 hs = DID_NO_CONNECT;
77 break; 77 break;
78 case SAS_DATA_UNDERRUN: 78 case SAS_DATA_UNDERRUN:
79 sc->resid = ts->residual; 79 scsi_set_resid(sc, ts->residual);
80 if (sc->request_bufflen - sc->resid < sc->underflow) 80 if (scsi_bufflen(sc) - scsi_get_resid(sc) < sc->underflow)
81 hs = DID_ERROR; 81 hs = DID_ERROR;
82 break; 82 break;
83 case SAS_DATA_OVERRUN: 83 case SAS_DATA_OVERRUN:
@@ -161,9 +161,9 @@ static struct sas_task *sas_create_task(struct scsi_cmnd *cmd,
161 task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd); 161 task->ssp_task.task_attr = sas_scsi_get_task_attr(cmd);
162 memcpy(task->ssp_task.cdb, cmd->cmnd, 16); 162 memcpy(task->ssp_task.cdb, cmd->cmnd, 16);
163 163
164 task->scatter = cmd->request_buffer; 164 task->scatter = scsi_sglist(cmd);
165 task->num_scatter = cmd->use_sg; 165 task->num_scatter = scsi_sg_count(cmd);
166 task->total_xfer_len = cmd->request_bufflen; 166 task->total_xfer_len = scsi_bufflen(cmd);
167 task->data_dir = cmd->sc_data_direction; 167 task->data_dir = cmd->sc_data_direction;
168 168
169 task->task_done = sas_scsi_task_done; 169 task->task_done = sas_scsi_task_done;
diff --git a/drivers/scsi/mac53c94.c b/drivers/scsi/mac53c94.c
index 5806ede120a4..b12ad7c7c673 100644
--- a/drivers/scsi/mac53c94.c
+++ b/drivers/scsi/mac53c94.c
@@ -77,7 +77,7 @@ static int mac53c94_queue(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *
77 for (i = 0; i < cmd->cmd_len; ++i) 77 for (i = 0; i < cmd->cmd_len; ++i)
78 printk(" %.2x", cmd->cmnd[i]); 78 printk(" %.2x", cmd->cmnd[i]);
79 printk("\n" KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n", 79 printk("\n" KERN_DEBUG "use_sg=%d request_bufflen=%d request_buffer=%p\n",
80 cmd->use_sg, cmd->request_bufflen, cmd->request_buffer); 80 scsi_sg_count(cmd), scsi_bufflen(cmd), scsi_sglist(cmd));
81 } 81 }
82#endif 82#endif
83 83
@@ -173,8 +173,7 @@ static void mac53c94_start(struct fsc_state *state)
173 writeb(CMD_SELECT, &regs->command); 173 writeb(CMD_SELECT, &regs->command);
174 state->phase = selecting; 174 state->phase = selecting;
175 175
176 if (cmd->use_sg > 0 || cmd->request_bufflen != 0) 176 set_dma_cmds(state, cmd);
177 set_dma_cmds(state, cmd);
178} 177}
179 178
180static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id) 179static irqreturn_t do_mac53c94_interrupt(int irq, void *dev_id)
@@ -262,7 +261,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
262 writeb(CMD_NOP, &regs->command); 261 writeb(CMD_NOP, &regs->command);
263 /* set DMA controller going if any data to transfer */ 262 /* set DMA controller going if any data to transfer */
264 if ((stat & (STAT_MSG|STAT_CD)) == 0 263 if ((stat & (STAT_MSG|STAT_CD)) == 0
265 && (cmd->use_sg > 0 || cmd->request_bufflen != 0)) { 264 && (scsi_sg_count(cmd) > 0 || scsi_bufflen(cmd))) {
266 nb = cmd->SCp.this_residual; 265 nb = cmd->SCp.this_residual;
267 if (nb > 0xfff0) 266 if (nb > 0xfff0)
268 nb = 0xfff0; 267 nb = 0xfff0;
@@ -310,14 +309,7 @@ static void mac53c94_interrupt(int irq, void *dev_id)
310 printk(KERN_DEBUG "intr %x before data xfer complete\n", intr); 309 printk(KERN_DEBUG "intr %x before data xfer complete\n", intr);
311 } 310 }
312 writel(RUN << 16, &dma->control); /* stop dma */ 311 writel(RUN << 16, &dma->control); /* stop dma */
313 if (cmd->use_sg != 0) { 312 scsi_dma_unmap(cmd);
314 pci_unmap_sg(state->pdev,
315 (struct scatterlist *)cmd->request_buffer,
316 cmd->use_sg, cmd->sc_data_direction);
317 } else {
318 pci_unmap_single(state->pdev, state->dma_addr,
319 cmd->request_bufflen, cmd->sc_data_direction);
320 }
321 /* should check dma status */ 313 /* should check dma status */
322 writeb(CMD_I_COMPLETE, &regs->command); 314 writeb(CMD_I_COMPLETE, &regs->command);
323 state->phase = completing; 315 state->phase = completing;
@@ -365,47 +357,35 @@ static void cmd_done(struct fsc_state *state, int result)
365 */ 357 */
366static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd) 358static void set_dma_cmds(struct fsc_state *state, struct scsi_cmnd *cmd)
367{ 359{
368 int i, dma_cmd, total; 360 int i, dma_cmd, total, nseg;
369 struct scatterlist *scl; 361 struct scatterlist *scl;
370 struct dbdma_cmd *dcmds; 362 struct dbdma_cmd *dcmds;
371 dma_addr_t dma_addr; 363 dma_addr_t dma_addr;
372 u32 dma_len; 364 u32 dma_len;
373 365
366 nseg = scsi_dma_map(cmd);
367 BUG_ON(nseg < 0);
368 if (!nseg)
369 return;
370
374 dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ? 371 dma_cmd = cmd->sc_data_direction == DMA_TO_DEVICE ?
375 OUTPUT_MORE : INPUT_MORE; 372 OUTPUT_MORE : INPUT_MORE;
376 dcmds = state->dma_cmds; 373 dcmds = state->dma_cmds;
377 if (cmd->use_sg > 0) { 374 total = 0;
378 int nseg; 375
379 376 scsi_for_each_sg(cmd, scl, nseg, i) {
380 total = 0; 377 dma_addr = sg_dma_address(scl);
381 scl = (struct scatterlist *) cmd->request_buffer; 378 dma_len = sg_dma_len(scl);
382 nseg = pci_map_sg(state->pdev, scl, cmd->use_sg, 379 if (dma_len > 0xffff)
383 cmd->sc_data_direction); 380 panic("mac53c94: scatterlist element >= 64k");
384 for (i = 0; i < nseg; ++i) { 381 total += dma_len;
385 dma_addr = sg_dma_address(scl); 382 st_le16(&dcmds->req_count, dma_len);
386 dma_len = sg_dma_len(scl); 383 st_le16(&dcmds->command, dma_cmd);
387 if (dma_len > 0xffff)
388 panic("mac53c94: scatterlist element >= 64k");
389 total += dma_len;
390 st_le16(&dcmds->req_count, dma_len);
391 st_le16(&dcmds->command, dma_cmd);
392 st_le32(&dcmds->phy_addr, dma_addr);
393 dcmds->xfer_status = 0;
394 ++scl;
395 ++dcmds;
396 }
397 } else {
398 total = cmd->request_bufflen;
399 if (total > 0xffff)
400 panic("mac53c94: transfer size >= 64k");
401 dma_addr = pci_map_single(state->pdev, cmd->request_buffer,
402 total, cmd->sc_data_direction);
403 state->dma_addr = dma_addr;
404 st_le16(&dcmds->req_count, total);
405 st_le32(&dcmds->phy_addr, dma_addr); 384 st_le32(&dcmds->phy_addr, dma_addr);
406 dcmds->xfer_status = 0; 385 dcmds->xfer_status = 0;
407 ++dcmds; 386 ++dcmds;
408 } 387 }
388
409 dma_cmd += OUTPUT_LAST - OUTPUT_MORE; 389 dma_cmd += OUTPUT_LAST - OUTPUT_MORE;
410 st_le16(&dcmds[-1].command, dma_cmd); 390 st_le16(&dcmds[-1].command, dma_cmd);
411 st_le16(&dcmds->command, DBDMA_STOP); 391 st_le16(&dcmds->command, DBDMA_STOP);
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 3cce75d70263..40ee07dab450 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -3571,7 +3571,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
3571 /* 3571 /*
3572 * The user passthru structure 3572 * The user passthru structure
3573 */ 3573 */
3574 upthru = (mega_passthru __user *)MBOX(uioc)->xferaddr; 3574 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
3575 3575
3576 /* 3576 /*
3577 * Copy in the user passthru here. 3577 * Copy in the user passthru here.
@@ -3623,7 +3623,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
3623 /* 3623 /*
3624 * Get the user data 3624 * Get the user data
3625 */ 3625 */
3626 if( copy_from_user(data, (char __user *)uxferaddr, 3626 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3627 pthru->dataxferlen) ) { 3627 pthru->dataxferlen) ) {
3628 rval = (-EFAULT); 3628 rval = (-EFAULT);
3629 goto freemem_and_return; 3629 goto freemem_and_return;
@@ -3649,7 +3649,7 @@ megadev_ioctl(struct inode *inode, struct file *filep, unsigned int cmd,
3649 * Is data going up-stream 3649 * Is data going up-stream
3650 */ 3650 */
3651 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) { 3651 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
3652 if( copy_to_user((char __user *)uxferaddr, data, 3652 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3653 pthru->dataxferlen) ) { 3653 pthru->dataxferlen) ) {
3654 rval = (-EFAULT); 3654 rval = (-EFAULT);
3655 } 3655 }
@@ -3702,7 +3702,7 @@ freemem_and_return:
3702 /* 3702 /*
3703 * Get the user data 3703 * Get the user data
3704 */ 3704 */
3705 if( copy_from_user(data, (char __user *)uxferaddr, 3705 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3706 uioc.xferlen) ) { 3706 uioc.xferlen) ) {
3707 3707
3708 pci_free_consistent(pdev, 3708 pci_free_consistent(pdev,
@@ -3742,7 +3742,7 @@ freemem_and_return:
3742 * Is data going up-stream 3742 * Is data going up-stream
3743 */ 3743 */
3744 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) { 3744 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
3745 if( copy_to_user((char __user *)uxferaddr, data, 3745 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3746 uioc.xferlen) ) { 3746 uioc.xferlen) ) {
3747 3747
3748 rval = (-EFAULT); 3748 rval = (-EFAULT);
diff --git a/drivers/scsi/mvme16x.c b/drivers/scsi/mvme16x.c
deleted file mode 100644
index 575fe6f7e0ec..000000000000
--- a/drivers/scsi/mvme16x.c
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * Detection routine for the NCR53c710 based MVME16x SCSI Controllers for Linux.
3 *
4 * Based on work by Alan Hourihane
5 */
6#include <linux/types.h>
7#include <linux/mm.h>
8#include <linux/blkdev.h>
9
10#include <asm/page.h>
11#include <asm/pgtable.h>
12#include <asm/mvme16xhw.h>
13#include <asm/irq.h>
14
15#include "scsi.h"
16#include <scsi/scsi_host.h>
17#include "53c7xx.h"
18#include "mvme16x.h"
19
20#include<linux/stat.h>
21
22
23int mvme16x_scsi_detect(struct scsi_host_template *tpnt)
24{
25 static unsigned char called = 0;
26 int clock;
27 long long options;
28
29 if (!MACH_IS_MVME16x)
30 return 0;
31 if (mvme16x_config & MVME16x_CONFIG_NO_SCSICHIP) {
32 printk ("SCSI detection disabled, SCSI chip not present\n");
33 return 0;
34 }
35 if (called)
36 return 0;
37
38 tpnt->proc_name = "MVME16x";
39
40 options = OPTION_MEMORY_MAPPED|OPTION_DEBUG_TEST1|OPTION_INTFLY|OPTION_SYNCHRONOUS|OPTION_ALWAYS_SYNCHRONOUS|OPTION_DISCONNECT;
41
42 clock = 66000000; /* 66MHz SCSI Clock */
43
44 ncr53c7xx_init(tpnt, 0, 710, (unsigned long)0xfff47000,
45 0, MVME16x_IRQ_SCSI, DMA_NONE,
46 options, clock);
47 called = 1;
48 return 1;
49}
50
51static int mvme16x_scsi_release(struct Scsi_Host *shost)
52{
53 if (shost->irq)
54 free_irq(shost->irq, NULL);
55 if (shost->dma_channel != 0xff)
56 free_dma(shost->dma_channel);
57 if (shost->io_port && shost->n_io_port)
58 release_region(shost->io_port, shost->n_io_port);
59 scsi_unregister(shost);
60 return 0;
61}
62
63static struct scsi_host_template driver_template = {
64 .name = "MVME16x NCR53c710 SCSI",
65 .detect = mvme16x_scsi_detect,
66 .release = mvme16x_scsi_release,
67 .queuecommand = NCR53c7xx_queue_command,
68 .abort = NCR53c7xx_abort,
69 .reset = NCR53c7xx_reset,
70 .can_queue = 24,
71 .this_id = 7,
72 .sg_tablesize = 63,
73 .cmd_per_lun = 3,
74 .use_clustering = DISABLE_CLUSTERING
75};
76
77
78#include "scsi_module.c"
diff --git a/drivers/scsi/mvme16x.h b/drivers/scsi/mvme16x.h
deleted file mode 100644
index 73e33b37a3f8..000000000000
--- a/drivers/scsi/mvme16x.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef MVME16x_SCSI_H
2#define MVME16x_SCSI_H
3
4#include <linux/types.h>
5
6int mvme16x_scsi_detect(struct scsi_host_template *);
7const char *NCR53c7x0_info(void);
8int NCR53c7xx_queue_command(Scsi_Cmnd *, void (*done)(Scsi_Cmnd *));
9int NCR53c7xx_abort(Scsi_Cmnd *);
10int NCR53c7x0_release (struct Scsi_Host *);
11int NCR53c7xx_reset(Scsi_Cmnd *, unsigned int);
12void NCR53c7x0_intr(int irq, void *dev_id);
13
14#ifndef CMD_PER_LUN
15#define CMD_PER_LUN 3
16#endif
17
18#ifndef CAN_QUEUE
19#define CAN_QUEUE 24
20#endif
21
22#include <scsi/scsicam.h>
23
24#endif /* MVME16x_SCSI_H */
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index f6f561d26bf0..8cc9e64bbdff 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -49,10 +49,6 @@
49#include <scsi/scsi_host.h> 49#include <scsi/scsi_host.h>
50#include <scsi/scsi_ioctl.h> 50#include <scsi/scsi_ioctl.h>
51 51
52#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0))
53# include <linux/blk.h>
54#endif
55
56#include "nsp32.h" 52#include "nsp32.h"
57 53
58 54
@@ -199,17 +195,9 @@ static int __init init_nsp32 (void);
199static void __exit exit_nsp32 (void); 195static void __exit exit_nsp32 (void);
200 196
201/* struct struct scsi_host_template */ 197/* struct struct scsi_host_template */
202#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
203static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int); 198static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
204#else
205static int nsp32_proc_info (char *, char **, off_t, int, int, int);
206#endif
207 199
208#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
209static int nsp32_detect (struct pci_dev *pdev); 200static int nsp32_detect (struct pci_dev *pdev);
210#else
211static int nsp32_detect (struct scsi_host_template *);
212#endif
213static int nsp32_queuecommand(struct scsi_cmnd *, 201static int nsp32_queuecommand(struct scsi_cmnd *,
214 void (*done)(struct scsi_cmnd *)); 202 void (*done)(struct scsi_cmnd *));
215static const char *nsp32_info (struct Scsi_Host *); 203static const char *nsp32_info (struct Scsi_Host *);
@@ -296,15 +284,7 @@ static struct scsi_host_template nsp32_template = {
296 .eh_abort_handler = nsp32_eh_abort, 284 .eh_abort_handler = nsp32_eh_abort,
297 .eh_bus_reset_handler = nsp32_eh_bus_reset, 285 .eh_bus_reset_handler = nsp32_eh_bus_reset,
298 .eh_host_reset_handler = nsp32_eh_host_reset, 286 .eh_host_reset_handler = nsp32_eh_host_reset,
299#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,74))
300 .detect = nsp32_detect,
301 .release = nsp32_release,
302#endif
303#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,2))
304 .use_new_eh_code = 1,
305#else
306/* .highmem_io = 1, */ 287/* .highmem_io = 1, */
307#endif
308}; 288};
309 289
310#include "nsp32_io.h" 290#include "nsp32_io.h"
@@ -739,7 +719,7 @@ static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
739 command = 0; 719 command = 0;
740 command |= (TRANSFER_GO | ALL_COUNTER_CLR); 720 command |= (TRANSFER_GO | ALL_COUNTER_CLR);
741 if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { 721 if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
742 if (SCpnt->request_bufflen > 0) { 722 if (scsi_bufflen(SCpnt) > 0) {
743 command |= BM_START; 723 command |= BM_START;
744 } 724 }
745 } else if (data->trans_method & NSP32_TRANSFER_MMIO) { 725 } else if (data->trans_method & NSP32_TRANSFER_MMIO) {
@@ -888,31 +868,28 @@ static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
888static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt) 868static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
889{ 869{
890 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; 870 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
891 struct scatterlist *sgl; 871 struct scatterlist *sg;
892 nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt; 872 nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
893 int num, i; 873 int num, i;
894 u32_le l; 874 u32_le l;
895 875
896 if (SCpnt->request_bufflen == 0) {
897 return TRUE;
898 }
899
900 if (sgt == NULL) { 876 if (sgt == NULL) {
901 nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null"); 877 nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
902 return FALSE; 878 return FALSE;
903 } 879 }
904 880
905 if (SCpnt->use_sg) { 881 num = scsi_dma_map(SCpnt);
906 sgl = (struct scatterlist *)SCpnt->request_buffer; 882 if (!num)
907 num = pci_map_sg(data->Pci, sgl, SCpnt->use_sg, 883 return TRUE;
908 SCpnt->sc_data_direction); 884 else if (num < 0)
909 for (i = 0; i < num; i++) { 885 return FALSE;
886 else {
887 scsi_for_each_sg(SCpnt, sg, num, i) {
910 /* 888 /*
911 * Build nsp32_sglist, substitute sg dma addresses. 889 * Build nsp32_sglist, substitute sg dma addresses.
912 */ 890 */
913 sgt[i].addr = cpu_to_le32(sg_dma_address(sgl)); 891 sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
914 sgt[i].len = cpu_to_le32(sg_dma_len(sgl)); 892 sgt[i].len = cpu_to_le32(sg_dma_len(sg));
915 sgl++;
916 893
917 if (le32_to_cpu(sgt[i].len) > 0x10000) { 894 if (le32_to_cpu(sgt[i].len) > 0x10000) {
918 nsp32_msg(KERN_ERR, 895 nsp32_msg(KERN_ERR,
@@ -929,23 +906,6 @@ static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
929 /* set end mark */ 906 /* set end mark */
930 l = le32_to_cpu(sgt[num-1].len); 907 l = le32_to_cpu(sgt[num-1].len);
931 sgt[num-1].len = cpu_to_le32(l | SGTEND); 908 sgt[num-1].len = cpu_to_le32(l | SGTEND);
932
933 } else {
934 SCpnt->SCp.have_data_in = pci_map_single(data->Pci,
935 SCpnt->request_buffer, SCpnt->request_bufflen,
936 SCpnt->sc_data_direction);
937
938 sgt[0].addr = cpu_to_le32(SCpnt->SCp.have_data_in);
939 sgt[0].len = cpu_to_le32(SCpnt->request_bufflen | SGTEND); /* set end mark */
940
941 if (SCpnt->request_bufflen > 0x10000) {
942 nsp32_msg(KERN_ERR,
943 "can't transfer over 64KB at a time, size=0x%lx", SCpnt->request_bufflen);
944 return FALSE;
945 }
946 nsp32_dbg(NSP32_DEBUG_SGLIST, "single : addr 0x%lx len=0x%lx",
947 le32_to_cpu(sgt[0].addr),
948 le32_to_cpu(sgt[0].len ));
949 } 909 }
950 910
951 return TRUE; 911 return TRUE;
@@ -962,7 +922,7 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
962 "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x " 922 "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
963 "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x", 923 "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
964 SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len, 924 SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
965 SCpnt->use_sg, SCpnt->request_buffer, SCpnt->request_bufflen); 925 scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
966 926
967 if (data->CurrentSC != NULL) { 927 if (data->CurrentSC != NULL) {
968 nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request"); 928 nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
@@ -994,10 +954,10 @@ static int nsp32_queuecommand(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
994 data->CurrentSC = SCpnt; 954 data->CurrentSC = SCpnt;
995 SCpnt->SCp.Status = CHECK_CONDITION; 955 SCpnt->SCp.Status = CHECK_CONDITION;
996 SCpnt->SCp.Message = 0; 956 SCpnt->SCp.Message = 0;
997 SCpnt->resid = SCpnt->request_bufflen; 957 scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));
998 958
999 SCpnt->SCp.ptr = (char *) SCpnt->request_buffer; 959 SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
1000 SCpnt->SCp.this_residual = SCpnt->request_bufflen; 960 SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
1001 SCpnt->SCp.buffer = NULL; 961 SCpnt->SCp.buffer = NULL;
1002 SCpnt->SCp.buffers_residual = 0; 962 SCpnt->SCp.buffers_residual = 0;
1003 963
@@ -1210,13 +1170,9 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1210 unsigned long flags; 1170 unsigned long flags;
1211 int ret; 1171 int ret;
1212 int handled = 0; 1172 int handled = 0;
1213
1214#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1215 struct Scsi_Host *host = data->Host; 1173 struct Scsi_Host *host = data->Host;
1174
1216 spin_lock_irqsave(host->host_lock, flags); 1175 spin_lock_irqsave(host->host_lock, flags);
1217#else
1218 spin_lock_irqsave(&io_request_lock, flags);
1219#endif
1220 1176
1221 /* 1177 /*
1222 * IRQ check, then enable IRQ mask 1178 * IRQ check, then enable IRQ mask
@@ -1312,7 +1268,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1312 } 1268 }
1313 1269
1314 if ((auto_stat & DATA_IN_PHASE) && 1270 if ((auto_stat & DATA_IN_PHASE) &&
1315 (SCpnt->resid > 0) && 1271 (scsi_get_resid(SCpnt) > 0) &&
1316 ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) { 1272 ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
1317 printk( "auto+fifo\n"); 1273 printk( "auto+fifo\n");
1318 //nsp32_pio_read(SCpnt); 1274 //nsp32_pio_read(SCpnt);
@@ -1333,7 +1289,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1333 nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx", 1289 nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
1334 nsp32_read4(base, SAVED_SACK_CNT)); 1290 nsp32_read4(base, SAVED_SACK_CNT));
1335 1291
1336 SCpnt->resid = 0; /* all data transfered! */ 1292 scsi_set_resid(SCpnt, 0); /* all data transfered! */
1337 } 1293 }
1338 1294
1339 /* 1295 /*
@@ -1480,11 +1436,7 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1480 nsp32_write2(base, IRQ_CONTROL, 0); 1436 nsp32_write2(base, IRQ_CONTROL, 0);
1481 1437
1482 out2: 1438 out2:
1483#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
1484 spin_unlock_irqrestore(host->host_lock, flags); 1439 spin_unlock_irqrestore(host->host_lock, flags);
1485#else
1486 spin_unlock_irqrestore(&io_request_lock, flags);
1487#endif
1488 1440
1489 nsp32_dbg(NSP32_DEBUG_INTR, "exit"); 1441 nsp32_dbg(NSP32_DEBUG_INTR, "exit");
1490 1442
@@ -1499,28 +1451,15 @@ static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
1499 nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\ 1451 nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length, length - (pos - buffer));\
1500 } \ 1452 } \
1501 } while(0) 1453 } while(0)
1502static int nsp32_proc_info( 1454
1503#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73)) 1455static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
1504 struct Scsi_Host *host, 1456 off_t offset, int length, int inout)
1505#endif
1506 char *buffer,
1507 char **start,
1508 off_t offset,
1509 int length,
1510#if !(LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
1511 int hostno,
1512#endif
1513 int inout)
1514{ 1457{
1515 char *pos = buffer; 1458 char *pos = buffer;
1516 int thislength; 1459 int thislength;
1517 unsigned long flags; 1460 unsigned long flags;
1518 nsp32_hw_data *data; 1461 nsp32_hw_data *data;
1519#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
1520 int hostno; 1462 int hostno;
1521#else
1522 struct Scsi_Host *host;
1523#endif
1524 unsigned int base; 1463 unsigned int base;
1525 unsigned char mode_reg; 1464 unsigned char mode_reg;
1526 int id, speed; 1465 int id, speed;
@@ -1531,15 +1470,7 @@ static int nsp32_proc_info(
1531 return -EINVAL; 1470 return -EINVAL;
1532 } 1471 }
1533 1472
1534#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
1535 hostno = host->host_no; 1473 hostno = host->host_no;
1536#else
1537 /* search this HBA host */
1538 host = scsi_host_hn_get(hostno);
1539 if (host == NULL) {
1540 return -ESRCH;
1541 }
1542#endif
1543 data = (nsp32_hw_data *)host->hostdata; 1474 data = (nsp32_hw_data *)host->hostdata;
1544 base = host->io_port; 1475 base = host->io_port;
1545 1476
@@ -1626,25 +1557,8 @@ static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
1626 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata; 1557 nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
1627 unsigned int base = SCpnt->device->host->io_port; 1558 unsigned int base = SCpnt->device->host->io_port;
1628 1559
1629 /* 1560 scsi_dma_unmap(SCpnt);
1630 * unmap pci
1631 */
1632 if (SCpnt->request_bufflen == 0) {
1633 goto skip;
1634 }
1635 1561
1636 if (SCpnt->use_sg) {
1637 pci_unmap_sg(data->Pci,
1638 (struct scatterlist *)SCpnt->request_buffer,
1639 SCpnt->use_sg, SCpnt->sc_data_direction);
1640 } else {
1641 pci_unmap_single(data->Pci,
1642 (u32)SCpnt->SCp.have_data_in,
1643 SCpnt->request_bufflen,
1644 SCpnt->sc_data_direction);
1645 }
1646
1647 skip:
1648 /* 1562 /*
1649 * clear TRANSFERCONTROL_BM_START 1563 * clear TRANSFERCONTROL_BM_START
1650 */ 1564 */
@@ -1800,7 +1714,7 @@ static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
1800 SCpnt->SCp.Message = 0; 1714 SCpnt->SCp.Message = 0;
1801 nsp32_dbg(NSP32_DEBUG_BUSFREE, 1715 nsp32_dbg(NSP32_DEBUG_BUSFREE,
1802 "normal end stat=0x%x resid=0x%x\n", 1716 "normal end stat=0x%x resid=0x%x\n",
1803 SCpnt->SCp.Status, SCpnt->resid); 1717 SCpnt->SCp.Status, scsi_get_resid(SCpnt));
1804 SCpnt->result = (DID_OK << 16) | 1718 SCpnt->result = (DID_OK << 16) |
1805 (SCpnt->SCp.Message << 8) | 1719 (SCpnt->SCp.Message << 8) |
1806 (SCpnt->SCp.Status << 0); 1720 (SCpnt->SCp.Status << 0);
@@ -1844,7 +1758,7 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
1844 unsigned int restlen, sentlen; 1758 unsigned int restlen, sentlen;
1845 u32_le len, addr; 1759 u32_le len, addr;
1846 1760
1847 nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", SCpnt->resid); 1761 nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));
1848 1762
1849 /* adjust saved SACK count with 4 byte start address boundary */ 1763 /* adjust saved SACK count with 4 byte start address boundary */
1850 s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3; 1764 s_sacklen -= le32_to_cpu(sgt[old_entry].addr) & 3;
@@ -1888,12 +1802,12 @@ static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen
1888 return; 1802 return;
1889 1803
1890 last: 1804 last:
1891 if (SCpnt->resid < sentlen) { 1805 if (scsi_get_resid(SCpnt) < sentlen) {
1892 nsp32_msg(KERN_ERR, "resid underflow"); 1806 nsp32_msg(KERN_ERR, "resid underflow");
1893 } 1807 }
1894 1808
1895 SCpnt->resid -= sentlen; 1809 scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sentlen);
1896 nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", SCpnt->resid); 1810 nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x", scsi_get_resid(SCpnt));
1897 1811
1898 /* update hostdata and lun */ 1812 /* update hostdata and lun */
1899 1813
@@ -2022,7 +1936,7 @@ static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short comma
2022 transfer = 0; 1936 transfer = 0;
2023 transfer |= (TRANSFER_GO | ALL_COUNTER_CLR); 1937 transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
2024 if (data->trans_method & NSP32_TRANSFER_BUSMASTER) { 1938 if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
2025 if (SCpnt->request_bufflen > 0) { 1939 if (scsi_bufflen(SCpnt) > 0) {
2026 transfer |= BM_START; 1940 transfer |= BM_START;
2027 } 1941 }
2028 } else if (data->trans_method & NSP32_TRANSFER_MMIO) { 1942 } else if (data->trans_method & NSP32_TRANSFER_MMIO) {
@@ -2674,17 +2588,7 @@ static void nsp32_sack_negate(nsp32_hw_data *data)
2674 * 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly) 2588 * 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly)
2675 * 0xc00-0xfff: CardBus status registers 2589 * 0xc00-0xfff: CardBus status registers
2676 */ 2590 */
2677#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
2678#define DETECT_OK 0
2679#define DETECT_NG 1
2680#define PCIDEV pdev
2681static int nsp32_detect(struct pci_dev *pdev) 2591static int nsp32_detect(struct pci_dev *pdev)
2682#else
2683#define DETECT_OK 1
2684#define DETECT_NG 0
2685#define PCIDEV (data->Pci)
2686static int nsp32_detect(struct scsi_host_template *sht)
2687#endif
2688{ 2592{
2689 struct Scsi_Host *host; /* registered host structure */ 2593 struct Scsi_Host *host; /* registered host structure */
2690 struct resource *res; 2594 struct resource *res;
@@ -2697,11 +2601,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2697 /* 2601 /*
2698 * register this HBA as SCSI device 2602 * register this HBA as SCSI device
2699 */ 2603 */
2700#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
2701 host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data)); 2604 host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data));
2702#else
2703 host = scsi_register(sht, sizeof(nsp32_hw_data));
2704#endif
2705 if (host == NULL) { 2605 if (host == NULL) {
2706 nsp32_msg (KERN_ERR, "failed to scsi register"); 2606 nsp32_msg (KERN_ERR, "failed to scsi register");
2707 goto err; 2607 goto err;
@@ -2719,9 +2619,6 @@ static int nsp32_detect(struct scsi_host_template *sht)
2719 host->unique_id = data->BaseAddress; 2619 host->unique_id = data->BaseAddress;
2720 host->n_io_port = data->NumAddress; 2620 host->n_io_port = data->NumAddress;
2721 host->base = (unsigned long)data->MmioAddress; 2621 host->base = (unsigned long)data->MmioAddress;
2722#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,63))
2723 scsi_set_pci_device(host, PCIDEV);
2724#endif
2725 2622
2726 data->Host = host; 2623 data->Host = host;
2727 spin_lock_init(&(data->Lock)); 2624 spin_lock_init(&(data->Lock));
@@ -2776,7 +2673,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2776 /* 2673 /*
2777 * setup DMA 2674 * setup DMA
2778 */ 2675 */
2779 if (pci_set_dma_mask(PCIDEV, DMA_32BIT_MASK) != 0) { 2676 if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
2780 nsp32_msg (KERN_ERR, "failed to set PCI DMA mask"); 2677 nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
2781 goto scsi_unregister; 2678 goto scsi_unregister;
2782 } 2679 }
@@ -2784,7 +2681,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2784 /* 2681 /*
2785 * allocate autoparam DMA resource. 2682 * allocate autoparam DMA resource.
2786 */ 2683 */
2787 data->autoparam = pci_alloc_consistent(PCIDEV, sizeof(nsp32_autoparam), &(data->auto_paddr)); 2684 data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
2788 if (data->autoparam == NULL) { 2685 if (data->autoparam == NULL) {
2789 nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); 2686 nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
2790 goto scsi_unregister; 2687 goto scsi_unregister;
@@ -2793,7 +2690,7 @@ static int nsp32_detect(struct scsi_host_template *sht)
2793 /* 2690 /*
2794 * allocate scatter-gather DMA resource. 2691 * allocate scatter-gather DMA resource.
2795 */ 2692 */
2796 data->sg_list = pci_alloc_consistent(PCIDEV, NSP32_SG_TABLE_SIZE, 2693 data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
2797 &(data->sg_paddr)); 2694 &(data->sg_paddr));
2798 if (data->sg_list == NULL) { 2695 if (data->sg_list == NULL) {
2799 nsp32_msg(KERN_ERR, "failed to allocate DMA memory"); 2696 nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
@@ -2883,16 +2780,14 @@ static int nsp32_detect(struct scsi_host_template *sht)
2883 goto free_irq; 2780 goto free_irq;
2884 } 2781 }
2885 2782
2886#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73)) 2783 ret = scsi_add_host(host, &pdev->dev);
2887 ret = scsi_add_host(host, &PCIDEV->dev);
2888 if (ret) { 2784 if (ret) {
2889 nsp32_msg(KERN_ERR, "failed to add scsi host"); 2785 nsp32_msg(KERN_ERR, "failed to add scsi host");
2890 goto free_region; 2786 goto free_region;
2891 } 2787 }
2892 scsi_scan_host(host); 2788 scsi_scan_host(host);
2893#endif 2789 pci_set_drvdata(pdev, host);
2894 pci_set_drvdata(PCIDEV, host); 2790 return 0;
2895 return DETECT_OK;
2896 2791
2897 free_region: 2792 free_region:
2898 release_region(host->io_port, host->n_io_port); 2793 release_region(host->io_port, host->n_io_port);
@@ -2901,22 +2796,19 @@ static int nsp32_detect(struct scsi_host_template *sht)
2901 free_irq(host->irq, data); 2796 free_irq(host->irq, data);
2902 2797
2903 free_sg_list: 2798 free_sg_list:
2904 pci_free_consistent(PCIDEV, NSP32_SG_TABLE_SIZE, 2799 pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
2905 data->sg_list, data->sg_paddr); 2800 data->sg_list, data->sg_paddr);
2906 2801
2907 free_autoparam: 2802 free_autoparam:
2908 pci_free_consistent(PCIDEV, sizeof(nsp32_autoparam), 2803 pci_free_consistent(pdev, sizeof(nsp32_autoparam),
2909 data->autoparam, data->auto_paddr); 2804 data->autoparam, data->auto_paddr);
2910 2805
2911 scsi_unregister: 2806 scsi_unregister:
2912 scsi_host_put(host); 2807 scsi_host_put(host);
2913 2808
2914 err: 2809 err:
2915 return DETECT_NG; 2810 return 1;
2916} 2811}
2917#undef DETECT_OK
2918#undef DETECT_NG
2919#undef PCIDEV
2920 2812
2921static int nsp32_release(struct Scsi_Host *host) 2813static int nsp32_release(struct Scsi_Host *host)
2922{ 2814{
@@ -3525,11 +3417,7 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
3525 3417
3526 pci_set_master(pdev); 3418 pci_set_master(pdev);
3527 3419
3528#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
3529 ret = nsp32_detect(pdev); 3420 ret = nsp32_detect(pdev);
3530#else
3531 ret = scsi_register_host(&nsp32_template);
3532#endif
3533 3421
3534 nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s", 3422 nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s",
3535 pdev->irq, 3423 pdev->irq,
@@ -3544,25 +3432,17 @@ static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_i
3544 3432
3545static void __devexit nsp32_remove(struct pci_dev *pdev) 3433static void __devexit nsp32_remove(struct pci_dev *pdev)
3546{ 3434{
3547#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
3548 struct Scsi_Host *host = pci_get_drvdata(pdev); 3435 struct Scsi_Host *host = pci_get_drvdata(pdev);
3549#endif
3550 3436
3551 nsp32_dbg(NSP32_DEBUG_REGISTER, "enter"); 3437 nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");
3552 3438
3553#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,73))
3554 scsi_remove_host(host); 3439 scsi_remove_host(host);
3555 3440
3556 nsp32_release(host); 3441 nsp32_release(host);
3557 3442
3558 scsi_host_put(host); 3443 scsi_host_put(host);
3559#else
3560 scsi_unregister_host(&nsp32_template);
3561#endif
3562} 3444}
3563 3445
3564
3565
3566static struct pci_driver nsp32_driver = { 3446static struct pci_driver nsp32_driver = {
3567 .name = "nsp32", 3447 .name = "nsp32",
3568 .id_table = nsp32_pci_table, 3448 .id_table = nsp32_pci_table,
diff --git a/drivers/scsi/pcmcia/sym53c500_cs.c b/drivers/scsi/pcmcia/sym53c500_cs.c
index ffe75c431b25..2695b7187b2f 100644
--- a/drivers/scsi/pcmcia/sym53c500_cs.c
+++ b/drivers/scsi/pcmcia/sym53c500_cs.c
@@ -370,8 +370,6 @@ SYM53C500_intr(int irq, void *dev_id)
370 DEB(unsigned char seq_reg;) 370 DEB(unsigned char seq_reg;)
371 unsigned char status, int_reg; 371 unsigned char status, int_reg;
372 unsigned char pio_status; 372 unsigned char pio_status;
373 struct scatterlist *sglist;
374 unsigned int sgcount;
375 int port_base = dev->io_port; 373 int port_base = dev->io_port;
376 struct sym53c500_data *data = 374 struct sym53c500_data *data =
377 (struct sym53c500_data *)dev->hostdata; 375 (struct sym53c500_data *)dev->hostdata;
@@ -434,20 +432,19 @@ SYM53C500_intr(int irq, void *dev_id)
434 switch (status & 0x07) { /* scsi phase */ 432 switch (status & 0x07) { /* scsi phase */
435 case 0x00: /* DATA-OUT */ 433 case 0x00: /* DATA-OUT */
436 if (int_reg & 0x10) { /* Target requesting info transfer */ 434 if (int_reg & 0x10) { /* Target requesting info transfer */
435 struct scatterlist *sg;
436 int i;
437
437 curSC->SCp.phase = data_out; 438 curSC->SCp.phase = data_out;
438 VDEB(printk("SYM53C500: Data-Out phase\n")); 439 VDEB(printk("SYM53C500: Data-Out phase\n"));
439 outb(FLUSH_FIFO, port_base + CMD_REG); 440 outb(FLUSH_FIFO, port_base + CMD_REG);
440 LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */ 441 LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
441 outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); 442 outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
442 if (!curSC->use_sg) /* Don't use scatter-gather */ 443
443 SYM53C500_pio_write(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen); 444 scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
444 else { /* use scatter-gather */ 445 SYM53C500_pio_write(fast_pio, port_base,
445 sgcount = curSC->use_sg; 446 page_address(sg->page) + sg->offset,
446 sglist = curSC->request_buffer; 447 sg->length);
447 while (sgcount--) {
448 SYM53C500_pio_write(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
449 sglist++;
450 }
451 } 448 }
452 REG0(port_base); 449 REG0(port_base);
453 } 450 }
@@ -455,20 +452,19 @@ SYM53C500_intr(int irq, void *dev_id)
455 452
456 case 0x01: /* DATA-IN */ 453 case 0x01: /* DATA-IN */
457 if (int_reg & 0x10) { /* Target requesting info transfer */ 454 if (int_reg & 0x10) { /* Target requesting info transfer */
455 struct scatterlist *sg;
456 int i;
457
458 curSC->SCp.phase = data_in; 458 curSC->SCp.phase = data_in;
459 VDEB(printk("SYM53C500: Data-In phase\n")); 459 VDEB(printk("SYM53C500: Data-In phase\n"));
460 outb(FLUSH_FIFO, port_base + CMD_REG); 460 outb(FLUSH_FIFO, port_base + CMD_REG);
461 LOAD_DMA_COUNT(port_base, curSC->request_bufflen); /* Max transfer size */ 461 LOAD_DMA_COUNT(port_base, scsi_bufflen(curSC)); /* Max transfer size */
462 outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG); 462 outb(TRANSFER_INFO | DMA_OP, port_base + CMD_REG);
463 if (!curSC->use_sg) /* Don't use scatter-gather */ 463
464 SYM53C500_pio_read(fast_pio, port_base, curSC->request_buffer, curSC->request_bufflen); 464 scsi_for_each_sg(curSC, sg, scsi_sg_count(curSC), i) {
465 else { /* Use scatter-gather */ 465 SYM53C500_pio_read(fast_pio, port_base,
466 sgcount = curSC->use_sg; 466 page_address(sg->page) + sg->offset,
467 sglist = curSC->request_buffer; 467 sg->length);
468 while (sgcount--) {
469 SYM53C500_pio_read(fast_pio, port_base, page_address(sglist->page) + sglist->offset, sglist->length);
470 sglist++;
471 }
472 } 468 }
473 REG0(port_base); 469 REG0(port_base);
474 } 470 }
@@ -578,7 +574,7 @@ SYM53C500_queue(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
578 574
579 DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n", 575 DEB(printk("cmd=%02x, cmd_len=%02x, target=%02x, lun=%02x, bufflen=%d\n",
580 SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id, 576 SCpnt->cmnd[0], SCpnt->cmd_len, SCpnt->device->id,
581 SCpnt->device->lun, SCpnt->request_bufflen)); 577 SCpnt->device->lun, scsi_bufflen(SCpnt)));
582 578
583 VDEB(for (i = 0; i < SCpnt->cmd_len; i++) 579 VDEB(for (i = 0; i < SCpnt->cmd_len; i++)
584 printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i])); 580 printk("cmd[%d]=%02x ", i, SCpnt->cmnd[i]));
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ca463469063d..0f04258becbf 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1633,7 +1633,7 @@ struct qla_init_msix_entry {
1633 uint16_t entry; 1633 uint16_t entry;
1634 uint16_t index; 1634 uint16_t index;
1635 const char *name; 1635 const char *name;
1636 irqreturn_t (*handler)(int, void *); 1636 irq_handler_t handler;
1637}; 1637};
1638 1638
1639static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = { 1639static struct qla_init_msix_entry imsix_entries[QLA_MSIX_ENTRIES] = {
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index 6437d024b0dd..fcc184cd066d 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -6,176 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include <scsi/scsi_dbg.h> 9#include "ql4_glbl.h"
10 10#include "ql4_dbg.h"
11#if 0 11#include "ql4_inline.h"
12
13static void qla4xxx_print_srb_info(struct srb * srb)
14{
15 printk("%s: srb = 0x%p, flags=0x%02x\n", __func__, srb, srb->flags);
16 printk("%s: cmd = 0x%p, saved_dma_handle = 0x%lx\n",
17 __func__, srb->cmd, (unsigned long) srb->dma_handle);
18 printk("%s: fw_ddb_index = %d, lun = %d\n",
19 __func__, srb->fw_ddb_index, srb->cmd->device->lun);
20 printk("%s: iocb_tov = %d\n",
21 __func__, srb->iocb_tov);
22 printk("%s: cc_stat = 0x%x, r_start = 0x%lx, u_start = 0x%lx\n\n",
23 __func__, srb->cc_stat, srb->r_start, srb->u_start);
24}
25
26void qla4xxx_print_scsi_cmd(struct scsi_cmnd *cmd)
27{
28 printk("SCSI Command = 0x%p, Handle=0x%p\n", cmd, cmd->host_scribble);
29 printk(" b=%d, t=%02xh, l=%02xh, cmd_len = %02xh\n",
30 cmd->device->channel, cmd->device->id, cmd->device->lun,
31 cmd->cmd_len);
32 scsi_print_command(cmd);
33 printk(" seg_cnt = %d\n", cmd->use_sg);
34 printk(" request buffer = 0x%p, request buffer len = 0x%x\n",
35 cmd->request_buffer, cmd->request_bufflen);
36 if (cmd->use_sg) {
37 struct scatterlist *sg;
38 sg = (struct scatterlist *)cmd->request_buffer;
39 printk(" SG buffer: \n");
40 qla4xxx_dump_buffer((caddr_t) sg,
41 (cmd->use_sg * sizeof(*sg)));
42 }
43 printk(" tag = %d, transfersize = 0x%x \n", cmd->tag,
44 cmd->transfersize);
45 printk(" Pid = %d, SP = 0x%p\n", (int)cmd->pid, cmd->SCp.ptr);
46 printk(" underflow size = 0x%x, direction=0x%x\n", cmd->underflow,
47 cmd->sc_data_direction);
48 printk(" Current time (jiffies) = 0x%lx, "
49 "timeout expires = 0x%lx\n", jiffies, cmd->eh_timeout.expires);
50 qla4xxx_print_srb_info((struct srb *) cmd->SCp.ptr);
51}
52
53void __dump_registers(struct scsi_qla_host *ha)
54{
55 uint8_t i;
56 for (i = 0; i < MBOX_REG_COUNT; i++) {
57 printk(KERN_INFO "0x%02X mailbox[%d] = 0x%08X\n",
58 (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
59 readw(&ha->reg->mailbox[i]));
60 }
61 printk(KERN_INFO "0x%02X flash_address = 0x%08X\n",
62 (uint8_t) offsetof(struct isp_reg, flash_address),
63 readw(&ha->reg->flash_address));
64 printk(KERN_INFO "0x%02X flash_data = 0x%08X\n",
65 (uint8_t) offsetof(struct isp_reg, flash_data),
66 readw(&ha->reg->flash_data));
67 printk(KERN_INFO "0x%02X ctrl_status = 0x%08X\n",
68 (uint8_t) offsetof(struct isp_reg, ctrl_status),
69 readw(&ha->reg->ctrl_status));
70 if (is_qla4010(ha)) {
71 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
72 (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
73 readw(&ha->reg->u1.isp4010.nvram));
74 }
75
76 else if (is_qla4022(ha) | is_qla4032(ha)) {
77 printk(KERN_INFO "0x%02X intr_mask = 0x%08X\n",
78 (uint8_t) offsetof(struct isp_reg,
79 u1.isp4022.intr_mask),
80 readw(&ha->reg->u1.isp4022.intr_mask));
81 printk(KERN_INFO "0x%02X nvram = 0x%08X\n",
82 (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
83 readw(&ha->reg->u1.isp4022.nvram));
84 printk(KERN_INFO "0x%02X semaphore = 0x%08X\n",
85 (uint8_t) offsetof(struct isp_reg,
86 u1.isp4022.semaphore),
87 readw(&ha->reg->u1.isp4022.semaphore));
88 }
89 printk(KERN_INFO "0x%02X req_q_in = 0x%08X\n",
90 (uint8_t) offsetof(struct isp_reg, req_q_in),
91 readw(&ha->reg->req_q_in));
92 printk(KERN_INFO "0x%02X rsp_q_out = 0x%08X\n",
93 (uint8_t) offsetof(struct isp_reg, rsp_q_out),
94 readw(&ha->reg->rsp_q_out));
95 if (is_qla4010(ha)) {
96 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
97 (uint8_t) offsetof(struct isp_reg,
98 u2.isp4010.ext_hw_conf),
99 readw(&ha->reg->u2.isp4010.ext_hw_conf));
100 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
101 (uint8_t) offsetof(struct isp_reg,
102 u2.isp4010.port_ctrl),
103 readw(&ha->reg->u2.isp4010.port_ctrl));
104 printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
105 (uint8_t) offsetof(struct isp_reg,
106 u2.isp4010.port_status),
107 readw(&ha->reg->u2.isp4010.port_status));
108 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
109 (uint8_t) offsetof(struct isp_reg,
110 u2.isp4010.req_q_out),
111 readw(&ha->reg->u2.isp4010.req_q_out));
112 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
113 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
114 readw(&ha->reg->u2.isp4010.gp_out));
115 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
116 (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
117 readw(&ha->reg->u2.isp4010.gp_in));
118 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
119 (uint8_t) offsetof(struct isp_reg,
120 u2.isp4010.port_err_status),
121 readw(&ha->reg->u2.isp4010.port_err_status));
122 }
123
124 else if (is_qla4022(ha) | is_qla4032(ha)) {
125 printk(KERN_INFO "Page 0 Registers:\n");
126 printk(KERN_INFO "0x%02X ext_hw_conf = 0x%08X\n",
127 (uint8_t) offsetof(struct isp_reg,
128 u2.isp4022.p0.ext_hw_conf),
129 readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
130 printk(KERN_INFO "0x%02X port_ctrl = 0x%08X\n",
131 (uint8_t) offsetof(struct isp_reg,
132 u2.isp4022.p0.port_ctrl),
133 readw(&ha->reg->u2.isp4022.p0.port_ctrl));
134 printk(KERN_INFO "0x%02X port_status = 0x%08X\n",
135 (uint8_t) offsetof(struct isp_reg,
136 u2.isp4022.p0.port_status),
137 readw(&ha->reg->u2.isp4022.p0.port_status));
138 printk(KERN_INFO "0x%02X gp_out = 0x%08X\n",
139 (uint8_t) offsetof(struct isp_reg,
140 u2.isp4022.p0.gp_out),
141 readw(&ha->reg->u2.isp4022.p0.gp_out));
142 printk(KERN_INFO "0x%02X gp_in = 0x%08X\n",
143 (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
144 readw(&ha->reg->u2.isp4022.p0.gp_in));
145 printk(KERN_INFO "0x%02X port_err_status = 0x%08X\n",
146 (uint8_t) offsetof(struct isp_reg,
147 u2.isp4022.p0.port_err_status),
148 readw(&ha->reg->u2.isp4022.p0.port_err_status));
149 printk(KERN_INFO "Page 1 Registers:\n");
150 writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
151 &ha->reg->ctrl_status);
152 printk(KERN_INFO "0x%02X req_q_out = 0x%08X\n",
153 (uint8_t) offsetof(struct isp_reg,
154 u2.isp4022.p1.req_q_out),
155 readw(&ha->reg->u2.isp4022.p1.req_q_out));
156 writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
157 &ha->reg->ctrl_status);
158 }
159}
160
161void qla4xxx_dump_mbox_registers(struct scsi_qla_host *ha)
162{
163 unsigned long flags = 0;
164 int i = 0;
165 spin_lock_irqsave(&ha->hardware_lock, flags);
166 for (i = 1; i < MBOX_REG_COUNT; i++)
167 printk(KERN_INFO " Mailbox[%d] = %08x\n", i,
168 readw(&ha->reg->mailbox[i]));
169 spin_unlock_irqrestore(&ha->hardware_lock, flags);
170}
171
172void qla4xxx_dump_registers(struct scsi_qla_host *ha)
173{
174 unsigned long flags = 0;
175 spin_lock_irqsave(&ha->hardware_lock, flags);
176 __dump_registers(ha);
177 spin_unlock_irqrestore(&ha->hardware_lock, flags);
178}
179 12
180void qla4xxx_dump_buffer(void *b, uint32_t size) 13void qla4xxx_dump_buffer(void *b, uint32_t size)
181{ 14{
@@ -198,4 +31,3 @@ void qla4xxx_dump_buffer(void *b, uint32_t size)
198 printk(KERN_DEBUG "\n"); 31 printk(KERN_DEBUG "\n");
199} 32}
200 33
201#endif /* 0 */
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 6f4cf2dd2f4a..accaf690eaf0 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -122,8 +122,7 @@
122 122
123#define ISCSI_IPADDR_SIZE 4 /* IP address size */ 123#define ISCSI_IPADDR_SIZE 4 /* IP address size */
124#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alais name size */ 124#define ISCSI_ALIAS_SIZE 32 /* ISCSI Alais name size */
125#define ISCSI_NAME_SIZE 255 /* ISCSI Name size - 125#define ISCSI_NAME_SIZE 0xE0 /* ISCSI Name size */
126 * usually a string */
127 126
128#define LSDW(x) ((u32)((u64)(x))) 127#define LSDW(x) ((u32)((u64)(x)))
129#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16)) 128#define MSDW(x) ((u32)((((u64)(x)) >> 16) >> 16))
@@ -187,9 +186,21 @@ struct srb {
187 u_long u_start; /* Time when we handed the cmd to F/W */ 186 u_long u_start; /* Time when we handed the cmd to F/W */
188}; 187};
189 188
190 /* 189/*
191 * Device Database (DDB) structure 190 * Asynchronous Event Queue structure
192 */ 191 */
192struct aen {
193 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
194};
195
196struct ql4_aen_log {
197 int count;
198 struct aen entry[MAX_AEN_ENTRIES];
199};
200
201/*
202 * Device Database (DDB) structure
203 */
193struct ddb_entry { 204struct ddb_entry {
194 struct list_head list; /* ddb list */ 205 struct list_head list; /* ddb list */
195 struct scsi_qla_host *ha; 206 struct scsi_qla_host *ha;
@@ -254,13 +265,6 @@ struct ddb_entry {
254#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */ 265#define DF_ISNS_DISCOVERED 2 /* Device was discovered via iSNS */
255#define DF_FO_MASKED 3 266#define DF_FO_MASKED 3
256 267
257/*
258 * Asynchronous Event Queue structure
259 */
260struct aen {
261 uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
262};
263
264 268
265#include "ql4_fw.h" 269#include "ql4_fw.h"
266#include "ql4_nvram.h" 270#include "ql4_nvram.h"
@@ -270,31 +274,31 @@ struct aen {
270 */ 274 */
271struct scsi_qla_host { 275struct scsi_qla_host {
272 /* Linux adapter configuration data */ 276 /* Linux adapter configuration data */
273 struct Scsi_Host *host; /* pointer to host data */
274 uint32_t tot_ddbs;
275 unsigned long flags; 277 unsigned long flags;
276 278
277#define AF_ONLINE 0 /* 0x00000001 */ 279#define AF_ONLINE 0 /* 0x00000001 */
278#define AF_INIT_DONE 1 /* 0x00000002 */ 280#define AF_INIT_DONE 1 /* 0x00000002 */
279#define AF_MBOX_COMMAND 2 /* 0x00000004 */ 281#define AF_MBOX_COMMAND 2 /* 0x00000004 */
280#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */ 282#define AF_MBOX_COMMAND_DONE 3 /* 0x00000008 */
281#define AF_INTERRUPTS_ON 6 /* 0x00000040 Not Used */ 283#define AF_INTERRUPTS_ON 6 /* 0x00000040 */
282#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */ 284#define AF_GET_CRASH_RECORD 7 /* 0x00000080 */
283#define AF_LINK_UP 8 /* 0x00000100 */ 285#define AF_LINK_UP 8 /* 0x00000100 */
284#define AF_IRQ_ATTACHED 10 /* 0x00000400 */ 286#define AF_IRQ_ATTACHED 10 /* 0x00000400 */
285#define AF_ISNS_CMD_IN_PROCESS 12 /* 0x00001000 */ 287#define AF_DISABLE_ACB_COMPLETE 11 /* 0x00000800 */
286#define AF_ISNS_CMD_DONE 13 /* 0x00002000 */
287 288
288 unsigned long dpc_flags; 289 unsigned long dpc_flags;
289 290
290#define DPC_RESET_HA 1 /* 0x00000002 */ 291#define DPC_RESET_HA 1 /* 0x00000002 */
291#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */ 292#define DPC_RETRY_RESET_HA 2 /* 0x00000004 */
292#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */ 293#define DPC_RELOGIN_DEVICE 3 /* 0x00000008 */
293#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */ 294#define DPC_RESET_HA_DESTROY_DDB_LIST 4 /* 0x00000010 */
294#define DPC_RESET_HA_INTR 5 /* 0x00000020 */ 295#define DPC_RESET_HA_INTR 5 /* 0x00000020 */
295#define DPC_ISNS_RESTART 7 /* 0x00000080 */ 296#define DPC_ISNS_RESTART 7 /* 0x00000080 */
296#define DPC_AEN 9 /* 0x00000200 */ 297#define DPC_AEN 9 /* 0x00000200 */
297#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */ 298#define DPC_GET_DHCP_IP_ADDR 15 /* 0x00008000 */
299
300 struct Scsi_Host *host; /* pointer to host data */
301 uint32_t tot_ddbs;
298 302
299 uint16_t iocb_cnt; 303 uint16_t iocb_cnt;
300 uint16_t iocb_hiwat; 304 uint16_t iocb_hiwat;
@@ -344,6 +348,7 @@ struct scsi_qla_host {
344 uint32_t firmware_version[2]; 348 uint32_t firmware_version[2];
345 uint32_t patch_number; 349 uint32_t patch_number;
346 uint32_t build_number; 350 uint32_t build_number;
351 uint32_t board_id;
347 352
348 /* --- From Init_FW --- */ 353 /* --- From Init_FW --- */
349 /* init_cb_t *init_cb; */ 354 /* init_cb_t *init_cb; */
@@ -363,7 +368,6 @@ struct scsi_qla_host {
363 368
364 /* --- From GetFwState --- */ 369 /* --- From GetFwState --- */
365 uint32_t firmware_state; 370 uint32_t firmware_state;
366 uint32_t board_id;
367 uint32_t addl_fw_state; 371 uint32_t addl_fw_state;
368 372
369 /* Linux kernel thread */ 373 /* Linux kernel thread */
@@ -414,6 +418,8 @@ struct scsi_qla_host {
414 uint16_t aen_out; 418 uint16_t aen_out;
415 struct aen aen_q[MAX_AEN_ENTRIES]; 419 struct aen aen_q[MAX_AEN_ENTRIES];
416 420
421 struct ql4_aen_log aen_log;/* tracks all aens */
422
417 /* This mutex protects several threads to do mailbox commands 423 /* This mutex protects several threads to do mailbox commands
418 * concurrently. 424 * concurrently.
419 */ 425 */
@@ -585,10 +591,4 @@ static inline void ql4xxx_unlock_drvr(struct scsi_qla_host *a)
585#define FLUSH_DDB_CHANGED_AENS 1 591#define FLUSH_DDB_CHANGED_AENS 1
586#define RELOGIN_DDB_CHANGED_AENS 2 592#define RELOGIN_DDB_CHANGED_AENS 2
587 593
588#include "ql4_version.h"
589#include "ql4_glbl.h"
590#include "ql4_dbg.h"
591#include "ql4_inline.h"
592
593
594#endif /*_QLA4XXX_H */ 594#endif /*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 4eea8c571916..9bb3d1d2a925 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -20,143 +20,23 @@
20 *************************************************************************/ 20 *************************************************************************/
21 21
22struct port_ctrl_stat_regs { 22struct port_ctrl_stat_regs {
23 __le32 ext_hw_conf; /* 80 x50 R/W */ 23 __le32 ext_hw_conf; /* 0x50 R/W */
24 __le32 intChipConfiguration; /* 84 x54 */ 24 __le32 rsrvd0; /* 0x54 */
25 __le32 port_ctrl; /* 88 x58 */ 25 __le32 port_ctrl; /* 0x58 */
26 __le32 port_status; /* 92 x5c */ 26 __le32 port_status; /* 0x5c */
27 __le32 HostPrimMACHi; /* 96 x60 */ 27 __le32 rsrvd1[32]; /* 0x60-0xdf */
28 __le32 HostPrimMACLow; /* 100 x64 */ 28 __le32 gp_out; /* 0xe0 */
29 __le32 HostSecMACHi; /* 104 x68 */ 29 __le32 gp_in; /* 0xe4 */
30 __le32 HostSecMACLow; /* 108 x6c */ 30 __le32 rsrvd2[5]; /* 0xe8-0xfb */
31 __le32 EPPrimMACHi; /* 112 x70 */ 31 __le32 port_err_status; /* 0xfc */
32 __le32 EPPrimMACLow; /* 116 x74 */
33 __le32 EPSecMACHi; /* 120 x78 */
34 __le32 EPSecMACLow; /* 124 x7c */
35 __le32 HostPrimIPHi; /* 128 x80 */
36 __le32 HostPrimIPMidHi; /* 132 x84 */
37 __le32 HostPrimIPMidLow; /* 136 x88 */
38 __le32 HostPrimIPLow; /* 140 x8c */
39 __le32 HostSecIPHi; /* 144 x90 */
40 __le32 HostSecIPMidHi; /* 148 x94 */
41 __le32 HostSecIPMidLow; /* 152 x98 */
42 __le32 HostSecIPLow; /* 156 x9c */
43 __le32 EPPrimIPHi; /* 160 xa0 */
44 __le32 EPPrimIPMidHi; /* 164 xa4 */
45 __le32 EPPrimIPMidLow; /* 168 xa8 */
46 __le32 EPPrimIPLow; /* 172 xac */
47 __le32 EPSecIPHi; /* 176 xb0 */
48 __le32 EPSecIPMidHi; /* 180 xb4 */
49 __le32 EPSecIPMidLow; /* 184 xb8 */
50 __le32 EPSecIPLow; /* 188 xbc */
51 __le32 IPReassemblyTimeout; /* 192 xc0 */
52 __le32 EthMaxFramePayload; /* 196 xc4 */
53 __le32 TCPMaxWindowSize; /* 200 xc8 */
54 __le32 TCPCurrentTimestampHi; /* 204 xcc */
55 __le32 TCPCurrentTimestampLow; /* 208 xd0 */
56 __le32 LocalRAMAddress; /* 212 xd4 */
57 __le32 LocalRAMData; /* 216 xd8 */
58 __le32 PCSReserved1; /* 220 xdc */
59 __le32 gp_out; /* 224 xe0 */
60 __le32 gp_in; /* 228 xe4 */
61 __le32 ProbeMuxAddr; /* 232 xe8 */
62 __le32 ProbeMuxData; /* 236 xec */
63 __le32 ERMQueueBaseAddr0; /* 240 xf0 */
64 __le32 ERMQueueBaseAddr1; /* 244 xf4 */
65 __le32 MACConfiguration; /* 248 xf8 */
66 __le32 port_err_status; /* 252 xfc COR */
67}; 32};
68 33
69struct host_mem_cfg_regs { 34struct host_mem_cfg_regs {
70 __le32 NetRequestQueueOut; /* 80 x50 */ 35 __le32 rsrvd0[12]; /* 0x50-0x79 */
71 __le32 NetRequestQueueOutAddrHi; /* 84 x54 */ 36 __le32 req_q_out; /* 0x80 */
72 __le32 NetRequestQueueOutAddrLow; /* 88 x58 */ 37 __le32 rsrvd1[31]; /* 0x84-0xFF */
73 __le32 NetRequestQueueBaseAddrHi; /* 92 x5c */
74 __le32 NetRequestQueueBaseAddrLow; /* 96 x60 */
75 __le32 NetRequestQueueLength; /* 100 x64 */
76 __le32 NetResponseQueueIn; /* 104 x68 */
77 __le32 NetResponseQueueInAddrHi; /* 108 x6c */
78 __le32 NetResponseQueueInAddrLow; /* 112 x70 */
79 __le32 NetResponseQueueBaseAddrHi; /* 116 x74 */
80 __le32 NetResponseQueueBaseAddrLow; /* 120 x78 */
81 __le32 NetResponseQueueLength; /* 124 x7c */
82 __le32 req_q_out; /* 128 x80 */
83 __le32 RequestQueueOutAddrHi; /* 132 x84 */
84 __le32 RequestQueueOutAddrLow; /* 136 x88 */
85 __le32 RequestQueueBaseAddrHi; /* 140 x8c */
86 __le32 RequestQueueBaseAddrLow; /* 144 x90 */
87 __le32 RequestQueueLength; /* 148 x94 */
88 __le32 ResponseQueueIn; /* 152 x98 */
89 __le32 ResponseQueueInAddrHi; /* 156 x9c */
90 __le32 ResponseQueueInAddrLow; /* 160 xa0 */
91 __le32 ResponseQueueBaseAddrHi; /* 164 xa4 */
92 __le32 ResponseQueueBaseAddrLow; /* 168 xa8 */
93 __le32 ResponseQueueLength; /* 172 xac */
94 __le32 NetRxLargeBufferQueueOut; /* 176 xb0 */
95 __le32 NetRxLargeBufferQueueBaseAddrHi; /* 180 xb4 */
96 __le32 NetRxLargeBufferQueueBaseAddrLow; /* 184 xb8 */
97 __le32 NetRxLargeBufferQueueLength; /* 188 xbc */
98 __le32 NetRxLargeBufferLength; /* 192 xc0 */
99 __le32 NetRxSmallBufferQueueOut; /* 196 xc4 */
100 __le32 NetRxSmallBufferQueueBaseAddrHi; /* 200 xc8 */
101 __le32 NetRxSmallBufferQueueBaseAddrLow; /* 204 xcc */
102 __le32 NetRxSmallBufferQueueLength; /* 208 xd0 */
103 __le32 NetRxSmallBufferLength; /* 212 xd4 */
104 __le32 HMCReserved0[10]; /* 216 xd8 */
105}; 38};
106 39
107struct local_ram_cfg_regs {
108 __le32 BufletSize; /* 80 x50 */
109 __le32 BufletMaxCount; /* 84 x54 */
110 __le32 BufletCurrCount; /* 88 x58 */
111 __le32 BufletPauseThresholdCount; /* 92 x5c */
112 __le32 BufletTCPWinThresholdHi; /* 96 x60 */
113 __le32 BufletTCPWinThresholdLow; /* 100 x64 */
114 __le32 IPHashTableBaseAddr; /* 104 x68 */
115 __le32 IPHashTableSize; /* 108 x6c */
116 __le32 TCPHashTableBaseAddr; /* 112 x70 */
117 __le32 TCPHashTableSize; /* 116 x74 */
118 __le32 NCBAreaBaseAddr; /* 120 x78 */
119 __le32 NCBMaxCount; /* 124 x7c */
120 __le32 NCBCurrCount; /* 128 x80 */
121 __le32 DRBAreaBaseAddr; /* 132 x84 */
122 __le32 DRBMaxCount; /* 136 x88 */
123 __le32 DRBCurrCount; /* 140 x8c */
124 __le32 LRCReserved[28]; /* 144 x90 */
125};
126
127struct prot_stat_regs {
128 __le32 MACTxFrameCount; /* 80 x50 R */
129 __le32 MACTxByteCount; /* 84 x54 R */
130 __le32 MACRxFrameCount; /* 88 x58 R */
131 __le32 MACRxByteCount; /* 92 x5c R */
132 __le32 MACCRCErrCount; /* 96 x60 R */
133 __le32 MACEncErrCount; /* 100 x64 R */
134 __le32 MACRxLengthErrCount; /* 104 x68 R */
135 __le32 IPTxPacketCount; /* 108 x6c R */
136 __le32 IPTxByteCount; /* 112 x70 R */
137 __le32 IPTxFragmentCount; /* 116 x74 R */
138 __le32 IPRxPacketCount; /* 120 x78 R */
139 __le32 IPRxByteCount; /* 124 x7c R */
140 __le32 IPRxFragmentCount; /* 128 x80 R */
141 __le32 IPDatagramReassemblyCount; /* 132 x84 R */
142 __le32 IPV6RxPacketCount; /* 136 x88 R */
143 __le32 IPErrPacketCount; /* 140 x8c R */
144 __le32 IPReassemblyErrCount; /* 144 x90 R */
145 __le32 TCPTxSegmentCount; /* 148 x94 R */
146 __le32 TCPTxByteCount; /* 152 x98 R */
147 __le32 TCPRxSegmentCount; /* 156 x9c R */
148 __le32 TCPRxByteCount; /* 160 xa0 R */
149 __le32 TCPTimerExpCount; /* 164 xa4 R */
150 __le32 TCPRxAckCount; /* 168 xa8 R */
151 __le32 TCPTxAckCount; /* 172 xac R */
152 __le32 TCPRxErrOOOCount; /* 176 xb0 R */
153 __le32 PSReserved0; /* 180 xb4 */
154 __le32 TCPRxWindowProbeUpdateCount; /* 184 xb8 R */
155 __le32 ECCErrCorrectionCount; /* 188 xbc R */
156 __le32 PSReserved1[16]; /* 192 xc0 */
157};
158
159
160/* remote register set (access via PCI memory read/write) */ 40/* remote register set (access via PCI memory read/write) */
161struct isp_reg { 41struct isp_reg {
162#define MBOX_REG_COUNT 8 42#define MBOX_REG_COUNT 8
@@ -207,11 +87,7 @@ struct isp_reg {
207 union { 87 union {
208 struct port_ctrl_stat_regs p0; 88 struct port_ctrl_stat_regs p0;
209 struct host_mem_cfg_regs p1; 89 struct host_mem_cfg_regs p1;
210 struct local_ram_cfg_regs p2;
211 struct prot_stat_regs p3;
212 __le32 r_union[44];
213 }; 90 };
214
215 } __attribute__ ((packed)) isp4022; 91 } __attribute__ ((packed)) isp4022;
216 } u2; 92 } u2;
217}; /* 256 x100 */ 93}; /* 256 x100 */
@@ -296,6 +172,7 @@ static inline uint32_t clr_rmask(uint32_t val)
296/* ISP Semaphore definitions */ 172/* ISP Semaphore definitions */
297 173
298/* ISP General Purpose Output definitions */ 174/* ISP General Purpose Output definitions */
175#define GPOR_TOPCAT_RESET 0x00000004
299 176
300/* shadow registers (DMA'd from HA to system memory. read only) */ 177/* shadow registers (DMA'd from HA to system memory. read only) */
301struct shadow_regs { 178struct shadow_regs {
@@ -337,6 +214,7 @@ union external_hw_config_reg {
337 214
338/* Mailbox command definitions */ 215/* Mailbox command definitions */
339#define MBOX_CMD_ABOUT_FW 0x0009 216#define MBOX_CMD_ABOUT_FW 0x0009
217#define MBOX_CMD_PING 0x000B
340#define MBOX_CMD_LUN_RESET 0x0016 218#define MBOX_CMD_LUN_RESET 0x0016
341#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E 219#define MBOX_CMD_GET_MANAGEMENT_DATA 0x001E
342#define MBOX_CMD_GET_FW_STATUS 0x001F 220#define MBOX_CMD_GET_FW_STATUS 0x001F
@@ -364,6 +242,17 @@ union external_hw_config_reg {
364#define MBOX_CMD_GET_FW_STATE 0x0069 242#define MBOX_CMD_GET_FW_STATE 0x0069
365#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A 243#define MBOX_CMD_GET_INIT_FW_CTRL_BLOCK_DEFAULTS 0x006A
366#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087 244#define MBOX_CMD_RESTORE_FACTORY_DEFAULTS 0x0087
245#define MBOX_CMD_SET_ACB 0x0088
246#define MBOX_CMD_GET_ACB 0x0089
247#define MBOX_CMD_DISABLE_ACB 0x008A
248#define MBOX_CMD_GET_IPV6_NEIGHBOR_CACHE 0x008B
249#define MBOX_CMD_GET_IPV6_DEST_CACHE 0x008C
250#define MBOX_CMD_GET_IPV6_DEF_ROUTER_LIST 0x008D
251#define MBOX_CMD_GET_IPV6_LCL_PREFIX_LIST 0x008E
252#define MBOX_CMD_SET_IPV6_NEIGHBOR_CACHE 0x0090
253#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
254#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
255#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
367 256
368/* Mailbox 1 */ 257/* Mailbox 1 */
369#define FW_STATE_READY 0x0000 258#define FW_STATE_READY 0x0000
@@ -409,6 +298,16 @@ union external_hw_config_reg {
409#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D 298#define MBOX_ASTS_DHCP_LEASE_EXPIRED 0x801D
410#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F 299#define MBOX_ASTS_DHCP_LEASE_ACQUIRED 0x801F
411#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021 300#define MBOX_ASTS_ISNS_UNSOLICITED_PDU_RECEIVED 0x8021
301#define MBOX_ASTS_DUPLICATE_IP 0x8025
302#define MBOX_ASTS_ARP_COMPLETE 0x8026
303#define MBOX_ASTS_SUBNET_STATE_CHANGE 0x8027
304#define MBOX_ASTS_RESPONSE_QUEUE_FULL 0x8028
305#define MBOX_ASTS_IP_ADDR_STATE_CHANGED 0x8029
306#define MBOX_ASTS_IPV6_PREFIX_EXPIRED 0x802B
307#define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED 0x802C
308#define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED 0x802D
309#define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD 0x802E
310
412#define ISNS_EVENT_DATA_RECEIVED 0x0000 311#define ISNS_EVENT_DATA_RECEIVED 0x0000
413#define ISNS_EVENT_CONNECTION_OPENED 0x0001 312#define ISNS_EVENT_CONNECTION_OPENED 0x0001
414#define ISNS_EVENT_CONNECTION_FAILED 0x0002 313#define ISNS_EVENT_CONNECTION_FAILED 0x0002
@@ -418,137 +317,166 @@ union external_hw_config_reg {
418/*************************************************************************/ 317/*************************************************************************/
419 318
420/* Host Adapter Initialization Control Block (from host) */ 319/* Host Adapter Initialization Control Block (from host) */
421struct init_fw_ctrl_blk { 320struct addr_ctrl_blk {
422 uint8_t Version; /* 00 */ 321 uint8_t version; /* 00 */
423 uint8_t Control; /* 01 */ 322 uint8_t control; /* 01 */
424 323
425 uint16_t FwOptions; /* 02-03 */ 324 uint16_t fw_options; /* 02-03 */
426#define FWOPT_HEARTBEAT_ENABLE 0x1000 325#define FWOPT_HEARTBEAT_ENABLE 0x1000
427#define FWOPT_SESSION_MODE 0x0040 326#define FWOPT_SESSION_MODE 0x0040
428#define FWOPT_INITIATOR_MODE 0x0020 327#define FWOPT_INITIATOR_MODE 0x0020
429#define FWOPT_TARGET_MODE 0x0010 328#define FWOPT_TARGET_MODE 0x0010
430 329
431 uint16_t ExecThrottle; /* 04-05 */ 330 uint16_t exec_throttle; /* 04-05 */
432 uint8_t RetryCount; /* 06 */ 331 uint8_t zio_count; /* 06 */
433 uint8_t RetryDelay; /* 07 */ 332 uint8_t res0; /* 07 */
434 uint16_t MaxEthFrPayloadSize; /* 08-09 */ 333 uint16_t eth_mtu_size; /* 08-09 */
435 uint16_t AddFwOptions; /* 0A-0B */ 334 uint16_t add_fw_options; /* 0A-0B */
436 335
437 uint8_t HeartbeatInterval; /* 0C */ 336 uint8_t hb_interval; /* 0C */
438 uint8_t InstanceNumber; /* 0D */ 337 uint8_t inst_num; /* 0D */
439 uint16_t RES2; /* 0E-0F */ 338 uint16_t res1; /* 0E-0F */
440 uint16_t ReqQConsumerIndex; /* 10-11 */ 339 uint16_t rqq_consumer_idx; /* 10-11 */
441 uint16_t ComplQProducerIndex; /* 12-13 */ 340 uint16_t compq_producer_idx; /* 12-13 */
442 uint16_t ReqQLen; /* 14-15 */ 341 uint16_t rqq_len; /* 14-15 */
443 uint16_t ComplQLen; /* 16-17 */ 342 uint16_t compq_len; /* 16-17 */
444 uint32_t ReqQAddrLo; /* 18-1B */ 343 uint32_t rqq_addr_lo; /* 18-1B */
445 uint32_t ReqQAddrHi; /* 1C-1F */ 344 uint32_t rqq_addr_hi; /* 1C-1F */
446 uint32_t ComplQAddrLo; /* 20-23 */ 345 uint32_t compq_addr_lo; /* 20-23 */
447 uint32_t ComplQAddrHi; /* 24-27 */ 346 uint32_t compq_addr_hi; /* 24-27 */
448 uint32_t ShadowRegBufAddrLo; /* 28-2B */ 347 uint32_t shdwreg_addr_lo; /* 28-2B */
449 uint32_t ShadowRegBufAddrHi; /* 2C-2F */ 348 uint32_t shdwreg_addr_hi; /* 2C-2F */
450 349
451 uint16_t iSCSIOptions; /* 30-31 */ 350 uint16_t iscsi_opts; /* 30-31 */
452 351 uint16_t ipv4_tcp_opts; /* 32-33 */
453 uint16_t TCPOptions; /* 32-33 */ 352 uint16_t ipv4_ip_opts; /* 34-35 */
454 353
455 uint16_t IPOptions; /* 34-35 */ 354 uint16_t iscsi_max_pdu_size; /* 36-37 */
456 355 uint8_t ipv4_tos; /* 38 */
457 uint16_t MaxPDUSize; /* 36-37 */ 356 uint8_t ipv4_ttl; /* 39 */
458 uint16_t RcvMarkerInt; /* 38-39 */ 357 uint8_t acb_version; /* 3A */
459 uint16_t SndMarkerInt; /* 3A-3B */ 358 uint8_t res2; /* 3B */
460 uint16_t InitMarkerlessInt; /* 3C-3D */ 359 uint16_t def_timeout; /* 3C-3D */
461 uint16_t FirstBurstSize; /* 3E-3F */ 360 uint16_t iscsi_fburst_len; /* 3E-3F */
462 uint16_t DefaultTime2Wait; /* 40-41 */ 361 uint16_t iscsi_def_time2wait; /* 40-41 */
463 uint16_t DefaultTime2Retain; /* 42-43 */ 362 uint16_t iscsi_def_time2retain; /* 42-43 */
464 uint16_t MaxOutStndngR2T; /* 44-45 */ 363 uint16_t iscsi_max_outstnd_r2t; /* 44-45 */
465 uint16_t KeepAliveTimeout; /* 46-47 */ 364 uint16_t conn_ka_timeout; /* 46-47 */
466 uint16_t PortNumber; /* 48-49 */ 365 uint16_t ipv4_port; /* 48-49 */
467 uint16_t MaxBurstSize; /* 4A-4B */ 366 uint16_t iscsi_max_burst_len; /* 4A-4B */
468 uint32_t RES4; /* 4C-4F */ 367 uint32_t res5; /* 4C-4F */
469 uint8_t IPAddr[4]; /* 50-53 */ 368 uint8_t ipv4_addr[4]; /* 50-53 */
470 uint8_t RES5[12]; /* 54-5F */ 369 uint16_t ipv4_vlan_tag; /* 54-55 */
471 uint8_t SubnetMask[4]; /* 60-63 */ 370 uint8_t ipv4_addr_state; /* 56 */
472 uint8_t RES6[12]; /* 64-6F */ 371 uint8_t ipv4_cacheid; /* 57 */
473 uint8_t GatewayIPAddr[4]; /* 70-73 */ 372 uint8_t res6[8]; /* 58-5F */
474 uint8_t RES7[12]; /* 74-7F */ 373 uint8_t ipv4_subnet[4]; /* 60-63 */
475 uint8_t PriDNSIPAddr[4]; /* 80-83 */ 374 uint8_t res7[12]; /* 64-6F */
476 uint8_t SecDNSIPAddr[4]; /* 84-87 */ 375 uint8_t ipv4_gw_addr[4]; /* 70-73 */
477 uint8_t RES8[8]; /* 88-8F */ 376 uint8_t res8[0xc]; /* 74-7F */
478 uint8_t Alias[32]; /* 90-AF */ 377 uint8_t pri_dns_srvr_ip[4];/* 80-83 */
479 uint8_t TargAddr[8]; /* B0-B7 *//* /FIXME: Remove?? */ 378 uint8_t sec_dns_srvr_ip[4];/* 84-87 */
480 uint8_t CHAPNameSecretsTable[8]; /* B8-BF */ 379 uint16_t min_eph_port; /* 88-89 */
481 uint8_t EthernetMACAddr[6]; /* C0-C5 */ 380 uint16_t max_eph_port; /* 8A-8B */
482 uint16_t TargetPortalGroup; /* C6-C7 */ 381 uint8_t res9[4]; /* 8C-8F */
483 uint8_t SendScale; /* C8 */ 382 uint8_t iscsi_alias[32];/* 90-AF */
484 uint8_t RecvScale; /* C9 */ 383 uint8_t res9_1[0x16]; /* B0-C5 */
485 uint8_t TypeOfService; /* CA */ 384 uint16_t tgt_portal_grp;/* C6-C7 */
486 uint8_t Time2Live; /* CB */ 385 uint8_t abort_timer; /* C8 */
487 uint16_t VLANPriority; /* CC-CD */ 386 uint8_t ipv4_tcp_wsf; /* C9 */
488 uint16_t Reserved8; /* CE-CF */ 387 uint8_t res10[6]; /* CA-CF */
489 uint8_t SecIPAddr[4]; /* D0-D3 */ 388 uint8_t ipv4_sec_ip_addr[4]; /* D0-D3 */
490 uint8_t Reserved9[12]; /* D4-DF */ 389 uint8_t ipv4_dhcp_vid_len; /* D4 */
491 uint8_t iSNSIPAddr[4]; /* E0-E3 */ 390 uint8_t ipv4_dhcp_vid[11]; /* D5-DF */
492 uint16_t iSNSServerPortNumber; /* E4-E5 */ 391 uint8_t res11[20]; /* E0-F3 */
493 uint8_t Reserved10[10]; /* E6-EF */ 392 uint8_t ipv4_dhcp_alt_cid_len; /* F4 */
494 uint8_t SLPDAIPAddr[4]; /* F0-F3 */ 393 uint8_t ipv4_dhcp_alt_cid[11]; /* F5-FF */
495 uint8_t Reserved11[12]; /* F4-FF */ 394 uint8_t iscsi_name[224]; /* 100-1DF */
496 uint8_t iSCSINameString[256]; /* 100-1FF */ 395 uint8_t res12[32]; /* 1E0-1FF */
396 uint32_t cookie; /* 200-203 */
397 uint16_t ipv6_port; /* 204-205 */
398 uint16_t ipv6_opts; /* 206-207 */
399 uint16_t ipv6_addtl_opts; /* 208-209 */
400 uint16_t ipv6_tcp_opts; /* 20A-20B */
401 uint8_t ipv6_tcp_wsf; /* 20C */
402 uint16_t ipv6_flow_lbl; /* 20D-20F */
403 uint8_t ipv6_gw_addr[16]; /* 210-21F */
404 uint16_t ipv6_vlan_tag; /* 220-221 */
405 uint8_t ipv6_lnk_lcl_addr_state;/* 222 */
406 uint8_t ipv6_addr0_state; /* 223 */
407 uint8_t ipv6_addr1_state; /* 224 */
408 uint8_t ipv6_gw_state; /* 225 */
409 uint8_t ipv6_traffic_class; /* 226 */
410 uint8_t ipv6_hop_limit; /* 227 */
411 uint8_t ipv6_if_id[8]; /* 228-22F */
412 uint8_t ipv6_addr0[16]; /* 230-23F */
413 uint8_t ipv6_addr1[16]; /* 240-24F */
414 uint32_t ipv6_nd_reach_time; /* 250-253 */
415 uint32_t ipv6_nd_rexmit_timer; /* 254-257 */
416 uint32_t ipv6_nd_stale_timeout; /* 258-25B */
417 uint8_t ipv6_dup_addr_detect_count; /* 25C */
418 uint8_t ipv6_cache_id; /* 25D */
419 uint8_t res13[18]; /* 25E-26F */
420 uint32_t ipv6_gw_advrt_mtu; /* 270-273 */
421 uint8_t res14[140]; /* 274-2FF */
422};
423
424struct init_fw_ctrl_blk {
425 struct addr_ctrl_blk pri;
426 struct addr_ctrl_blk sec;
497}; 427};
498 428
499/*************************************************************************/ 429/*************************************************************************/
500 430
501struct dev_db_entry { 431struct dev_db_entry {
502 uint8_t options; /* 00 */ 432 uint16_t options; /* 00-01 */
503#define DDB_OPT_DISC_SESSION 0x10 433#define DDB_OPT_DISC_SESSION 0x10
504#define DDB_OPT_TARGET 0x02 /* device is a target */ 434#define DDB_OPT_TARGET 0x02 /* device is a target */
505 435
506 uint8_t control; /* 01 */ 436 uint16_t exec_throttle; /* 02-03 */
507 437 uint16_t exec_count; /* 04-05 */
508 uint16_t exeThrottle; /* 02-03 */ 438 uint16_t res0; /* 06-07 */
509 uint16_t exeCount; /* 04-05 */ 439 uint16_t iscsi_options; /* 08-09 */
510 uint8_t retryCount; /* 06 */ 440 uint16_t tcp_options; /* 0A-0B */
511 uint8_t retryDelay; /* 07 */ 441 uint16_t ip_options; /* 0C-0D */
512 uint16_t iSCSIOptions; /* 08-09 */ 442 uint16_t iscsi_max_rcv_data_seg_len; /* 0E-0F */
513 443 uint32_t res1; /* 10-13 */
514 uint16_t TCPOptions; /* 0A-0B */ 444 uint16_t iscsi_max_snd_data_seg_len; /* 14-15 */
515 445 uint16_t iscsi_first_burst_len; /* 16-17 */
516 uint16_t IPOptions; /* 0C-0D */ 446 uint16_t iscsi_def_time2wait; /* 18-19 */
517 447 uint16_t iscsi_def_time2retain; /* 1A-1B */
518 uint16_t maxPDUSize; /* 0E-0F */ 448 uint16_t iscsi_max_outsnd_r2t; /* 1C-1D */
519 uint16_t rcvMarkerInt; /* 10-11 */ 449 uint16_t ka_timeout; /* 1E-1F */
520 uint16_t sndMarkerInt; /* 12-13 */ 450 uint8_t isid[6]; /* 20-25 big-endian, must be converted
521 uint16_t iSCSIMaxSndDataSegLen; /* 14-15 */
522 uint16_t firstBurstSize; /* 16-17 */
523 uint16_t minTime2Wait; /* 18-19 : RA :default_time2wait */
524 uint16_t maxTime2Retain; /* 1A-1B */
525 uint16_t maxOutstndngR2T; /* 1C-1D */
526 uint16_t keepAliveTimeout; /* 1E-1F */
527 uint8_t ISID[6]; /* 20-25 big-endian, must be converted
528 * to little-endian */ 451 * to little-endian */
529 uint16_t TSID; /* 26-27 */ 452 uint16_t tsid; /* 26-27 */
530 uint16_t portNumber; /* 28-29 */ 453 uint16_t port; /* 28-29 */
531 uint16_t maxBurstSize; /* 2A-2B */ 454 uint16_t iscsi_max_burst_len; /* 2A-2B */
532 uint16_t taskMngmntTimeout; /* 2C-2D */ 455 uint16_t def_timeout; /* 2C-2D */
533 uint16_t reserved1; /* 2E-2F */ 456 uint16_t res2; /* 2E-2F */
534 uint8_t ipAddr[0x10]; /* 30-3F */ 457 uint8_t ip_addr[0x10]; /* 30-3F */
535 uint8_t iSCSIAlias[0x20]; /* 40-5F */ 458 uint8_t iscsi_alias[0x20]; /* 40-5F */
536 uint8_t targetAddr[0x20]; /* 60-7F */ 459 uint8_t tgt_addr[0x20]; /* 60-7F */
537 uint8_t userID[0x20]; /* 80-9F */ 460 uint16_t mss; /* 80-81 */
538 uint8_t password[0x20]; /* A0-BF */ 461 uint16_t res3; /* 82-83 */
539 uint8_t iscsiName[0x100]; /* C0-1BF : xxzzy Make this a 462 uint16_t lcl_port; /* 84-85 */
463 uint8_t ipv4_tos; /* 86 */
464 uint16_t ipv6_flow_lbl; /* 87-89 */
465 uint8_t res4[0x36]; /* 8A-BF */
466 uint8_t iscsi_name[0xE0]; /* C0-19F : xxzzy Make this a
540 * pointer to a string so we 467 * pointer to a string so we
541 * don't have to reserve soooo 468 * don't have to reserve soooo
542 * much RAM */ 469 * much RAM */
543 uint16_t ddbLink; /* 1C0-1C1 */ 470 uint8_t ipv6_addr[0x10];/* 1A0-1AF */
544 uint16_t CHAPTableIndex; /* 1C2-1C3 */ 471 uint8_t res5[0x10]; /* 1B0-1BF */
545 uint16_t TargetPortalGroup; /* 1C4-1C5 */ 472 uint16_t ddb_link; /* 1C0-1C1 */
546 uint16_t reserved2[2]; /* 1C6-1C7 */ 473 uint16_t chap_tbl_idx; /* 1C2-1C3 */
547 uint32_t statSN; /* 1C8-1CB */ 474 uint16_t tgt_portal_grp; /* 1C4-1C5 */
548 uint32_t expStatSN; /* 1CC-1CF */ 475 uint8_t tcp_xmt_wsf; /* 1C6 */
549 uint16_t reserved3[0x2C]; /* 1D0-1FB */ 476 uint8_t tcp_rcv_wsf; /* 1C7 */
550 uint16_t ddbValidCookie; /* 1FC-1FD */ 477 uint32_t stat_sn; /* 1C8-1CB */
551 uint16_t ddbValidSize; /* 1FE-1FF */ 478 uint32_t exp_stat_sn; /* 1CC-1CF */
479 uint8_t res6[0x30]; /* 1D0-1FF */
552}; 480};
553 481
554/*************************************************************************/ 482/*************************************************************************/
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 5b00cb04e7c0..a3608e028bf6 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -8,6 +8,9 @@
8#ifndef __QLA4x_GBL_H 8#ifndef __QLA4x_GBL_H
9#define __QLA4x_GBL_H 9#define __QLA4x_GBL_H
10 10
11struct iscsi_cls_conn;
12
13void qla4xxx_hw_reset(struct scsi_qla_host *ha);
11int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a); 14int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a);
12int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port); 15int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port);
13int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb); 16int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb);
@@ -58,11 +61,13 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha);
58void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha, 61void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
59 uint32_t intr_status); 62 uint32_t intr_status);
60int qla4xxx_init_rings(struct scsi_qla_host * ha); 63int qla4xxx_init_rings(struct scsi_qla_host * ha);
61struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha, uint32_t index); 64struct srb * qla4xxx_del_from_active_array(struct scsi_qla_host *ha,
65 uint32_t index);
62void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb); 66void qla4xxx_srb_compl(struct scsi_qla_host *ha, struct srb *srb);
63int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha); 67int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host * ha);
64int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha, 68int qla4xxx_process_ddb_changed(struct scsi_qla_host * ha,
65 uint32_t fw_ddb_index, uint32_t state); 69 uint32_t fw_ddb_index, uint32_t state);
70void qla4xxx_dump_buffer(void *b, uint32_t size);
66 71
67extern int ql4xextended_error_logging; 72extern int ql4xextended_error_logging;
68extern int ql4xdiscoverywait; 73extern int ql4xdiscoverywait;
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 6365df268612..d8c064c2afc3 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -6,6 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
9 12
10static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha, 13static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
11 uint32_t fw_ddb_index); 14 uint32_t fw_ddb_index);
@@ -300,12 +303,12 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
300 if (!qla4xxx_fw_ready(ha)) 303 if (!qla4xxx_fw_ready(ha))
301 return status; 304 return status;
302 305
303 set_bit(AF_ONLINE, &ha->flags);
304 return qla4xxx_get_firmware_status(ha); 306 return qla4xxx_get_firmware_status(ha);
305} 307}
306 308
307static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha, 309static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
308 uint32_t fw_ddb_index) 310 uint32_t fw_ddb_index,
311 uint32_t *new_tgt)
309{ 312{
310 struct dev_db_entry *fw_ddb_entry = NULL; 313 struct dev_db_entry *fw_ddb_entry = NULL;
311 dma_addr_t fw_ddb_entry_dma; 314 dma_addr_t fw_ddb_entry_dma;
@@ -313,6 +316,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
313 int found = 0; 316 int found = 0;
314 uint32_t device_state; 317 uint32_t device_state;
315 318
319 *new_tgt = 0;
316 /* Make sure the dma buffer is valid */ 320 /* Make sure the dma buffer is valid */
317 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, 321 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev,
318 sizeof(*fw_ddb_entry), 322 sizeof(*fw_ddb_entry),
@@ -337,7 +341,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
337 DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no, 341 DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no,
338 __func__, fw_ddb_index)); 342 __func__, fw_ddb_index));
339 list_for_each_entry(ddb_entry, &ha->ddb_list, list) { 343 list_for_each_entry(ddb_entry, &ha->ddb_list, list) {
340 if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsiName, 344 if (memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name,
341 ISCSI_NAME_SIZE) == 0) { 345 ISCSI_NAME_SIZE) == 0) {
342 found++; 346 found++;
343 break; 347 break;
@@ -348,6 +352,7 @@ static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha,
348 DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating " 352 DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating "
349 "new ddb\n", ha->host_no, __func__, 353 "new ddb\n", ha->host_no, __func__,
350 fw_ddb_index)); 354 fw_ddb_index));
355 *new_tgt = 1;
351 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index); 356 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
352 } 357 }
353 358
@@ -409,26 +414,26 @@ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha,
409 } 414 }
410 415
411 status = QLA_SUCCESS; 416 status = QLA_SUCCESS;
412 ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->TSID); 417 ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid);
413 ddb_entry->task_mgmt_timeout = 418 ddb_entry->task_mgmt_timeout =
414 le16_to_cpu(fw_ddb_entry->taskMngmntTimeout); 419 le16_to_cpu(fw_ddb_entry->def_timeout);
415 ddb_entry->CmdSn = 0; 420 ddb_entry->CmdSn = 0;
416 ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exeThrottle); 421 ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exec_throttle);
417 ddb_entry->default_relogin_timeout = 422 ddb_entry->default_relogin_timeout =
418 le16_to_cpu(fw_ddb_entry->taskMngmntTimeout); 423 le16_to_cpu(fw_ddb_entry->def_timeout);
419 ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->minTime2Wait); 424 ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait);
420 425
421 /* Update index in case it changed */ 426 /* Update index in case it changed */
422 ddb_entry->fw_ddb_index = fw_ddb_index; 427 ddb_entry->fw_ddb_index = fw_ddb_index;
423 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry; 428 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
424 429
425 ddb_entry->port = le16_to_cpu(fw_ddb_entry->portNumber); 430 ddb_entry->port = le16_to_cpu(fw_ddb_entry->port);
426 ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->TargetPortalGroup); 431 ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp);
427 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsiName[0], 432 memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0],
428 min(sizeof(ddb_entry->iscsi_name), 433 min(sizeof(ddb_entry->iscsi_name),
429 sizeof(fw_ddb_entry->iscsiName))); 434 sizeof(fw_ddb_entry->iscsi_name)));
430 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ipAddr[0], 435 memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0],
431 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ipAddr))); 436 min(sizeof(ddb_entry->ip_addr), sizeof(fw_ddb_entry->ip_addr)));
432 437
433 DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n", 438 DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n",
434 ha->host_no, __func__, fw_ddb_index, 439 ha->host_no, __func__, fw_ddb_index,
@@ -495,6 +500,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
495 uint32_t ddb_state; 500 uint32_t ddb_state;
496 uint32_t conn_err, err_code; 501 uint32_t conn_err, err_code;
497 struct ddb_entry *ddb_entry; 502 struct ddb_entry *ddb_entry;
503 uint32_t new_tgt;
498 504
499 dev_info(&ha->pdev->dev, "Initializing DDBs ...\n"); 505 dev_info(&ha->pdev->dev, "Initializing DDBs ...\n");
500 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; 506 for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES;
@@ -526,8 +532,19 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
526 "completed " 532 "completed "
527 "or access denied failure\n", 533 "or access denied failure\n",
528 ha->host_no, __func__)); 534 ha->host_no, __func__));
529 } else 535 } else {
530 qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0); 536 qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0);
537 if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index,
538 NULL, 0, NULL, &next_fw_ddb_index,
539 &ddb_state, &conn_err, NULL, NULL)
540 == QLA_ERROR) {
541 DEBUG2(printk("scsi%ld: %s:"
542 "get_ddb_entry %d failed\n",
543 ha->host_no,
544 __func__, fw_ddb_index));
545 return QLA_ERROR;
546 }
547 }
531 } 548 }
532 549
533 if (ddb_state != DDB_DS_SESSION_ACTIVE) 550 if (ddb_state != DDB_DS_SESSION_ACTIVE)
@@ -540,7 +557,7 @@ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha)
540 ha->host_no, __func__, fw_ddb_index)); 557 ha->host_no, __func__, fw_ddb_index));
541 558
542 /* Add DDB to internal our ddb list. */ 559 /* Add DDB to internal our ddb list. */
543 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index); 560 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
544 if (ddb_entry == NULL) { 561 if (ddb_entry == NULL) {
545 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory " 562 DEBUG2(printk("scsi%ld: %s: Unable to allocate memory "
546 "for device at fw_ddb_index %d\n", 563 "for device at fw_ddb_index %d\n",
@@ -865,21 +882,19 @@ static int qla4xxx_config_nvram(struct scsi_qla_host *ha)
865 882
866static void qla4x00_pci_config(struct scsi_qla_host *ha) 883static void qla4x00_pci_config(struct scsi_qla_host *ha)
867{ 884{
868 uint16_t w, mwi; 885 uint16_t w;
869 886
870 dev_info(&ha->pdev->dev, "Configuring PCI space...\n"); 887 dev_info(&ha->pdev->dev, "Configuring PCI space...\n");
871 888
872 pci_set_master(ha->pdev); 889 pci_set_master(ha->pdev);
873 mwi = 0; 890 pci_set_mwi(ha->pdev);
874 if (pci_set_mwi(ha->pdev))
875 mwi = PCI_COMMAND_INVALIDATE;
876 /* 891 /*
877 * We want to respect framework's setting of PCI configuration space 892 * We want to respect framework's setting of PCI configuration space
878 * command register and also want to make sure that all bits of 893 * command register and also want to make sure that all bits of
879 * interest to us are properly set in command register. 894 * interest to us are properly set in command register.
880 */ 895 */
881 pci_read_config_word(ha->pdev, PCI_COMMAND, &w); 896 pci_read_config_word(ha->pdev, PCI_COMMAND, &w);
882 w |= mwi | (PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 897 w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
883 w &= ~PCI_COMMAND_INTX_DISABLE; 898 w &= ~PCI_COMMAND_INTX_DISABLE;
884 pci_write_config_word(ha->pdev, PCI_COMMAND, w); 899 pci_write_config_word(ha->pdev, PCI_COMMAND, w);
885} 900}
@@ -911,6 +926,9 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
911 writel(set_rmask(NVR_WRITE_ENABLE), 926 writel(set_rmask(NVR_WRITE_ENABLE),
912 &ha->reg->u1.isp4022.nvram); 927 &ha->reg->u1.isp4022.nvram);
913 928
929 writel(2, &ha->reg->mailbox[6]);
930 readl(&ha->reg->mailbox[6]);
931
914 writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status); 932 writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status);
915 readl(&ha->reg->ctrl_status); 933 readl(&ha->reg->ctrl_status);
916 spin_unlock_irqrestore(&ha->hardware_lock, flags); 934 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -958,25 +976,25 @@ static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha)
958 return status; 976 return status;
959} 977}
960 978
961int ql4xxx_lock_drvr_wait(struct scsi_qla_host *ha) 979int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a)
962{ 980{
963#define QL4_LOCK_DRVR_WAIT 30 981#define QL4_LOCK_DRVR_WAIT 60
964#define QL4_LOCK_DRVR_SLEEP 1 982#define QL4_LOCK_DRVR_SLEEP 1
965 983
966 int drvr_wait = QL4_LOCK_DRVR_WAIT; 984 int drvr_wait = QL4_LOCK_DRVR_WAIT;
967 while (drvr_wait) { 985 while (drvr_wait) {
968 if (ql4xxx_lock_drvr(ha) == 0) { 986 if (ql4xxx_lock_drvr(a) == 0) {
969 ssleep(QL4_LOCK_DRVR_SLEEP); 987 ssleep(QL4_LOCK_DRVR_SLEEP);
970 if (drvr_wait) { 988 if (drvr_wait) {
971 DEBUG2(printk("scsi%ld: %s: Waiting for " 989 DEBUG2(printk("scsi%ld: %s: Waiting for "
972 "Global Init Semaphore(%d)...n", 990 "Global Init Semaphore(%d)...\n",
973 ha->host_no, 991 a->host_no,
974 __func__, drvr_wait)); 992 __func__, drvr_wait));
975 } 993 }
976 drvr_wait -= QL4_LOCK_DRVR_SLEEP; 994 drvr_wait -= QL4_LOCK_DRVR_SLEEP;
977 } else { 995 } else {
978 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore " 996 DEBUG2(printk("scsi%ld: %s: Global Init Semaphore "
979 "acquired.n", ha->host_no, __func__)); 997 "acquired\n", a->host_no, __func__));
980 return QLA_SUCCESS; 998 return QLA_SUCCESS;
981 } 999 }
982 } 1000 }
@@ -1142,8 +1160,10 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1142 * the ddb_list and wait for DHCP lease acquired aen to come in 1160 * the ddb_list and wait for DHCP lease acquired aen to come in
1143 * followed by 0x8014 aen" to trigger the tgt discovery process. 1161 * followed by 0x8014 aen" to trigger the tgt discovery process.
1144 */ 1162 */
1145 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) 1163 if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS){
1164 set_bit(AF_ONLINE, &ha->flags);
1146 return status; 1165 return status;
1166 }
1147 1167
1148 /* Skip device discovery if ip and subnet is zero */ 1168 /* Skip device discovery if ip and subnet is zero */
1149 if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 || 1169 if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 ||
@@ -1177,6 +1197,7 @@ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha,
1177 ha->host_no)); 1197 ha->host_no));
1178 } 1198 }
1179 1199
1200 set_bit(AF_ONLINE, &ha->flags);
1180 exit_init_hba: 1201 exit_init_hba:
1181 return status; 1202 return status;
1182 1203
@@ -1193,9 +1214,10 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
1193 uint32_t fw_ddb_index) 1214 uint32_t fw_ddb_index)
1194{ 1215{
1195 struct ddb_entry * ddb_entry; 1216 struct ddb_entry * ddb_entry;
1217 uint32_t new_tgt;
1196 1218
1197 /* First allocate a device structure */ 1219 /* First allocate a device structure */
1198 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index); 1220 ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt);
1199 if (ddb_entry == NULL) { 1221 if (ddb_entry == NULL) {
1200 DEBUG2(printk(KERN_WARNING 1222 DEBUG2(printk(KERN_WARNING
1201 "scsi%ld: Unable to allocate memory to add " 1223 "scsi%ld: Unable to allocate memory to add "
@@ -1203,6 +1225,18 @@ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha,
1203 return; 1225 return;
1204 } 1226 }
1205 1227
1228 if (!new_tgt && (ddb_entry->fw_ddb_index != fw_ddb_index)) {
1229 /* Target has been bound to a new fw_ddb_index */
1230 qla4xxx_free_ddb(ha, ddb_entry);
1231 ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index);
1232 if (ddb_entry == NULL) {
1233 DEBUG2(printk(KERN_WARNING
1234 "scsi%ld: Unable to allocate memory"
1235 " to add fw_ddb_index %d\n",
1236 ha->host_no, fw_ddb_index));
1237 return;
1238 }
1239 }
1206 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) == 1240 if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) ==
1207 QLA_ERROR) { 1241 QLA_ERROR) {
1208 ha->fw_ddb_index_map[fw_ddb_index] = 1242 ha->fw_ddb_index_map[fw_ddb_index] =
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index a216a1781afb..5006ecb3ef5e 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -6,6 +6,10 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
12
9 13
10#include <scsi/scsi_tcq.h> 14#include <scsi/scsi_tcq.h>
11 15
@@ -141,11 +145,13 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
141 uint16_t avail_dsds; 145 uint16_t avail_dsds;
142 struct data_seg_a64 *cur_dsd; 146 struct data_seg_a64 *cur_dsd;
143 struct scsi_cmnd *cmd; 147 struct scsi_cmnd *cmd;
148 struct scatterlist *sg;
149 int i;
144 150
145 cmd = srb->cmd; 151 cmd = srb->cmd;
146 ha = srb->ha; 152 ha = srb->ha;
147 153
148 if (cmd->request_bufflen == 0 || cmd->sc_data_direction == DMA_NONE) { 154 if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
149 /* No data being transferred */ 155 /* No data being transferred */
150 cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0); 156 cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
151 return; 157 return;
@@ -154,40 +160,27 @@ static void qla4xxx_build_scsi_iocbs(struct srb *srb,
154 avail_dsds = COMMAND_SEG; 160 avail_dsds = COMMAND_SEG;
155 cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]); 161 cur_dsd = (struct data_seg_a64 *) & (cmd_entry->dataseg[0]);
156 162
157 /* Load data segments */ 163 scsi_for_each_sg(cmd, sg, tot_dsds, i) {
158 if (cmd->use_sg) { 164 dma_addr_t sle_dma;
159 struct scatterlist *cur_seg; 165
160 struct scatterlist *end_seg; 166 /* Allocate additional continuation packets? */
161 167 if (avail_dsds == 0) {
162 cur_seg = (struct scatterlist *)cmd->request_buffer; 168 struct continuation_t1_entry *cont_entry;
163 end_seg = cur_seg + tot_dsds; 169
164 while (cur_seg < end_seg) { 170 cont_entry = qla4xxx_alloc_cont_entry(ha);
165 dma_addr_t sle_dma; 171 cur_dsd =
166 172 (struct data_seg_a64 *)
167 /* Allocate additional continuation packets? */ 173 &cont_entry->dataseg[0];
168 if (avail_dsds == 0) { 174 avail_dsds = CONTINUE_SEG;
169 struct continuation_t1_entry *cont_entry;
170
171 cont_entry = qla4xxx_alloc_cont_entry(ha);
172 cur_dsd =
173 (struct data_seg_a64 *)
174 &cont_entry->dataseg[0];
175 avail_dsds = CONTINUE_SEG;
176 }
177
178 sle_dma = sg_dma_address(cur_seg);
179 cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
180 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
181 cur_dsd->count = cpu_to_le32(sg_dma_len(cur_seg));
182 avail_dsds--;
183
184 cur_dsd++;
185 cur_seg++;
186 } 175 }
187 } else { 176
188 cur_dsd->base.addrLow = cpu_to_le32(LSDW(srb->dma_handle)); 177 sle_dma = sg_dma_address(sg);
189 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(srb->dma_handle)); 178 cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
190 cur_dsd->count = cpu_to_le32(cmd->request_bufflen); 179 cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
180 cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
181 avail_dsds--;
182
183 cur_dsd++;
191 } 184 }
192} 185}
193 186
@@ -204,8 +197,8 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
204 struct scsi_cmnd *cmd = srb->cmd; 197 struct scsi_cmnd *cmd = srb->cmd;
205 struct ddb_entry *ddb_entry; 198 struct ddb_entry *ddb_entry;
206 struct command_t3_entry *cmd_entry; 199 struct command_t3_entry *cmd_entry;
207 struct scatterlist *sg = NULL;
208 200
201 int nseg;
209 uint16_t tot_dsds; 202 uint16_t tot_dsds;
210 uint16_t req_cnt; 203 uint16_t req_cnt;
211 204
@@ -233,24 +226,11 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
233 index = (uint32_t)cmd->request->tag; 226 index = (uint32_t)cmd->request->tag;
234 227
235 /* Calculate the number of request entries needed. */ 228 /* Calculate the number of request entries needed. */
236 if (cmd->use_sg) { 229 nseg = scsi_dma_map(cmd);
237 sg = (struct scatterlist *)cmd->request_buffer; 230 if (nseg < 0)
238 tot_dsds = pci_map_sg(ha->pdev, sg, cmd->use_sg, 231 goto queuing_error;
239 cmd->sc_data_direction); 232 tot_dsds = nseg;
240 if (tot_dsds == 0) 233
241 goto queuing_error;
242 } else if (cmd->request_bufflen) {
243 dma_addr_t req_dma;
244
245 req_dma = pci_map_single(ha->pdev, cmd->request_buffer,
246 cmd->request_bufflen,
247 cmd->sc_data_direction);
248 if (dma_mapping_error(req_dma))
249 goto queuing_error;
250
251 srb->dma_handle = req_dma;
252 tot_dsds = 1;
253 }
254 req_cnt = qla4xxx_calc_request_entries(tot_dsds); 234 req_cnt = qla4xxx_calc_request_entries(tot_dsds);
255 235
256 if (ha->req_q_count < (req_cnt + 2)) { 236 if (ha->req_q_count < (req_cnt + 2)) {
@@ -279,7 +259,7 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
279 259
280 int_to_scsilun(cmd->device->lun, &cmd_entry->lun); 260 int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
281 cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn); 261 cmd_entry->cmdSeqNum = cpu_to_le32(ddb_entry->CmdSn);
282 cmd_entry->ttlByteCnt = cpu_to_le32(cmd->request_bufflen); 262 cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
283 memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len); 263 memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
284 cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds); 264 cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
285 cmd_entry->hdr.entryCount = req_cnt; 265 cmd_entry->hdr.entryCount = req_cnt;
@@ -289,13 +269,13 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
289 * transferred, as the data direction bit is sometimed filled 269 * transferred, as the data direction bit is sometimed filled
290 * in when there is no data to be transferred */ 270 * in when there is no data to be transferred */
291 cmd_entry->control_flags = CF_NO_DATA; 271 cmd_entry->control_flags = CF_NO_DATA;
292 if (cmd->request_bufflen) { 272 if (scsi_bufflen(cmd)) {
293 if (cmd->sc_data_direction == DMA_TO_DEVICE) 273 if (cmd->sc_data_direction == DMA_TO_DEVICE)
294 cmd_entry->control_flags = CF_WRITE; 274 cmd_entry->control_flags = CF_WRITE;
295 else if (cmd->sc_data_direction == DMA_FROM_DEVICE) 275 else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
296 cmd_entry->control_flags = CF_READ; 276 cmd_entry->control_flags = CF_READ;
297 277
298 ha->bytes_xfered += cmd->request_bufflen; 278 ha->bytes_xfered += scsi_bufflen(cmd);
299 if (ha->bytes_xfered & ~0xFFFFF){ 279 if (ha->bytes_xfered & ~0xFFFFF){
300 ha->total_mbytes_xferred += ha->bytes_xfered >> 20; 280 ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
301 ha->bytes_xfered &= 0xFFFFF; 281 ha->bytes_xfered &= 0xFFFFF;
@@ -359,14 +339,9 @@ int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb * srb)
359 return QLA_SUCCESS; 339 return QLA_SUCCESS;
360 340
361queuing_error: 341queuing_error:
342 if (tot_dsds)
343 scsi_dma_unmap(cmd);
362 344
363 if (cmd->use_sg && tot_dsds) {
364 sg = (struct scatterlist *) cmd->request_buffer;
365 pci_unmap_sg(ha->pdev, sg, cmd->use_sg,
366 cmd->sc_data_direction);
367 } else if (tot_dsds)
368 pci_unmap_single(ha->pdev, srb->dma_handle,
369 cmd->request_bufflen, cmd->sc_data_direction);
370 spin_unlock_irqrestore(&ha->hardware_lock, flags); 345 spin_unlock_irqrestore(&ha->hardware_lock, flags);
371 346
372 return QLA_ERROR; 347 return QLA_ERROR;
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 35b9e36a0e8d..b47bd85f114d 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -6,6 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
9 12
10/** 13/**
11 * qla2x00_process_completed_request() - Process a Fast Post response. 14 * qla2x00_process_completed_request() - Process a Fast Post response.
@@ -92,7 +95,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
92 95
93 if (sts_entry->iscsiFlags & 96 if (sts_entry->iscsiFlags &
94 (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER)) 97 (ISCSI_FLAG_RESIDUAL_OVER|ISCSI_FLAG_RESIDUAL_UNDER))
95 cmd->resid = residual; 98 scsi_set_resid(cmd, residual);
96 99
97 cmd->result = DID_OK << 16 | scsi_status; 100 cmd->result = DID_OK << 16 | scsi_status;
98 101
@@ -176,14 +179,14 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
176 * Firmware detected a SCSI transport underrun 179 * Firmware detected a SCSI transport underrun
177 * condition 180 * condition
178 */ 181 */
179 cmd->resid = residual; 182 scsi_set_resid(cmd, residual);
180 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status " 183 DEBUG2(printk("scsi%ld:%d:%d:%d: %s: UNDERRUN status "
181 "detected, xferlen = 0x%x, residual = " 184 "detected, xferlen = 0x%x, residual = "
182 "0x%x\n", 185 "0x%x\n",
183 ha->host_no, cmd->device->channel, 186 ha->host_no, cmd->device->channel,
184 cmd->device->id, 187 cmd->device->id,
185 cmd->device->lun, __func__, 188 cmd->device->lun, __func__,
186 cmd->request_bufflen, 189 scsi_bufflen(cmd),
187 residual)); 190 residual));
188 } 191 }
189 192
@@ -227,7 +230,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
227 if ((sts_entry->iscsiFlags & 230 if ((sts_entry->iscsiFlags &
228 ISCSI_FLAG_RESIDUAL_UNDER) == 0) { 231 ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
229 cmd->result = DID_BUS_BUSY << 16; 232 cmd->result = DID_BUS_BUSY << 16;
230 } else if ((cmd->request_bufflen - residual) < 233 } else if ((scsi_bufflen(cmd) - residual) <
231 cmd->underflow) { 234 cmd->underflow) {
232 /* 235 /*
233 * Handle mid-layer underflow??? 236 * Handle mid-layer underflow???
@@ -248,7 +251,7 @@ static void qla4xxx_status_entry(struct scsi_qla_host *ha,
248 cmd->device->channel, 251 cmd->device->channel,
249 cmd->device->id, 252 cmd->device->id,
250 cmd->device->lun, __func__, 253 cmd->device->lun, __func__,
251 cmd->request_bufflen, residual)); 254 scsi_bufflen(cmd), residual));
252 255
253 cmd->result = DID_ERROR << 16; 256 cmd->result = DID_ERROR << 16;
254 } else { 257 } else {
@@ -417,6 +420,7 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
417 uint32_t mbox_status) 420 uint32_t mbox_status)
418{ 421{
419 int i; 422 int i;
423 uint32_t mbox_stat2, mbox_stat3;
420 424
421 if ((mbox_status == MBOX_STS_BUSY) || 425 if ((mbox_status == MBOX_STS_BUSY) ||
422 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) || 426 (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
@@ -437,6 +441,12 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
437 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) { 441 } else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
438 /* Immediately process the AENs that don't require much work. 442 /* Immediately process the AENs that don't require much work.
439 * Only queue the database_changed AENs */ 443 * Only queue the database_changed AENs */
444 if (ha->aen_log.count < MAX_AEN_ENTRIES) {
445 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
446 ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
447 readl(&ha->reg->mailbox[i]);
448 ha->aen_log.count++;
449 }
440 switch (mbox_status) { 450 switch (mbox_status) {
441 case MBOX_ASTS_SYSTEM_ERROR: 451 case MBOX_ASTS_SYSTEM_ERROR:
442 /* Log Mailbox registers */ 452 /* Log Mailbox registers */
@@ -493,6 +503,16 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
493 mbox_status)); 503 mbox_status));
494 break; 504 break;
495 505
506 case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
507 mbox_stat2 = readl(&ha->reg->mailbox[2]);
508 mbox_stat3 = readl(&ha->reg->mailbox[3]);
509
510 if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
511 set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
512 else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
513 set_bit(DPC_RESET_HA, &ha->dpc_flags);
514 break;
515
496 case MBOX_ASTS_MAC_ADDRESS_CHANGED: 516 case MBOX_ASTS_MAC_ADDRESS_CHANGED:
497 case MBOX_ASTS_DNS: 517 case MBOX_ASTS_DNS:
498 /* No action */ 518 /* No action */
@@ -518,11 +538,6 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
518 /* Queue AEN information and process it in the DPC 538 /* Queue AEN information and process it in the DPC
519 * routine */ 539 * routine */
520 if (ha->aen_q_count > 0) { 540 if (ha->aen_q_count > 0) {
521 /* advance pointer */
522 if (ha->aen_in == (MAX_AEN_ENTRIES - 1))
523 ha->aen_in = 0;
524 else
525 ha->aen_in++;
526 541
527 /* decrement available counter */ 542 /* decrement available counter */
528 ha->aen_q_count--; 543 ha->aen_q_count--;
@@ -542,6 +557,10 @@ static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
542 ha->aen_q[ha->aen_in].mbox_sts[2], 557 ha->aen_q[ha->aen_in].mbox_sts[2],
543 ha->aen_q[ha->aen_in].mbox_sts[3], 558 ha->aen_q[ha->aen_in].mbox_sts[3],
544 ha->aen_q[ha->aen_in]. mbox_sts[4])); 559 ha->aen_q[ha->aen_in]. mbox_sts[4]));
560 /* advance pointer */
561 ha->aen_in++;
562 if (ha->aen_in == MAX_AEN_ENTRIES)
563 ha->aen_in = 0;
545 564
546 /* The DPC routine will process the aen */ 565 /* The DPC routine will process the aen */
547 set_bit(DPC_AEN, &ha->dpc_flags); 566 set_bit(DPC_AEN, &ha->dpc_flags);
@@ -724,25 +743,24 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
724 743
725 spin_lock_irqsave(&ha->hardware_lock, flags); 744 spin_lock_irqsave(&ha->hardware_lock, flags);
726 while (ha->aen_out != ha->aen_in) { 745 while (ha->aen_out != ha->aen_in) {
727 /* Advance pointers for next entry */
728 if (ha->aen_out == (MAX_AEN_ENTRIES - 1))
729 ha->aen_out = 0;
730 else
731 ha->aen_out++;
732
733 ha->aen_q_count++;
734 aen = &ha->aen_q[ha->aen_out]; 746 aen = &ha->aen_q[ha->aen_out];
735
736 /* copy aen information to local structure */ 747 /* copy aen information to local structure */
737 for (i = 0; i < MBOX_AEN_REG_COUNT; i++) 748 for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
738 mbox_sts[i] = aen->mbox_sts[i]; 749 mbox_sts[i] = aen->mbox_sts[i];
739 750
751 ha->aen_q_count++;
752 ha->aen_out++;
753
754 if (ha->aen_out == MAX_AEN_ENTRIES)
755 ha->aen_out = 0;
756
740 spin_unlock_irqrestore(&ha->hardware_lock, flags); 757 spin_unlock_irqrestore(&ha->hardware_lock, flags);
741 758
742 DEBUG(printk("scsi%ld: AEN[%d] %04x, index [%d] state=%04x " 759 DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
743 "mod=%x conerr=%08x \n", ha->host_no, ha->aen_out, 760 " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
744 mbox_sts[0], mbox_sts[2], mbox_sts[3], 761 (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
745 mbox_sts[1], mbox_sts[4])); 762 mbox_sts[0], mbox_sts[1], mbox_sts[2],
763 mbox_sts[3], mbox_sts[4]));
746 764
747 switch (mbox_sts[0]) { 765 switch (mbox_sts[0]) {
748 case MBOX_ASTS_DATABASE_CHANGED: 766 case MBOX_ASTS_DATABASE_CHANGED:
@@ -792,6 +810,5 @@ void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
792 spin_lock_irqsave(&ha->hardware_lock, flags); 810 spin_lock_irqsave(&ha->hardware_lock, flags);
793 } 811 }
794 spin_unlock_irqrestore(&ha->hardware_lock, flags); 812 spin_unlock_irqrestore(&ha->hardware_lock, flags);
795
796} 813}
797 814
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index f116ff917237..35cd73c72a68 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -6,6 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
9 12
10 13
11/** 14/**
@@ -169,84 +172,6 @@ mbox_exit:
169 return status; 172 return status;
170} 173}
171 174
172
173#if 0
174
175/**
176 * qla4xxx_issue_iocb - issue mailbox iocb command
177 * @ha: adapter state pointer.
178 * @buffer: buffer pointer.
179 * @phys_addr: physical address of buffer.
180 * @size: size of buffer.
181 *
182 * Issues iocbs via mailbox commands.
183 * TARGET_QUEUE_LOCK must be released.
184 * ADAPTER_STATE_LOCK must be released.
185 **/
186int
187qla4xxx_issue_iocb(struct scsi_qla_host * ha, void *buffer,
188 dma_addr_t phys_addr, size_t size)
189{
190 uint32_t mbox_cmd[MBOX_REG_COUNT];
191 uint32_t mbox_sts[MBOX_REG_COUNT];
192 int status;
193
194 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
195 memset(&mbox_sts, 0, sizeof(mbox_sts));
196 mbox_cmd[0] = MBOX_CMD_EXECUTE_IOCB_A64;
197 mbox_cmd[1] = 0;
198 mbox_cmd[2] = LSDW(phys_addr);
199 mbox_cmd[3] = MSDW(phys_addr);
200 status = qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]);
201 return status;
202}
203
204int qla4xxx_conn_close_sess_logout(struct scsi_qla_host * ha,
205 uint16_t fw_ddb_index,
206 uint16_t connection_id,
207 uint16_t option)
208{
209 uint32_t mbox_cmd[MBOX_REG_COUNT];
210 uint32_t mbox_sts[MBOX_REG_COUNT];
211
212 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
213 memset(&mbox_sts, 0, sizeof(mbox_sts));
214 mbox_cmd[0] = MBOX_CMD_CONN_CLOSE_SESS_LOGOUT;
215 mbox_cmd[1] = fw_ddb_index;
216 mbox_cmd[2] = connection_id;
217 mbox_cmd[3] = LOGOUT_OPTION_RELOGIN;
218 if (qla4xxx_mailbox_command(ha, 4, 2, &mbox_cmd[0], &mbox_sts[0]) !=
219 QLA_SUCCESS) {
220 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_CONN_CLOSE_SESS_LOGOUT "
221 "option %04x failed sts %04X %04X",
222 ha->host_no, __func__,
223 option, mbox_sts[0], mbox_sts[1]));
224 if (mbox_sts[0] == 0x4005)
225 DEBUG2(printk("%s reason %04X\n", __func__,
226 mbox_sts[1]));
227 }
228 return QLA_SUCCESS;
229}
230
231int qla4xxx_clear_database_entry(struct scsi_qla_host * ha,
232 uint16_t fw_ddb_index)
233{
234 uint32_t mbox_cmd[MBOX_REG_COUNT];
235 uint32_t mbox_sts[MBOX_REG_COUNT];
236
237 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
238 memset(&mbox_sts, 0, sizeof(mbox_sts));
239 mbox_cmd[0] = MBOX_CMD_CLEAR_DATABASE_ENTRY;
240 mbox_cmd[1] = fw_ddb_index;
241 if (qla4xxx_mailbox_command(ha, 2, 5, &mbox_cmd[0], &mbox_sts[0]) !=
242 QLA_SUCCESS)
243 return QLA_ERROR;
244
245 return QLA_SUCCESS;
246}
247
248#endif /* 0 */
249
250/** 175/**
251 * qla4xxx_initialize_fw_cb - initializes firmware control block. 176 * qla4xxx_initialize_fw_cb - initializes firmware control block.
252 * @ha: Pointer to host adapter structure. 177 * @ha: Pointer to host adapter structure.
@@ -272,10 +197,13 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
272 /* Get Initialize Firmware Control Block. */ 197 /* Get Initialize Firmware Control Block. */
273 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 198 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
274 memset(&mbox_sts, 0, sizeof(mbox_sts)); 199 memset(&mbox_sts, 0, sizeof(mbox_sts));
200
275 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; 201 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
276 mbox_cmd[2] = LSDW(init_fw_cb_dma); 202 mbox_cmd[2] = LSDW(init_fw_cb_dma);
277 mbox_cmd[3] = MSDW(init_fw_cb_dma); 203 mbox_cmd[3] = MSDW(init_fw_cb_dma);
278 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) != 204 mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
205
206 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
279 QLA_SUCCESS) { 207 QLA_SUCCESS) {
280 dma_free_coherent(&ha->pdev->dev, 208 dma_free_coherent(&ha->pdev->dev,
281 sizeof(struct init_fw_ctrl_blk), 209 sizeof(struct init_fw_ctrl_blk),
@@ -287,51 +215,56 @@ int qla4xxx_initialize_fw_cb(struct scsi_qla_host * ha)
287 qla4xxx_init_rings(ha); 215 qla4xxx_init_rings(ha);
288 216
289 /* Fill in the request and response queue information. */ 217 /* Fill in the request and response queue information. */
290 init_fw_cb->ReqQConsumerIndex = cpu_to_le16(ha->request_out); 218 init_fw_cb->pri.rqq_consumer_idx = cpu_to_le16(ha->request_out);
291 init_fw_cb->ComplQProducerIndex = cpu_to_le16(ha->response_in); 219 init_fw_cb->pri.compq_producer_idx = cpu_to_le16(ha->response_in);
292 init_fw_cb->ReqQLen = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH); 220 init_fw_cb->pri.rqq_len = __constant_cpu_to_le16(REQUEST_QUEUE_DEPTH);
293 init_fw_cb->ComplQLen = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH); 221 init_fw_cb->pri.compq_len = __constant_cpu_to_le16(RESPONSE_QUEUE_DEPTH);
294 init_fw_cb->ReqQAddrLo = cpu_to_le32(LSDW(ha->request_dma)); 222 init_fw_cb->pri.rqq_addr_lo = cpu_to_le32(LSDW(ha->request_dma));
295 init_fw_cb->ReqQAddrHi = cpu_to_le32(MSDW(ha->request_dma)); 223 init_fw_cb->pri.rqq_addr_hi = cpu_to_le32(MSDW(ha->request_dma));
296 init_fw_cb->ComplQAddrLo = cpu_to_le32(LSDW(ha->response_dma)); 224 init_fw_cb->pri.compq_addr_lo = cpu_to_le32(LSDW(ha->response_dma));
297 init_fw_cb->ComplQAddrHi = cpu_to_le32(MSDW(ha->response_dma)); 225 init_fw_cb->pri.compq_addr_hi = cpu_to_le32(MSDW(ha->response_dma));
298 init_fw_cb->ShadowRegBufAddrLo = 226 init_fw_cb->pri.shdwreg_addr_lo =
299 cpu_to_le32(LSDW(ha->shadow_regs_dma)); 227 cpu_to_le32(LSDW(ha->shadow_regs_dma));
300 init_fw_cb->ShadowRegBufAddrHi = 228 init_fw_cb->pri.shdwreg_addr_hi =
301 cpu_to_le32(MSDW(ha->shadow_regs_dma)); 229 cpu_to_le32(MSDW(ha->shadow_regs_dma));
302 230
303 /* Set up required options. */ 231 /* Set up required options. */
304 init_fw_cb->FwOptions |= 232 init_fw_cb->pri.fw_options |=
305 __constant_cpu_to_le16(FWOPT_SESSION_MODE | 233 __constant_cpu_to_le16(FWOPT_SESSION_MODE |
306 FWOPT_INITIATOR_MODE); 234 FWOPT_INITIATOR_MODE);
307 init_fw_cb->FwOptions &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE); 235 init_fw_cb->pri.fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
308 236
309 /* Save some info in adapter structure. */ 237 /* Save some info in adapter structure. */
310 ha->firmware_options = le16_to_cpu(init_fw_cb->FwOptions); 238 ha->firmware_options = le16_to_cpu(init_fw_cb->pri.fw_options);
311 ha->tcp_options = le16_to_cpu(init_fw_cb->TCPOptions); 239 ha->tcp_options = le16_to_cpu(init_fw_cb->pri.ipv4_tcp_opts);
312 ha->heartbeat_interval = init_fw_cb->HeartbeatInterval; 240 ha->heartbeat_interval = init_fw_cb->pri.hb_interval;
313 memcpy(ha->ip_address, init_fw_cb->IPAddr, 241 memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
314 min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr))); 242 min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
315 memcpy(ha->subnet_mask, init_fw_cb->SubnetMask, 243 memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
316 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask))); 244 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
317 memcpy(ha->gateway, init_fw_cb->GatewayIPAddr, 245 memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
318 min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr))); 246 min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
319 memcpy(ha->name_string, init_fw_cb->iSCSINameString, 247 memcpy(ha->name_string, init_fw_cb->pri.iscsi_name,
320 min(sizeof(ha->name_string), 248 min(sizeof(ha->name_string),
321 sizeof(init_fw_cb->iSCSINameString))); 249 sizeof(init_fw_cb->pri.iscsi_name)));
322 memcpy(ha->alias, init_fw_cb->Alias, 250 /*memcpy(ha->alias, init_fw_cb->Alias,
323 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias))); 251 min(sizeof(ha->alias), sizeof(init_fw_cb->Alias)));*/
324 252
325 /* Save Command Line Paramater info */ 253 /* Save Command Line Paramater info */
326 ha->port_down_retry_count = le16_to_cpu(init_fw_cb->KeepAliveTimeout); 254 ha->port_down_retry_count = le16_to_cpu(init_fw_cb->pri.conn_ka_timeout);
327 ha->discovery_wait = ql4xdiscoverywait; 255 ha->discovery_wait = ql4xdiscoverywait;
328 256
329 /* Send Initialize Firmware Control Block. */ 257 /* Send Initialize Firmware Control Block. */
258 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
259 memset(&mbox_sts, 0, sizeof(mbox_sts));
260
330 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE; 261 mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
331 mbox_cmd[1] = 0; 262 mbox_cmd[1] = 0;
332 mbox_cmd[2] = LSDW(init_fw_cb_dma); 263 mbox_cmd[2] = LSDW(init_fw_cb_dma);
333 mbox_cmd[3] = MSDW(init_fw_cb_dma); 264 mbox_cmd[3] = MSDW(init_fw_cb_dma);
334 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) == 265 mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
266
267 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) ==
335 QLA_SUCCESS) 268 QLA_SUCCESS)
336 status = QLA_SUCCESS; 269 status = QLA_SUCCESS;
337 else { 270 else {
@@ -368,12 +301,14 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
368 /* Get Initialize Firmware Control Block. */ 301 /* Get Initialize Firmware Control Block. */
369 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 302 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
370 memset(&mbox_sts, 0, sizeof(mbox_sts)); 303 memset(&mbox_sts, 0, sizeof(mbox_sts));
304
371 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk)); 305 memset(init_fw_cb, 0, sizeof(struct init_fw_ctrl_blk));
372 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK; 306 mbox_cmd[0] = MBOX_CMD_GET_INIT_FW_CTRL_BLOCK;
373 mbox_cmd[2] = LSDW(init_fw_cb_dma); 307 mbox_cmd[2] = LSDW(init_fw_cb_dma);
374 mbox_cmd[3] = MSDW(init_fw_cb_dma); 308 mbox_cmd[3] = MSDW(init_fw_cb_dma);
309 mbox_cmd[4] = sizeof(struct init_fw_ctrl_blk);
375 310
376 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) != 311 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
377 QLA_SUCCESS) { 312 QLA_SUCCESS) {
378 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n", 313 DEBUG2(printk("scsi%ld: %s: Failed to get init_fw_ctrl_blk\n",
379 ha->host_no, __func__)); 314 ha->host_no, __func__));
@@ -384,12 +319,12 @@ int qla4xxx_get_dhcp_ip_address(struct scsi_qla_host * ha)
384 } 319 }
385 320
386 /* Save IP Address. */ 321 /* Save IP Address. */
387 memcpy(ha->ip_address, init_fw_cb->IPAddr, 322 memcpy(ha->ip_address, init_fw_cb->pri.ipv4_addr,
388 min(sizeof(ha->ip_address), sizeof(init_fw_cb->IPAddr))); 323 min(sizeof(ha->ip_address), sizeof(init_fw_cb->pri.ipv4_addr)));
389 memcpy(ha->subnet_mask, init_fw_cb->SubnetMask, 324 memcpy(ha->subnet_mask, init_fw_cb->pri.ipv4_subnet,
390 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->SubnetMask))); 325 min(sizeof(ha->subnet_mask), sizeof(init_fw_cb->pri.ipv4_subnet)));
391 memcpy(ha->gateway, init_fw_cb->GatewayIPAddr, 326 memcpy(ha->gateway, init_fw_cb->pri.ipv4_gw_addr,
392 min(sizeof(ha->gateway), sizeof(init_fw_cb->GatewayIPAddr))); 327 min(sizeof(ha->gateway), sizeof(init_fw_cb->pri.ipv4_gw_addr)));
393 328
394 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk), 329 dma_free_coherent(&ha->pdev->dev, sizeof(struct init_fw_ctrl_blk),
395 init_fw_cb, init_fw_cb_dma); 330 init_fw_cb, init_fw_cb_dma);
@@ -409,8 +344,10 @@ int qla4xxx_get_firmware_state(struct scsi_qla_host * ha)
409 /* Get firmware version */ 344 /* Get firmware version */
410 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 345 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
411 memset(&mbox_sts, 0, sizeof(mbox_sts)); 346 memset(&mbox_sts, 0, sizeof(mbox_sts));
347
412 mbox_cmd[0] = MBOX_CMD_GET_FW_STATE; 348 mbox_cmd[0] = MBOX_CMD_GET_FW_STATE;
413 if (qla4xxx_mailbox_command(ha, 1, 4, &mbox_cmd[0], &mbox_sts[0]) != 349
350 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 4, &mbox_cmd[0], &mbox_sts[0]) !=
414 QLA_SUCCESS) { 351 QLA_SUCCESS) {
415 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ " 352 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATE failed w/ "
416 "status %04X\n", ha->host_no, __func__, 353 "status %04X\n", ha->host_no, __func__,
@@ -438,8 +375,10 @@ int qla4xxx_get_firmware_status(struct scsi_qla_host * ha)
438 /* Get firmware version */ 375 /* Get firmware version */
439 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 376 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
440 memset(&mbox_sts, 0, sizeof(mbox_sts)); 377 memset(&mbox_sts, 0, sizeof(mbox_sts));
378
441 mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS; 379 mbox_cmd[0] = MBOX_CMD_GET_FW_STATUS;
442 if (qla4xxx_mailbox_command(ha, 1, 3, &mbox_cmd[0], &mbox_sts[0]) != 380
381 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
443 QLA_SUCCESS) { 382 QLA_SUCCESS) {
444 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ " 383 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_FW_STATUS failed w/ "
445 "status %04X\n", ha->host_no, __func__, 384 "status %04X\n", ha->host_no, __func__,
@@ -491,11 +430,14 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
491 } 430 }
492 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 431 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
493 memset(&mbox_sts, 0, sizeof(mbox_sts)); 432 memset(&mbox_sts, 0, sizeof(mbox_sts));
433
494 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY; 434 mbox_cmd[0] = MBOX_CMD_GET_DATABASE_ENTRY;
495 mbox_cmd[1] = (uint32_t) fw_ddb_index; 435 mbox_cmd[1] = (uint32_t) fw_ddb_index;
496 mbox_cmd[2] = LSDW(fw_ddb_entry_dma); 436 mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
497 mbox_cmd[3] = MSDW(fw_ddb_entry_dma); 437 mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
498 if (qla4xxx_mailbox_command(ha, 4, 7, &mbox_cmd[0], &mbox_sts[0]) == 438 mbox_cmd[4] = sizeof(struct dev_db_entry);
439
440 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 7, &mbox_cmd[0], &mbox_sts[0]) ==
499 QLA_ERROR) { 441 QLA_ERROR) {
500 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed" 442 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_GET_DATABASE_ENTRY failed"
501 " with status 0x%04X\n", ha->host_no, __func__, 443 " with status 0x%04X\n", ha->host_no, __func__,
@@ -512,11 +454,11 @@ int qla4xxx_get_fwddb_entry(struct scsi_qla_host *ha,
512 dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d " 454 dev_info(&ha->pdev->dev, "DDB[%d] MB0 %04x Tot %d Next %d "
513 "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n", 455 "State %04x ConnErr %08x %d.%d.%d.%d:%04d \"%s\"\n",
514 fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3], 456 fw_ddb_index, mbox_sts[0], mbox_sts[2], mbox_sts[3],
515 mbox_sts[4], mbox_sts[5], fw_ddb_entry->ipAddr[0], 457 mbox_sts[4], mbox_sts[5], fw_ddb_entry->ip_addr[0],
516 fw_ddb_entry->ipAddr[1], fw_ddb_entry->ipAddr[2], 458 fw_ddb_entry->ip_addr[1], fw_ddb_entry->ip_addr[2],
517 fw_ddb_entry->ipAddr[3], 459 fw_ddb_entry->ip_addr[3],
518 le16_to_cpu(fw_ddb_entry->portNumber), 460 le16_to_cpu(fw_ddb_entry->port),
519 fw_ddb_entry->iscsiName); 461 fw_ddb_entry->iscsi_name);
520 } 462 }
521 if (num_valid_ddb_entries) 463 if (num_valid_ddb_entries)
522 *num_valid_ddb_entries = mbox_sts[2]; 464 *num_valid_ddb_entries = mbox_sts[2];
@@ -571,35 +513,10 @@ int qla4xxx_set_ddb_entry(struct scsi_qla_host * ha, uint16_t fw_ddb_index,
571 mbox_cmd[1] = (uint32_t) fw_ddb_index; 513 mbox_cmd[1] = (uint32_t) fw_ddb_index;
572 mbox_cmd[2] = LSDW(fw_ddb_entry_dma); 514 mbox_cmd[2] = LSDW(fw_ddb_entry_dma);
573 mbox_cmd[3] = MSDW(fw_ddb_entry_dma); 515 mbox_cmd[3] = MSDW(fw_ddb_entry_dma);
574 return qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]); 516 mbox_cmd[4] = sizeof(struct dev_db_entry);
575}
576 517
577#if 0 518 return qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
578int qla4xxx_conn_open_session_login(struct scsi_qla_host * ha,
579 uint16_t fw_ddb_index)
580{
581 int status = QLA_ERROR;
582 uint32_t mbox_cmd[MBOX_REG_COUNT];
583 uint32_t mbox_sts[MBOX_REG_COUNT];
584
585 /* Do not wait for completion. The firmware will send us an
586 * ASTS_DATABASE_CHANGED (0x8014) to notify us of the login status.
587 */
588 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
589 memset(&mbox_sts, 0, sizeof(mbox_sts));
590 mbox_cmd[0] = MBOX_CMD_CONN_OPEN_SESS_LOGIN;
591 mbox_cmd[1] = (uint32_t) fw_ddb_index;
592 mbox_cmd[2] = 0;
593 mbox_cmd[3] = 0;
594 mbox_cmd[4] = 0;
595 status = qla4xxx_mailbox_command(ha, 4, 0, &mbox_cmd[0], &mbox_sts[0]);
596 DEBUG2(printk("%s fw_ddb_index=%d status=%d mbx0_1=0x%x :0x%x\n",
597 __func__, fw_ddb_index, status, mbox_sts[0],
598 mbox_sts[1]);)
599
600 return status;
601} 519}
602#endif /* 0 */
603 520
604/** 521/**
605 * qla4xxx_get_crash_record - retrieves crash record. 522 * qla4xxx_get_crash_record - retrieves crash record.
@@ -614,12 +531,14 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
614 struct crash_record *crash_record = NULL; 531 struct crash_record *crash_record = NULL;
615 dma_addr_t crash_record_dma = 0; 532 dma_addr_t crash_record_dma = 0;
616 uint32_t crash_record_size = 0; 533 uint32_t crash_record_size = 0;
534
617 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 535 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
618 memset(&mbox_sts, 0, sizeof(mbox_cmd)); 536 memset(&mbox_sts, 0, sizeof(mbox_cmd));
619 537
620 /* Get size of crash record. */ 538 /* Get size of crash record. */
621 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD; 539 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
622 if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) != 540
541 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
623 QLA_SUCCESS) { 542 QLA_SUCCESS) {
624 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n", 543 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve size!\n",
625 ha->host_no, __func__)); 544 ha->host_no, __func__));
@@ -639,11 +558,15 @@ void qla4xxx_get_crash_record(struct scsi_qla_host * ha)
639 goto exit_get_crash_record; 558 goto exit_get_crash_record;
640 559
641 /* Get Crash Record. */ 560 /* Get Crash Record. */
561 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
562 memset(&mbox_sts, 0, sizeof(mbox_cmd));
563
642 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD; 564 mbox_cmd[0] = MBOX_CMD_GET_CRASH_RECORD;
643 mbox_cmd[2] = LSDW(crash_record_dma); 565 mbox_cmd[2] = LSDW(crash_record_dma);
644 mbox_cmd[3] = MSDW(crash_record_dma); 566 mbox_cmd[3] = MSDW(crash_record_dma);
645 mbox_cmd[4] = crash_record_size; 567 mbox_cmd[4] = crash_record_size;
646 if (qla4xxx_mailbox_command(ha, 5, 5, &mbox_cmd[0], &mbox_sts[0]) != 568
569 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
647 QLA_SUCCESS) 570 QLA_SUCCESS)
648 goto exit_get_crash_record; 571 goto exit_get_crash_record;
649 572
@@ -655,7 +578,6 @@ exit_get_crash_record:
655 crash_record, crash_record_dma); 578 crash_record, crash_record_dma);
656} 579}
657 580
658#if 0
659/** 581/**
660 * qla4xxx_get_conn_event_log - retrieves connection event log 582 * qla4xxx_get_conn_event_log - retrieves connection event log
661 * @ha: Pointer to host adapter structure. 583 * @ha: Pointer to host adapter structure.
@@ -678,7 +600,8 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
678 600
679 /* Get size of crash record. */ 601 /* Get size of crash record. */
680 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG; 602 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
681 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) != 603
604 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
682 QLA_SUCCESS) 605 QLA_SUCCESS)
683 goto exit_get_event_log; 606 goto exit_get_event_log;
684 607
@@ -693,10 +616,14 @@ void qla4xxx_get_conn_event_log(struct scsi_qla_host * ha)
693 goto exit_get_event_log; 616 goto exit_get_event_log;
694 617
695 /* Get Crash Record. */ 618 /* Get Crash Record. */
619 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
620 memset(&mbox_sts, 0, sizeof(mbox_cmd));
621
696 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG; 622 mbox_cmd[0] = MBOX_CMD_GET_CONN_EVENT_LOG;
697 mbox_cmd[2] = LSDW(event_log_dma); 623 mbox_cmd[2] = LSDW(event_log_dma);
698 mbox_cmd[3] = MSDW(event_log_dma); 624 mbox_cmd[3] = MSDW(event_log_dma);
699 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) != 625
626 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
700 QLA_SUCCESS) { 627 QLA_SUCCESS) {
701 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event " 628 DEBUG2(printk("scsi%ld: %s: ERROR: Unable to retrieve event "
702 "log!\n", ha->host_no, __func__)); 629 "log!\n", ha->host_no, __func__));
@@ -745,7 +672,6 @@ exit_get_event_log:
745 dma_free_coherent(&ha->pdev->dev, event_log_size, event_log, 672 dma_free_coherent(&ha->pdev->dev, event_log_size, event_log,
746 event_log_dma); 673 event_log_dma);
747} 674}
748#endif /* 0 */
749 675
750/** 676/**
751 * qla4xxx_reset_lun - issues LUN Reset 677 * qla4xxx_reset_lun - issues LUN Reset
@@ -773,11 +699,13 @@ int qla4xxx_reset_lun(struct scsi_qla_host * ha, struct ddb_entry * ddb_entry,
773 */ 699 */
774 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 700 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
775 memset(&mbox_sts, 0, sizeof(mbox_sts)); 701 memset(&mbox_sts, 0, sizeof(mbox_sts));
702
776 mbox_cmd[0] = MBOX_CMD_LUN_RESET; 703 mbox_cmd[0] = MBOX_CMD_LUN_RESET;
777 mbox_cmd[1] = ddb_entry->fw_ddb_index; 704 mbox_cmd[1] = ddb_entry->fw_ddb_index;
778 mbox_cmd[2] = lun << 8; 705 mbox_cmd[2] = lun << 8;
779 mbox_cmd[5] = 0x01; /* Immediate Command Enable */ 706 mbox_cmd[5] = 0x01; /* Immediate Command Enable */
780 qla4xxx_mailbox_command(ha, 6, 1, &mbox_cmd[0], &mbox_sts[0]); 707
708 qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]);
781 if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE && 709 if (mbox_sts[0] != MBOX_STS_COMMAND_COMPLETE &&
782 mbox_sts[0] != MBOX_STS_COMMAND_ERROR) 710 mbox_sts[0] != MBOX_STS_COMMAND_ERROR)
783 status = QLA_ERROR; 711 status = QLA_ERROR;
@@ -794,12 +722,14 @@ int qla4xxx_get_flash(struct scsi_qla_host * ha, dma_addr_t dma_addr,
794 722
795 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 723 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
796 memset(&mbox_sts, 0, sizeof(mbox_sts)); 724 memset(&mbox_sts, 0, sizeof(mbox_sts));
725
797 mbox_cmd[0] = MBOX_CMD_READ_FLASH; 726 mbox_cmd[0] = MBOX_CMD_READ_FLASH;
798 mbox_cmd[1] = LSDW(dma_addr); 727 mbox_cmd[1] = LSDW(dma_addr);
799 mbox_cmd[2] = MSDW(dma_addr); 728 mbox_cmd[2] = MSDW(dma_addr);
800 mbox_cmd[3] = offset; 729 mbox_cmd[3] = offset;
801 mbox_cmd[4] = len; 730 mbox_cmd[4] = len;
802 if (qla4xxx_mailbox_command(ha, 5, 2, &mbox_cmd[0], &mbox_sts[0]) != 731
732 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0], &mbox_sts[0]) !=
803 QLA_SUCCESS) { 733 QLA_SUCCESS) {
804 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ " 734 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_READ_FLASH, failed w/ "
805 "status %04X %04X, offset %08x, len %08x\n", ha->host_no, 735 "status %04X %04X, offset %08x, len %08x\n", ha->host_no,
@@ -825,8 +755,10 @@ int qla4xxx_get_fw_version(struct scsi_qla_host * ha)
825 /* Get firmware version. */ 755 /* Get firmware version. */
826 memset(&mbox_cmd, 0, sizeof(mbox_cmd)); 756 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
827 memset(&mbox_sts, 0, sizeof(mbox_sts)); 757 memset(&mbox_sts, 0, sizeof(mbox_sts));
758
828 mbox_cmd[0] = MBOX_CMD_ABOUT_FW; 759 mbox_cmd[0] = MBOX_CMD_ABOUT_FW;
829 if (qla4xxx_mailbox_command(ha, 4, 5, &mbox_cmd[0], &mbox_sts[0]) != 760
761 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 5, &mbox_cmd[0], &mbox_sts[0]) !=
830 QLA_SUCCESS) { 762 QLA_SUCCESS) {
831 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ " 763 DEBUG2(printk("scsi%ld: %s: MBOX_CMD_ABOUT_FW failed w/ "
832 "status %04X\n", ha->host_no, __func__, mbox_sts[0])); 764 "status %04X\n", ha->host_no, __func__, mbox_sts[0]));
@@ -855,7 +787,7 @@ static int qla4xxx_get_default_ddb(struct scsi_qla_host *ha,
855 mbox_cmd[2] = LSDW(dma_addr); 787 mbox_cmd[2] = LSDW(dma_addr);
856 mbox_cmd[3] = MSDW(dma_addr); 788 mbox_cmd[3] = MSDW(dma_addr);
857 789
858 if (qla4xxx_mailbox_command(ha, 4, 1, &mbox_cmd[0], &mbox_sts[0]) != 790 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 1, &mbox_cmd[0], &mbox_sts[0]) !=
859 QLA_SUCCESS) { 791 QLA_SUCCESS) {
860 DEBUG2(printk("scsi%ld: %s: failed status %04X\n", 792 DEBUG2(printk("scsi%ld: %s: failed status %04X\n",
861 ha->host_no, __func__, mbox_sts[0])); 793 ha->host_no, __func__, mbox_sts[0]));
@@ -875,7 +807,7 @@ static int qla4xxx_req_ddb_entry(struct scsi_qla_host *ha, uint32_t *ddb_index)
875 mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY; 807 mbox_cmd[0] = MBOX_CMD_REQUEST_DATABASE_ENTRY;
876 mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES; 808 mbox_cmd[1] = MAX_PRST_DEV_DB_ENTRIES;
877 809
878 if (qla4xxx_mailbox_command(ha, 2, 3, &mbox_cmd[0], &mbox_sts[0]) != 810 if (qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 3, &mbox_cmd[0], &mbox_sts[0]) !=
879 QLA_SUCCESS) { 811 QLA_SUCCESS) {
880 if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) { 812 if (mbox_sts[0] == MBOX_STS_COMMAND_ERROR) {
881 *ddb_index = mbox_sts[2]; 813 *ddb_index = mbox_sts[2];
@@ -918,23 +850,23 @@ int qla4xxx_send_tgts(struct scsi_qla_host *ha, char *ip, uint16_t port)
918 if (ret_val != QLA_SUCCESS) 850 if (ret_val != QLA_SUCCESS)
919 goto qla4xxx_send_tgts_exit; 851 goto qla4xxx_send_tgts_exit;
920 852
921 memset((void *)fw_ddb_entry->iSCSIAlias, 0, 853 memset(fw_ddb_entry->iscsi_alias, 0,
922 sizeof(fw_ddb_entry->iSCSIAlias)); 854 sizeof(fw_ddb_entry->iscsi_alias));
923 855
924 memset((void *)fw_ddb_entry->iscsiName, 0, 856 memset(fw_ddb_entry->iscsi_name, 0,
925 sizeof(fw_ddb_entry->iscsiName)); 857 sizeof(fw_ddb_entry->iscsi_name));
926 858
927 memset((void *)fw_ddb_entry->ipAddr, 0, sizeof(fw_ddb_entry->ipAddr)); 859 memset(fw_ddb_entry->ip_addr, 0, sizeof(fw_ddb_entry->ip_addr));
928 memset((void *)fw_ddb_entry->targetAddr, 0, 860 memset(fw_ddb_entry->tgt_addr, 0,
929 sizeof(fw_ddb_entry->targetAddr)); 861 sizeof(fw_ddb_entry->tgt_addr));
930 862
931 fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET); 863 fw_ddb_entry->options = (DDB_OPT_DISC_SESSION | DDB_OPT_TARGET);
932 fw_ddb_entry->portNumber = cpu_to_le16(ntohs(port)); 864 fw_ddb_entry->port = cpu_to_le16(ntohs(port));
933 865
934 fw_ddb_entry->ipAddr[0] = *ip; 866 fw_ddb_entry->ip_addr[0] = *ip;
935 fw_ddb_entry->ipAddr[1] = *(ip + 1); 867 fw_ddb_entry->ip_addr[1] = *(ip + 1);
936 fw_ddb_entry->ipAddr[2] = *(ip + 2); 868 fw_ddb_entry->ip_addr[2] = *(ip + 2);
937 fw_ddb_entry->ipAddr[3] = *(ip + 3); 869 fw_ddb_entry->ip_addr[3] = *(ip + 3);
938 870
939 ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma); 871 ret_val = qla4xxx_set_ddb_entry(ha, ddb_index, fw_ddb_entry_dma);
940 872
diff --git a/drivers/scsi/qla4xxx/ql4_nvram.c b/drivers/scsi/qla4xxx/ql4_nvram.c
index 58afd135aa1d..7fe0482ecf03 100644
--- a/drivers/scsi/qla4xxx/ql4_nvram.c
+++ b/drivers/scsi/qla4xxx/ql4_nvram.c
@@ -6,6 +6,9 @@
6 */ 6 */
7 7
8#include "ql4_def.h" 8#include "ql4_def.h"
9#include "ql4_glbl.h"
10#include "ql4_dbg.h"
11#include "ql4_inline.h"
9 12
10static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha) 13static inline void eeprom_cmd(uint32_t cmd, struct scsi_qla_host *ha)
11{ 14{
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index da21f5fbbf87..e09fc4241970 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -10,6 +10,10 @@
10#include <scsi/scsicam.h> 10#include <scsi/scsicam.h>
11 11
12#include "ql4_def.h" 12#include "ql4_def.h"
13#include "ql4_version.h"
14#include "ql4_glbl.h"
15#include "ql4_dbg.h"
16#include "ql4_inline.h"
13 17
14/* 18/*
15 * Driver version 19 * Driver version
@@ -369,14 +373,7 @@ static void qla4xxx_srb_free_dma(struct scsi_qla_host *ha, struct srb *srb)
369 struct scsi_cmnd *cmd = srb->cmd; 373 struct scsi_cmnd *cmd = srb->cmd;
370 374
371 if (srb->flags & SRB_DMA_VALID) { 375 if (srb->flags & SRB_DMA_VALID) {
372 if (cmd->use_sg) { 376 scsi_dma_unmap(cmd);
373 pci_unmap_sg(ha->pdev, cmd->request_buffer,
374 cmd->use_sg, cmd->sc_data_direction);
375 } else if (cmd->request_bufflen) {
376 pci_unmap_single(ha->pdev, srb->dma_handle,
377 cmd->request_bufflen,
378 cmd->sc_data_direction);
379 }
380 srb->flags &= ~SRB_DMA_VALID; 377 srb->flags &= ~SRB_DMA_VALID;
381 } 378 }
382 cmd->SCp.ptr = NULL; 379 cmd->SCp.ptr = NULL;
@@ -711,7 +708,7 @@ static int qla4xxx_cmd_wait(struct scsi_qla_host *ha)
711 return stat; 708 return stat;
712} 709}
713 710
714static void qla4xxx_hw_reset(struct scsi_qla_host *ha) 711void qla4xxx_hw_reset(struct scsi_qla_host *ha)
715{ 712{
716 uint32_t ctrl_status; 713 uint32_t ctrl_status;
717 unsigned long flags = 0; 714 unsigned long flags = 0;
@@ -1081,13 +1078,13 @@ static void qla4xxx_free_adapter(struct scsi_qla_host *ha)
1081 if (ha->timer_active) 1078 if (ha->timer_active)
1082 qla4xxx_stop_timer(ha); 1079 qla4xxx_stop_timer(ha);
1083 1080
1084 /* free extra memory */
1085 qla4xxx_mem_free(ha);
1086
1087 /* Detach interrupts */ 1081 /* Detach interrupts */
1088 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags)) 1082 if (test_and_clear_bit(AF_IRQ_ATTACHED, &ha->flags))
1089 free_irq(ha->pdev->irq, ha); 1083 free_irq(ha->pdev->irq, ha);
1090 1084
1085 /* free extra memory */
1086 qla4xxx_mem_free(ha);
1087
1091 pci_disable_device(ha->pdev); 1088 pci_disable_device(ha->pdev);
1092 1089
1093} 1090}
@@ -1332,6 +1329,11 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
1332 1329
1333 ha = pci_get_drvdata(pdev); 1330 ha = pci_get_drvdata(pdev);
1334 1331
1332 qla4xxx_disable_intrs(ha);
1333
1334 while (test_bit(DPC_RESET_HA_INTR, &ha->dpc_flags))
1335 ssleep(1);
1336
1335 /* remove devs from iscsi_sessions to scsi_devices */ 1337 /* remove devs from iscsi_sessions to scsi_devices */
1336 qla4xxx_free_ddb_list(ha); 1338 qla4xxx_free_ddb_list(ha);
1337 1339
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index e5183a697d1f..2149069689bd 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,5 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.00.07-k1" 8#define QLA4XXX_DRIVER_VERSION "5.01.00-k7"
9
diff --git a/drivers/scsi/qlogicfas408.c b/drivers/scsi/qlogicfas408.c
index 2e7db18f5aef..2bfbf26c00ed 100644
--- a/drivers/scsi/qlogicfas408.c
+++ b/drivers/scsi/qlogicfas408.c
@@ -265,8 +265,6 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
265 unsigned int message; /* scsi returned message */ 265 unsigned int message; /* scsi returned message */
266 unsigned int phase; /* recorded scsi phase */ 266 unsigned int phase; /* recorded scsi phase */
267 unsigned int reqlen; /* total length of transfer */ 267 unsigned int reqlen; /* total length of transfer */
268 struct scatterlist *sglist; /* scatter-gather list pointer */
269 unsigned int sgcount; /* sg counter */
270 char *buf; 268 char *buf;
271 struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd); 269 struct qlogicfas408_priv *priv = get_priv_by_cmd(cmd);
272 int qbase = priv->qbase; 270 int qbase = priv->qbase;
@@ -301,9 +299,10 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
301 if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */ 299 if (inb(qbase + 7) & 0x1f) /* if some bytes in fifo */
302 outb(1, qbase + 3); /* clear fifo */ 300 outb(1, qbase + 3); /* clear fifo */
303 /* note that request_bufflen is the total xfer size when sg is used */ 301 /* note that request_bufflen is the total xfer size when sg is used */
304 reqlen = cmd->request_bufflen; 302 reqlen = scsi_bufflen(cmd);
305 /* note that it won't work if transfers > 16M are requested */ 303 /* note that it won't work if transfers > 16M are requested */
306 if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */ 304 if (reqlen && !((phase = inb(qbase + 4)) & 6)) { /* data phase */
305 struct scatterlist *sg;
307 rtrc(2) 306 rtrc(2)
308 outb(reqlen, qbase); /* low-mid xfer cnt */ 307 outb(reqlen, qbase); /* low-mid xfer cnt */
309 outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */ 308 outb(reqlen >> 8, qbase + 1); /* low-mid xfer cnt */
@@ -311,23 +310,16 @@ static unsigned int ql_pcmd(struct scsi_cmnd *cmd)
311 outb(0x90, qbase + 3); /* command do xfer */ 310 outb(0x90, qbase + 3); /* command do xfer */
312 /* PIO pseudo DMA to buffer or sglist */ 311 /* PIO pseudo DMA to buffer or sglist */
313 REG1; 312 REG1;
314 if (!cmd->use_sg) 313
315 ql_pdma(priv, phase, cmd->request_buffer, 314 scsi_for_each_sg(cmd, sg, scsi_sg_count(cmd), i) {
316 cmd->request_bufflen); 315 if (priv->qabort) {
317 else { 316 REG0;
318 sgcount = cmd->use_sg; 317 return ((priv->qabort == 1 ?
319 sglist = cmd->request_buffer; 318 DID_ABORT : DID_RESET) << 16);
320 while (sgcount--) {
321 if (priv->qabort) {
322 REG0;
323 return ((priv->qabort == 1 ?
324 DID_ABORT : DID_RESET) << 16);
325 }
326 buf = page_address(sglist->page) + sglist->offset;
327 if (ql_pdma(priv, phase, buf, sglist->length))
328 break;
329 sglist++;
330 } 319 }
320 buf = page_address(sg->page) + sg->offset;
321 if (ql_pdma(priv, phase, buf, sg->length))
322 break;
331 } 323 }
332 REG0; 324 REG0;
333 rtrc(2) 325 rtrc(2)
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e8350c562d24..9adb64ac054c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -18,12 +18,12 @@
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/timer.h> 19#include <linux/timer.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/kernel.h> 21#include <linux/kernel.h>
23#include <linux/kthread.h> 22#include <linux/kthread.h>
24#include <linux/interrupt.h> 23#include <linux/interrupt.h>
25#include <linux/blkdev.h> 24#include <linux/blkdev.h>
26#include <linux/delay.h> 25#include <linux/delay.h>
26#include <linux/scatterlist.h>
27 27
28#include <scsi/scsi.h> 28#include <scsi/scsi.h>
29#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
@@ -640,16 +640,8 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
640 memcpy(scmd->cmnd, cmnd, cmnd_size); 640 memcpy(scmd->cmnd, cmnd, cmnd_size);
641 641
642 if (copy_sense) { 642 if (copy_sense) {
643 gfp_t gfp_mask = GFP_ATOMIC; 643 sg_init_one(&sgl, scmd->sense_buffer,
644 644 sizeof(scmd->sense_buffer));
645 if (shost->hostt->unchecked_isa_dma)
646 gfp_mask |= __GFP_DMA;
647
648 sgl.page = alloc_page(gfp_mask);
649 if (!sgl.page)
650 return FAILED;
651 sgl.offset = 0;
652 sgl.length = 252;
653 645
654 scmd->sc_data_direction = DMA_FROM_DEVICE; 646 scmd->sc_data_direction = DMA_FROM_DEVICE;
655 scmd->request_bufflen = sgl.length; 647 scmd->request_bufflen = sgl.length;
@@ -720,18 +712,6 @@ static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, unsigned char *cmnd,
720 712
721 713
722 /* 714 /*
723 * Last chance to have valid sense data.
724 */
725 if (copy_sense) {
726 if (!SCSI_SENSE_VALID(scmd)) {
727 memcpy(scmd->sense_buffer, page_address(sgl.page),
728 sizeof(scmd->sense_buffer));
729 }
730 __free_page(sgl.page);
731 }
732
733
734 /*
735 * Restore original data 715 * Restore original data
736 */ 716 */
737 scmd->request_buffer = old_buffer; 717 scmd->request_buffer = old_buffer;
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 1f5a07bf2a75..70454b4e8485 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2290,3 +2290,41 @@ void scsi_kunmap_atomic_sg(void *virt)
2290 kunmap_atomic(virt, KM_BIO_SRC_IRQ); 2290 kunmap_atomic(virt, KM_BIO_SRC_IRQ);
2291} 2291}
2292EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 2292EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
2293
2294/**
2295 * scsi_dma_map - perform DMA mapping against command's sg lists
2296 * @cmd: scsi command
2297 *
2298 * Returns the number of sg lists actually used, zero if the sg lists
2299 * is NULL, or -ENOMEM if the mapping failed.
2300 */
2301int scsi_dma_map(struct scsi_cmnd *cmd)
2302{
2303 int nseg = 0;
2304
2305 if (scsi_sg_count(cmd)) {
2306 struct device *dev = cmd->device->host->shost_gendev.parent;
2307
2308 nseg = dma_map_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
2309 cmd->sc_data_direction);
2310 if (unlikely(!nseg))
2311 return -ENOMEM;
2312 }
2313 return nseg;
2314}
2315EXPORT_SYMBOL(scsi_dma_map);
2316
2317/**
2318 * scsi_dma_unmap - unmap command's sg lists mapped by scsi_dma_map
2319 * @cmd: scsi command
2320 */
2321void scsi_dma_unmap(struct scsi_cmnd *cmd)
2322{
2323 if (scsi_sg_count(cmd)) {
2324 struct device *dev = cmd->device->host->shost_gendev.parent;
2325
2326 dma_unmap_sg(dev, scsi_sglist(cmd), scsi_sg_count(cmd),
2327 cmd->sc_data_direction);
2328 }
2329}
2330EXPORT_SYMBOL(scsi_dma_unmap);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 67a38a1409ba..ed720863ab97 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -293,30 +293,18 @@ static int scsi_bus_suspend(struct device * dev, pm_message_t state)
293{ 293{
294 struct device_driver *drv = dev->driver; 294 struct device_driver *drv = dev->driver;
295 struct scsi_device *sdev = to_scsi_device(dev); 295 struct scsi_device *sdev = to_scsi_device(dev);
296 struct scsi_host_template *sht = sdev->host->hostt;
297 int err; 296 int err;
298 297
299 err = scsi_device_quiesce(sdev); 298 err = scsi_device_quiesce(sdev);
300 if (err) 299 if (err)
301 return err; 300 return err;
302 301
303 /* call HLD suspend first */
304 if (drv && drv->suspend) { 302 if (drv && drv->suspend) {
305 err = drv->suspend(dev, state); 303 err = drv->suspend(dev, state);
306 if (err) 304 if (err)
307 return err; 305 return err;
308 } 306 }
309 307
310 /* then, call host suspend */
311 if (sht->suspend) {
312 err = sht->suspend(sdev, state);
313 if (err) {
314 if (drv && drv->resume)
315 drv->resume(dev);
316 return err;
317 }
318 }
319
320 return 0; 308 return 0;
321} 309}
322 310
@@ -324,21 +312,14 @@ static int scsi_bus_resume(struct device * dev)
324{ 312{
325 struct device_driver *drv = dev->driver; 313 struct device_driver *drv = dev->driver;
326 struct scsi_device *sdev = to_scsi_device(dev); 314 struct scsi_device *sdev = to_scsi_device(dev);
327 struct scsi_host_template *sht = sdev->host->hostt; 315 int err = 0;
328 int err = 0, err2 = 0;
329
330 /* call host resume first */
331 if (sht->resume)
332 err = sht->resume(sdev);
333 316
334 /* then, call HLD resume */
335 if (drv && drv->resume) 317 if (drv && drv->resume)
336 err2 = drv->resume(dev); 318 err = drv->resume(dev);
337 319
338 scsi_device_resume(sdev); 320 scsi_device_resume(sdev);
339 321
340 /* favor LLD failure */ 322 return err;
341 return err ? err : err2;;
342} 323}
343 324
344struct bus_type scsi_bus_type = { 325struct bus_type scsi_bus_type = {
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index b4d1ece46f78..4953f0dca029 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1,4 +1,4 @@
1/* 1/*
2 * FiberChannel transport specific attributes exported to sysfs. 2 * FiberChannel transport specific attributes exported to sysfs.
3 * 3 *
4 * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. 4 * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
@@ -19,9 +19,10 @@
19 * 19 *
20 * ======== 20 * ========
21 * 21 *
22 * Copyright (C) 2004-2005 James Smart, Emulex Corporation 22 * Copyright (C) 2004-2007 James Smart, Emulex Corporation
23 * Rewrite for host, target, device, and remote port attributes, 23 * Rewrite for host, target, device, and remote port attributes,
24 * statistics, and service functions... 24 * statistics, and service functions...
25 * Add vports, etc
25 * 26 *
26 */ 27 */
27#include <linux/module.h> 28#include <linux/module.h>
@@ -37,6 +38,34 @@
37#include "scsi_priv.h" 38#include "scsi_priv.h"
38 39
39static int fc_queue_work(struct Scsi_Host *, struct work_struct *); 40static int fc_queue_work(struct Scsi_Host *, struct work_struct *);
41static void fc_vport_sched_delete(struct work_struct *work);
42
43/*
44 * This is a temporary carrier for creating a vport. It will eventually
45 * be replaced by a real message definition for sgio or netlink.
46 *
47 * fc_vport_identifiers: This set of data contains all elements
48 * to uniquely identify and instantiate a FC virtual port.
49 *
50 * Notes:
51 * symbolic_name: The driver is to append the symbolic_name string data
52 * to the symbolic_node_name data that it generates by default.
53 * the resulting combination should then be registered with the switch.
54 * It is expected that things like Xen may stuff a VM title into
55 * this field.
56 */
57struct fc_vport_identifiers {
58 u64 node_name;
59 u64 port_name;
60 u32 roles;
61 bool disable;
62 enum fc_port_type vport_type; /* only FC_PORTTYPE_NPIV allowed */
63 char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
64};
65
66static int fc_vport_create(struct Scsi_Host *shost, int channel,
67 struct device *pdev, struct fc_vport_identifiers *ids,
68 struct fc_vport **vport);
40 69
41/* 70/*
42 * Redefine so that we can have same named attributes in the 71 * Redefine so that we can have same named attributes in the
@@ -90,10 +119,14 @@ static struct {
90 { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" }, 119 { FC_PORTTYPE_NLPORT, "NLPort (fabric via loop)" },
91 { FC_PORTTYPE_LPORT, "LPort (private loop)" }, 120 { FC_PORTTYPE_LPORT, "LPort (private loop)" },
92 { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection" }, 121 { FC_PORTTYPE_PTP, "Point-To-Point (direct nport connection" },
122 { FC_PORTTYPE_NPIV, "NPIV VPORT" },
93}; 123};
94fc_enum_name_search(port_type, fc_port_type, fc_port_type_names) 124fc_enum_name_search(port_type, fc_port_type, fc_port_type_names)
95#define FC_PORTTYPE_MAX_NAMELEN 50 125#define FC_PORTTYPE_MAX_NAMELEN 50
96 126
127/* Reuse fc_port_type enum function for vport_type */
128#define get_fc_vport_type_name get_fc_port_type_name
129
97 130
98/* Convert fc_host_event_code values to ascii string name */ 131/* Convert fc_host_event_code values to ascii string name */
99static const struct { 132static const struct {
@@ -139,6 +172,29 @@ fc_enum_name_search(port_state, fc_port_state, fc_port_state_names)
139#define FC_PORTSTATE_MAX_NAMELEN 20 172#define FC_PORTSTATE_MAX_NAMELEN 20
140 173
141 174
175/* Convert fc_vport_state values to ascii string name */
176static struct {
177 enum fc_vport_state value;
178 char *name;
179} fc_vport_state_names[] = {
180 { FC_VPORT_UNKNOWN, "Unknown" },
181 { FC_VPORT_ACTIVE, "Active" },
182 { FC_VPORT_DISABLED, "Disabled" },
183 { FC_VPORT_LINKDOWN, "Linkdown" },
184 { FC_VPORT_INITIALIZING, "Initializing" },
185 { FC_VPORT_NO_FABRIC_SUPP, "No Fabric Support" },
186 { FC_VPORT_NO_FABRIC_RSCS, "No Fabric Resources" },
187 { FC_VPORT_FABRIC_LOGOUT, "Fabric Logout" },
188 { FC_VPORT_FABRIC_REJ_WWN, "Fabric Rejected WWN" },
189 { FC_VPORT_FAILED, "VPort Failed" },
190};
191fc_enum_name_search(vport_state, fc_vport_state, fc_vport_state_names)
192#define FC_VPORTSTATE_MAX_NAMELEN 24
193
194/* Reuse fc_vport_state enum function for vport_last_state */
195#define get_fc_vport_last_state_name get_fc_vport_state_name
196
197
142/* Convert fc_tgtid_binding_type values to ascii string name */ 198/* Convert fc_tgtid_binding_type values to ascii string name */
143static const struct { 199static const struct {
144 enum fc_tgtid_binding_type value; 200 enum fc_tgtid_binding_type value;
@@ -219,16 +275,16 @@ show_fc_fc4s (char *buf, u8 *fc4_list)
219} 275}
220 276
221 277
222/* Convert FC_RPORT_ROLE bit values to ascii string name */ 278/* Convert FC_PORT_ROLE bit values to ascii string name */
223static const struct { 279static const struct {
224 u32 value; 280 u32 value;
225 char *name; 281 char *name;
226} fc_remote_port_role_names[] = { 282} fc_port_role_names[] = {
227 { FC_RPORT_ROLE_FCP_TARGET, "FCP Target" }, 283 { FC_PORT_ROLE_FCP_TARGET, "FCP Target" },
228 { FC_RPORT_ROLE_FCP_INITIATOR, "FCP Initiator" }, 284 { FC_PORT_ROLE_FCP_INITIATOR, "FCP Initiator" },
229 { FC_RPORT_ROLE_IP_PORT, "IP Port" }, 285 { FC_PORT_ROLE_IP_PORT, "IP Port" },
230}; 286};
231fc_bitfield_name_search(remote_port_roles, fc_remote_port_role_names) 287fc_bitfield_name_search(port_roles, fc_port_role_names)
232 288
233/* 289/*
234 * Define roles that are specific to port_id. Values are relative to ROLE_MASK. 290 * Define roles that are specific to port_id. Values are relative to ROLE_MASK.
@@ -252,7 +308,8 @@ static void fc_scsi_scan_rport(struct work_struct *work);
252 */ 308 */
253#define FC_STARGET_NUM_ATTRS 3 309#define FC_STARGET_NUM_ATTRS 3
254#define FC_RPORT_NUM_ATTRS 10 310#define FC_RPORT_NUM_ATTRS 10
255#define FC_HOST_NUM_ATTRS 17 311#define FC_VPORT_NUM_ATTRS 9
312#define FC_HOST_NUM_ATTRS 21
256 313
257struct fc_internal { 314struct fc_internal {
258 struct scsi_transport_template t; 315 struct scsi_transport_template t;
@@ -278,6 +335,10 @@ struct fc_internal {
278 struct transport_container rport_attr_cont; 335 struct transport_container rport_attr_cont;
279 struct class_device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS]; 336 struct class_device_attribute private_rport_attrs[FC_RPORT_NUM_ATTRS];
280 struct class_device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1]; 337 struct class_device_attribute *rport_attrs[FC_RPORT_NUM_ATTRS + 1];
338
339 struct transport_container vport_attr_cont;
340 struct class_device_attribute private_vport_attrs[FC_VPORT_NUM_ATTRS];
341 struct class_device_attribute *vport_attrs[FC_VPORT_NUM_ATTRS + 1];
281}; 342};
282 343
283#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t) 344#define to_fc_internal(tmpl) container_of(tmpl, struct fc_internal, t)
@@ -318,7 +379,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
318 struct Scsi_Host *shost = dev_to_shost(dev); 379 struct Scsi_Host *shost = dev_to_shost(dev);
319 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 380 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
320 381
321 /* 382 /*
322 * Set default values easily detected by the midlayer as 383 * Set default values easily detected by the midlayer as
323 * failure cases. The scsi lldd is responsible for initializing 384 * failure cases. The scsi lldd is responsible for initializing
324 * all transport attributes to valid values per host. 385 * all transport attributes to valid values per host.
@@ -331,6 +392,7 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
331 sizeof(fc_host->supported_fc4s)); 392 sizeof(fc_host->supported_fc4s));
332 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN; 393 fc_host->supported_speeds = FC_PORTSPEED_UNKNOWN;
333 fc_host->maxframe_size = -1; 394 fc_host->maxframe_size = -1;
395 fc_host->max_npiv_vports = 0;
334 memset(fc_host->serial_number, 0, 396 memset(fc_host->serial_number, 0,
335 sizeof(fc_host->serial_number)); 397 sizeof(fc_host->serial_number));
336 398
@@ -348,8 +410,11 @@ static int fc_host_setup(struct transport_container *tc, struct device *dev,
348 410
349 INIT_LIST_HEAD(&fc_host->rports); 411 INIT_LIST_HEAD(&fc_host->rports);
350 INIT_LIST_HEAD(&fc_host->rport_bindings); 412 INIT_LIST_HEAD(&fc_host->rport_bindings);
413 INIT_LIST_HEAD(&fc_host->vports);
351 fc_host->next_rport_number = 0; 414 fc_host->next_rport_number = 0;
352 fc_host->next_target_id = 0; 415 fc_host->next_target_id = 0;
416 fc_host->next_vport_number = 0;
417 fc_host->npiv_vports_inuse = 0;
353 418
354 snprintf(fc_host->work_q_name, KOBJ_NAME_LEN, "fc_wq_%d", 419 snprintf(fc_host->work_q_name, KOBJ_NAME_LEN, "fc_wq_%d",
355 shost->host_no); 420 shost->host_no);
@@ -388,6 +453,16 @@ static DECLARE_TRANSPORT_CLASS(fc_rport_class,
388 NULL); 453 NULL);
389 454
390/* 455/*
456 * Setup and Remove actions for virtual ports are handled
457 * in the service functions below.
458 */
459static DECLARE_TRANSPORT_CLASS(fc_vport_class,
460 "fc_vports",
461 NULL,
462 NULL,
463 NULL);
464
465/*
391 * Module Parameters 466 * Module Parameters
392 */ 467 */
393 468
@@ -585,6 +660,9 @@ static __init int fc_transport_init(void)
585 error = transport_class_register(&fc_host_class); 660 error = transport_class_register(&fc_host_class);
586 if (error) 661 if (error)
587 return error; 662 return error;
663 error = transport_class_register(&fc_vport_class);
664 if (error)
665 return error;
588 error = transport_class_register(&fc_rport_class); 666 error = transport_class_register(&fc_rport_class);
589 if (error) 667 if (error)
590 return error; 668 return error;
@@ -596,6 +674,7 @@ static void __exit fc_transport_exit(void)
596 transport_class_unregister(&fc_transport_class); 674 transport_class_unregister(&fc_transport_class);
597 transport_class_unregister(&fc_rport_class); 675 transport_class_unregister(&fc_rport_class);
598 transport_class_unregister(&fc_host_class); 676 transport_class_unregister(&fc_host_class);
677 transport_class_unregister(&fc_vport_class);
599} 678}
600 679
601/* 680/*
@@ -800,9 +879,9 @@ show_fc_rport_roles (struct class_device *cdev, char *buf)
800 return snprintf(buf, 30, "Unknown Fabric Entity\n"); 879 return snprintf(buf, 30, "Unknown Fabric Entity\n");
801 } 880 }
802 } else { 881 } else {
803 if (rport->roles == FC_RPORT_ROLE_UNKNOWN) 882 if (rport->roles == FC_PORT_ROLE_UNKNOWN)
804 return snprintf(buf, 20, "unknown\n"); 883 return snprintf(buf, 20, "unknown\n");
805 return get_fc_remote_port_roles_names(rport->roles, buf); 884 return get_fc_port_roles_names(rport->roles, buf);
806 } 885 }
807} 886}
808static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO, 887static FC_CLASS_DEVICE_ATTR(rport, roles, S_IRUGO,
@@ -857,7 +936,7 @@ static FC_CLASS_DEVICE_ATTR(rport, fast_io_fail_tmo, S_IRUGO | S_IWUSR,
857 936
858/* 937/*
859 * Note: in the target show function we recognize when the remote 938 * Note: in the target show function we recognize when the remote
860 * port is in the hierarchy and do not allow the driver to get 939 * port is in the heirarchy and do not allow the driver to get
861 * involved in sysfs functions. The driver only gets involved if 940 * involved in sysfs functions. The driver only gets involved if
862 * it's the "old" style that doesn't use rports. 941 * it's the "old" style that doesn't use rports.
863 */ 942 */
@@ -912,6 +991,257 @@ fc_starget_rd_attr(port_id, "0x%06x\n", 20);
912 991
913 992
914/* 993/*
994 * FC Virtual Port Attribute Management
995 */
996
997#define fc_vport_show_function(field, format_string, sz, cast) \
998static ssize_t \
999show_fc_vport_##field (struct class_device *cdev, char *buf) \
1000{ \
1001 struct fc_vport *vport = transport_class_to_vport(cdev); \
1002 struct Scsi_Host *shost = vport_to_shost(vport); \
1003 struct fc_internal *i = to_fc_internal(shost->transportt); \
1004 if ((i->f->get_vport_##field) && \
1005 !(vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))) \
1006 i->f->get_vport_##field(vport); \
1007 return snprintf(buf, sz, format_string, cast vport->field); \
1008}
1009
1010#define fc_vport_store_function(field) \
1011static ssize_t \
1012store_fc_vport_##field(struct class_device *cdev, const char *buf, \
1013 size_t count) \
1014{ \
1015 int val; \
1016 struct fc_vport *vport = transport_class_to_vport(cdev); \
1017 struct Scsi_Host *shost = vport_to_shost(vport); \
1018 struct fc_internal *i = to_fc_internal(shost->transportt); \
1019 char *cp; \
1020 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
1021 return -EBUSY; \
1022 val = simple_strtoul(buf, &cp, 0); \
1023 if (*cp && (*cp != '\n')) \
1024 return -EINVAL; \
1025 i->f->set_vport_##field(vport, val); \
1026 return count; \
1027}
1028
1029#define fc_vport_store_str_function(field, slen) \
1030static ssize_t \
1031store_fc_vport_##field(struct class_device *cdev, const char *buf, \
1032 size_t count) \
1033{ \
1034 struct fc_vport *vport = transport_class_to_vport(cdev); \
1035 struct Scsi_Host *shost = vport_to_shost(vport); \
1036 struct fc_internal *i = to_fc_internal(shost->transportt); \
1037 unsigned int cnt=count; \
1038 \
1039 /* count may include a LF at end of string */ \
1040 if (buf[cnt-1] == '\n') \
1041 cnt--; \
1042 if (cnt > ((slen) - 1)) \
1043 return -EINVAL; \
1044 memcpy(vport->field, buf, cnt); \
1045 i->f->set_vport_##field(vport); \
1046 return count; \
1047}
1048
1049#define fc_vport_rd_attr(field, format_string, sz) \
1050 fc_vport_show_function(field, format_string, sz, ) \
1051static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
1052 show_fc_vport_##field, NULL)
1053
1054#define fc_vport_rd_attr_cast(field, format_string, sz, cast) \
1055 fc_vport_show_function(field, format_string, sz, (cast)) \
1056static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
1057 show_fc_vport_##field, NULL)
1058
1059#define fc_vport_rw_attr(field, format_string, sz) \
1060 fc_vport_show_function(field, format_string, sz, ) \
1061 fc_vport_store_function(field) \
1062static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
1063 show_fc_vport_##field, \
1064 store_fc_vport_##field)
1065
1066#define fc_private_vport_show_function(field, format_string, sz, cast) \
1067static ssize_t \
1068show_fc_vport_##field (struct class_device *cdev, char *buf) \
1069{ \
1070 struct fc_vport *vport = transport_class_to_vport(cdev); \
1071 return snprintf(buf, sz, format_string, cast vport->field); \
1072}
1073
1074#define fc_private_vport_store_u32_function(field) \
1075static ssize_t \
1076store_fc_vport_##field(struct class_device *cdev, const char *buf, \
1077 size_t count) \
1078{ \
1079 u32 val; \
1080 struct fc_vport *vport = transport_class_to_vport(cdev); \
1081 char *cp; \
1082 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) \
1083 return -EBUSY; \
1084 val = simple_strtoul(buf, &cp, 0); \
1085 if (*cp && (*cp != '\n')) \
1086 return -EINVAL; \
1087 vport->field = val; \
1088 return count; \
1089}
1090
1091
1092#define fc_private_vport_rd_attr(field, format_string, sz) \
1093 fc_private_vport_show_function(field, format_string, sz, ) \
1094static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
1095 show_fc_vport_##field, NULL)
1096
1097#define fc_private_vport_rd_attr_cast(field, format_string, sz, cast) \
1098 fc_private_vport_show_function(field, format_string, sz, (cast)) \
1099static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO, \
1100 show_fc_vport_##field, NULL)
1101
1102#define fc_private_vport_rw_u32_attr(field, format_string, sz) \
1103 fc_private_vport_show_function(field, format_string, sz, ) \
1104 fc_private_vport_store_u32_function(field) \
1105static FC_CLASS_DEVICE_ATTR(vport, field, S_IRUGO | S_IWUSR, \
1106 show_fc_vport_##field, \
1107 store_fc_vport_##field)
1108
1109
1110#define fc_private_vport_rd_enum_attr(title, maxlen) \
1111static ssize_t \
1112show_fc_vport_##title (struct class_device *cdev, char *buf) \
1113{ \
1114 struct fc_vport *vport = transport_class_to_vport(cdev); \
1115 const char *name; \
1116 name = get_fc_##title##_name(vport->title); \
1117 if (!name) \
1118 return -EINVAL; \
1119 return snprintf(buf, maxlen, "%s\n", name); \
1120} \
1121static FC_CLASS_DEVICE_ATTR(vport, title, S_IRUGO, \
1122 show_fc_vport_##title, NULL)
1123
1124
1125#define SETUP_VPORT_ATTRIBUTE_RD(field) \
1126 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1127 i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1128 i->private_vport_attrs[count].store = NULL; \
1129 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1130 if (i->f->get_##field) \
1131 count++
1132 /* NOTE: Above MACRO differs: checks function not show bit */
1133
1134#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(field) \
1135 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1136 i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1137 i->private_vport_attrs[count].store = NULL; \
1138 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1139 count++
1140
1141#define SETUP_VPORT_ATTRIBUTE_WR(field) \
1142 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1143 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1144 if (i->f->field) \
1145 count++
1146 /* NOTE: Above MACRO differs: checks function */
1147
1148#define SETUP_VPORT_ATTRIBUTE_RW(field) \
1149 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1150 if (!i->f->set_vport_##field) { \
1151 i->private_vport_attrs[count].attr.mode = S_IRUGO; \
1152 i->private_vport_attrs[count].store = NULL; \
1153 } \
1154 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1155 count++
1156 /* NOTE: Above MACRO differs: does not check show bit */
1157
1158#define SETUP_PRIVATE_VPORT_ATTRIBUTE_RW(field) \
1159{ \
1160 i->private_vport_attrs[count] = class_device_attr_vport_##field; \
1161 i->vport_attrs[count] = &i->private_vport_attrs[count]; \
1162 count++; \
1163}
1164
1165
1166/* The FC Transport Virtual Port Attributes: */
1167
1168/* Fixed Virtual Port Attributes */
1169
1170/* Dynamic Virtual Port Attributes */
1171
1172/* Private Virtual Port Attributes */
1173
1174fc_private_vport_rd_enum_attr(vport_state, FC_VPORTSTATE_MAX_NAMELEN);
1175fc_private_vport_rd_enum_attr(vport_last_state, FC_VPORTSTATE_MAX_NAMELEN);
1176fc_private_vport_rd_attr_cast(node_name, "0x%llx\n", 20, unsigned long long);
1177fc_private_vport_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1178
1179static ssize_t
1180show_fc_vport_roles (struct class_device *cdev, char *buf)
1181{
1182 struct fc_vport *vport = transport_class_to_vport(cdev);
1183
1184 if (vport->roles == FC_PORT_ROLE_UNKNOWN)
1185 return snprintf(buf, 20, "unknown\n");
1186 return get_fc_port_roles_names(vport->roles, buf);
1187}
1188static FC_CLASS_DEVICE_ATTR(vport, roles, S_IRUGO, show_fc_vport_roles, NULL);
1189
1190fc_private_vport_rd_enum_attr(vport_type, FC_PORTTYPE_MAX_NAMELEN);
1191
1192fc_private_vport_show_function(symbolic_name, "%s\n",
1193 FC_VPORT_SYMBOLIC_NAMELEN + 1, )
1194fc_vport_store_str_function(symbolic_name, FC_VPORT_SYMBOLIC_NAMELEN)
1195static FC_CLASS_DEVICE_ATTR(vport, symbolic_name, S_IRUGO | S_IWUSR,
1196 show_fc_vport_symbolic_name, store_fc_vport_symbolic_name);
1197
1198static ssize_t
1199store_fc_vport_delete(struct class_device *cdev, const char *buf,
1200 size_t count)
1201{
1202 struct fc_vport *vport = transport_class_to_vport(cdev);
1203 struct Scsi_Host *shost = vport_to_shost(vport);
1204
1205 fc_queue_work(shost, &vport->vport_delete_work);
1206 return count;
1207}
1208static FC_CLASS_DEVICE_ATTR(vport, vport_delete, S_IWUSR,
1209 NULL, store_fc_vport_delete);
1210
1211
1212/*
1213 * Enable/Disable vport
1214 * Write "1" to disable, write "0" to enable
1215 */
1216static ssize_t
1217store_fc_vport_disable(struct class_device *cdev, const char *buf,
1218 size_t count)
1219{
1220 struct fc_vport *vport = transport_class_to_vport(cdev);
1221 struct Scsi_Host *shost = vport_to_shost(vport);
1222 struct fc_internal *i = to_fc_internal(shost->transportt);
1223 int stat;
1224
1225 if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
1226 return -EBUSY;
1227
1228 if (*buf == '0') {
1229 if (vport->vport_state != FC_VPORT_DISABLED)
1230 return -EALREADY;
1231 } else if (*buf == '1') {
1232 if (vport->vport_state == FC_VPORT_DISABLED)
1233 return -EALREADY;
1234 } else
1235 return -EINVAL;
1236
1237 stat = i->f->vport_disable(vport, ((*buf == '0') ? false : true));
1238 return stat ? stat : count;
1239}
1240static FC_CLASS_DEVICE_ATTR(vport, vport_disable, S_IWUSR,
1241 NULL, store_fc_vport_disable);
1242
1243
1244/*
915 * Host Attribute Management 1245 * Host Attribute Management
916 */ 1246 */
917 1247
@@ -1003,6 +1333,13 @@ static FC_CLASS_DEVICE_ATTR(host, title, S_IRUGO, show_fc_host_##title, NULL)
1003 if (i->f->show_host_##field) \ 1333 if (i->f->show_host_##field) \
1004 count++ 1334 count++
1005 1335
1336#define SETUP_HOST_ATTRIBUTE_RD_NS(field) \
1337 i->private_host_attrs[count] = class_device_attr_host_##field; \
1338 i->private_host_attrs[count].attr.mode = S_IRUGO; \
1339 i->private_host_attrs[count].store = NULL; \
1340 i->host_attrs[count] = &i->private_host_attrs[count]; \
1341 count++
1342
1006#define SETUP_HOST_ATTRIBUTE_RW(field) \ 1343#define SETUP_HOST_ATTRIBUTE_RW(field) \
1007 i->private_host_attrs[count] = class_device_attr_host_##field; \ 1344 i->private_host_attrs[count] = class_device_attr_host_##field; \
1008 if (!i->f->set_host_##field) { \ 1345 if (!i->f->set_host_##field) { \
@@ -1090,6 +1427,7 @@ fc_private_host_rd_attr_cast(port_name, "0x%llx\n", 20, unsigned long long);
1090fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20, 1427fc_private_host_rd_attr_cast(permanent_port_name, "0x%llx\n", 20,
1091 unsigned long long); 1428 unsigned long long);
1092fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20); 1429fc_private_host_rd_attr(maxframe_size, "%u bytes\n", 20);
1430fc_private_host_rd_attr(max_npiv_vports, "%u\n", 20);
1093fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1)); 1431fc_private_host_rd_attr(serial_number, "%s\n", (FC_SERIAL_NUMBER_SIZE +1));
1094 1432
1095 1433
@@ -1210,6 +1548,9 @@ store_fc_private_host_issue_lip(struct class_device *cdev,
1210static FC_CLASS_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL, 1548static FC_CLASS_DEVICE_ATTR(host, issue_lip, S_IWUSR, NULL,
1211 store_fc_private_host_issue_lip); 1549 store_fc_private_host_issue_lip);
1212 1550
1551fc_private_host_rd_attr(npiv_vports_inuse, "%u\n", 20);
1552
1553
1213/* 1554/*
1214 * Host Statistics Management 1555 * Host Statistics Management
1215 */ 1556 */
@@ -1285,7 +1626,6 @@ fc_reset_statistics(struct class_device *cdev, const char *buf,
1285static FC_CLASS_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL, 1626static FC_CLASS_DEVICE_ATTR(host, reset_statistics, S_IWUSR, NULL,
1286 fc_reset_statistics); 1627 fc_reset_statistics);
1287 1628
1288
1289static struct attribute *fc_statistics_attrs[] = { 1629static struct attribute *fc_statistics_attrs[] = {
1290 &class_device_attr_host_seconds_since_last_reset.attr, 1630 &class_device_attr_host_seconds_since_last_reset.attr,
1291 &class_device_attr_host_tx_frames.attr, 1631 &class_device_attr_host_tx_frames.attr,
@@ -1316,6 +1656,142 @@ static struct attribute_group fc_statistics_group = {
1316 .attrs = fc_statistics_attrs, 1656 .attrs = fc_statistics_attrs,
1317}; 1657};
1318 1658
1659
1660/* Host Vport Attributes */
1661
1662static int
1663fc_parse_wwn(const char *ns, u64 *nm)
1664{
1665 unsigned int i, j;
1666 u8 wwn[8];
1667
1668 memset(wwn, 0, sizeof(wwn));
1669
1670 /* Validate and store the new name */
1671 for (i=0, j=0; i < 16; i++) {
1672 if ((*ns >= 'a') && (*ns <= 'f'))
1673 j = ((j << 4) | ((*ns++ -'a') + 10));
1674 else if ((*ns >= 'A') && (*ns <= 'F'))
1675 j = ((j << 4) | ((*ns++ -'A') + 10));
1676 else if ((*ns >= '0') && (*ns <= '9'))
1677 j = ((j << 4) | (*ns++ -'0'));
1678 else
1679 return -EINVAL;
1680 if (i % 2) {
1681 wwn[i/2] = j & 0xff;
1682 j = 0;
1683 }
1684 }
1685
1686 *nm = wwn_to_u64(wwn);
1687
1688 return 0;
1689}
1690
1691
1692/*
1693 * "Short-cut" sysfs variable to create a new vport on a FC Host.
1694 * Input is a string of the form "<WWPN>:<WWNN>". Other attributes
1695 * will default to a NPIV-based FCP_Initiator; The WWNs are specified
1696 * as hex characters, and may *not* contain any prefixes (e.g. 0x, x, etc)
1697 */
1698static ssize_t
1699store_fc_host_vport_create(struct class_device *cdev, const char *buf,
1700 size_t count)
1701{
1702 struct Scsi_Host *shost = transport_class_to_shost(cdev);
1703 struct fc_vport_identifiers vid;
1704 struct fc_vport *vport;
1705 unsigned int cnt=count;
1706 int stat;
1707
1708 memset(&vid, 0, sizeof(vid));
1709
1710 /* count may include a LF at end of string */
1711 if (buf[cnt-1] == '\n')
1712 cnt--;
1713
1714 /* validate we have enough characters for WWPN */
1715 if ((cnt != (16+1+16)) || (buf[16] != ':'))
1716 return -EINVAL;
1717
1718 stat = fc_parse_wwn(&buf[0], &vid.port_name);
1719 if (stat)
1720 return stat;
1721
1722 stat = fc_parse_wwn(&buf[17], &vid.node_name);
1723 if (stat)
1724 return stat;
1725
1726 vid.roles = FC_PORT_ROLE_FCP_INITIATOR;
1727 vid.vport_type = FC_PORTTYPE_NPIV;
1728 /* vid.symbolic_name is already zero/NULL's */
1729 vid.disable = false; /* always enabled */
1730
1731 /* we only allow support on Channel 0 !!! */
1732 stat = fc_vport_create(shost, 0, &shost->shost_gendev, &vid, &vport);
1733 return stat ? stat : count;
1734}
1735static FC_CLASS_DEVICE_ATTR(host, vport_create, S_IWUSR, NULL,
1736 store_fc_host_vport_create);
1737
1738
1739/*
1740 * "Short-cut" sysfs variable to delete a vport on a FC Host.
1741 * Vport is identified by a string containing "<WWPN>:<WWNN>".
1742 * The WWNs are specified as hex characters, and may *not* contain
1743 * any prefixes (e.g. 0x, x, etc)
1744 */
1745static ssize_t
1746store_fc_host_vport_delete(struct class_device *cdev, const char *buf,
1747 size_t count)
1748{
1749 struct Scsi_Host *shost = transport_class_to_shost(cdev);
1750 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
1751 struct fc_vport *vport;
1752 u64 wwpn, wwnn;
1753 unsigned long flags;
1754 unsigned int cnt=count;
1755 int stat, match;
1756
1757 /* count may include a LF at end of string */
1758 if (buf[cnt-1] == '\n')
1759 cnt--;
1760
1761 /* validate we have enough characters for WWPN */
1762 if ((cnt != (16+1+16)) || (buf[16] != ':'))
1763 return -EINVAL;
1764
1765 stat = fc_parse_wwn(&buf[0], &wwpn);
1766 if (stat)
1767 return stat;
1768
1769 stat = fc_parse_wwn(&buf[17], &wwnn);
1770 if (stat)
1771 return stat;
1772
1773 spin_lock_irqsave(shost->host_lock, flags);
1774 match = 0;
1775 /* we only allow support on Channel 0 !!! */
1776 list_for_each_entry(vport, &fc_host->vports, peers) {
1777 if ((vport->channel == 0) &&
1778 (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
1779 match = 1;
1780 break;
1781 }
1782 }
1783 spin_unlock_irqrestore(shost->host_lock, flags);
1784
1785 if (!match)
1786 return -ENODEV;
1787
1788 stat = fc_vport_terminate(vport);
1789 return stat ? stat : count;
1790}
1791static FC_CLASS_DEVICE_ATTR(host, vport_delete, S_IWUSR, NULL,
1792 store_fc_host_vport_delete);
1793
1794
1319static int fc_host_match(struct attribute_container *cont, 1795static int fc_host_match(struct attribute_container *cont,
1320 struct device *dev) 1796 struct device *dev)
1321{ 1797{
@@ -1387,6 +1863,40 @@ static int fc_rport_match(struct attribute_container *cont,
1387} 1863}
1388 1864
1389 1865
1866static void fc_vport_dev_release(struct device *dev)
1867{
1868 struct fc_vport *vport = dev_to_vport(dev);
1869 put_device(dev->parent); /* release kobj parent */
1870 kfree(vport);
1871}
1872
1873int scsi_is_fc_vport(const struct device *dev)
1874{
1875 return dev->release == fc_vport_dev_release;
1876}
1877EXPORT_SYMBOL(scsi_is_fc_vport);
1878
1879static int fc_vport_match(struct attribute_container *cont,
1880 struct device *dev)
1881{
1882 struct fc_vport *vport;
1883 struct Scsi_Host *shost;
1884 struct fc_internal *i;
1885
1886 if (!scsi_is_fc_vport(dev))
1887 return 0;
1888 vport = dev_to_vport(dev);
1889
1890 shost = vport_to_shost(vport);
1891 if (!shost->transportt || shost->transportt->host_attrs.ac.class
1892 != &fc_host_class.class)
1893 return 0;
1894
1895 i = to_fc_internal(shost->transportt);
1896 return &i->vport_attr_cont.ac == cont;
1897}
1898
1899
1390/** 1900/**
1391 * fc_timed_out - FC Transport I/O timeout intercept handler 1901 * fc_timed_out - FC Transport I/O timeout intercept handler
1392 * 1902 *
@@ -1472,6 +1982,11 @@ fc_attach_transport(struct fc_function_template *ft)
1472 i->rport_attr_cont.ac.match = fc_rport_match; 1982 i->rport_attr_cont.ac.match = fc_rport_match;
1473 transport_container_register(&i->rport_attr_cont); 1983 transport_container_register(&i->rport_attr_cont);
1474 1984
1985 i->vport_attr_cont.ac.attrs = &i->vport_attrs[0];
1986 i->vport_attr_cont.ac.class = &fc_vport_class.class;
1987 i->vport_attr_cont.ac.match = fc_vport_match;
1988 transport_container_register(&i->vport_attr_cont);
1989
1475 i->f = ft; 1990 i->f = ft;
1476 1991
1477 /* Transport uses the shost workq for scsi scanning */ 1992 /* Transport uses the shost workq for scsi scanning */
@@ -1480,7 +1995,7 @@ fc_attach_transport(struct fc_function_template *ft)
1480 i->t.eh_timed_out = fc_timed_out; 1995 i->t.eh_timed_out = fc_timed_out;
1481 1996
1482 i->t.user_scan = fc_user_scan; 1997 i->t.user_scan = fc_user_scan;
1483 1998
1484 /* 1999 /*
1485 * Setup SCSI Target Attributes. 2000 * Setup SCSI Target Attributes.
1486 */ 2001 */
@@ -1505,6 +2020,10 @@ fc_attach_transport(struct fc_function_template *ft)
1505 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s); 2020 SETUP_HOST_ATTRIBUTE_RD(supported_fc4s);
1506 SETUP_HOST_ATTRIBUTE_RD(supported_speeds); 2021 SETUP_HOST_ATTRIBUTE_RD(supported_speeds);
1507 SETUP_HOST_ATTRIBUTE_RD(maxframe_size); 2022 SETUP_HOST_ATTRIBUTE_RD(maxframe_size);
2023 if (ft->vport_create) {
2024 SETUP_HOST_ATTRIBUTE_RD_NS(max_npiv_vports);
2025 SETUP_HOST_ATTRIBUTE_RD_NS(npiv_vports_inuse);
2026 }
1508 SETUP_HOST_ATTRIBUTE_RD(serial_number); 2027 SETUP_HOST_ATTRIBUTE_RD(serial_number);
1509 2028
1510 SETUP_HOST_ATTRIBUTE_RD(port_id); 2029 SETUP_HOST_ATTRIBUTE_RD(port_id);
@@ -1520,6 +2039,10 @@ fc_attach_transport(struct fc_function_template *ft)
1520 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type); 2039 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(tgtid_bind_type);
1521 if (ft->issue_fc_host_lip) 2040 if (ft->issue_fc_host_lip)
1522 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip); 2041 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(issue_lip);
2042 if (ft->vport_create)
2043 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_create);
2044 if (ft->vport_delete)
2045 SETUP_PRIVATE_HOST_ATTRIBUTE_RW(vport_delete);
1523 2046
1524 BUG_ON(count > FC_HOST_NUM_ATTRS); 2047 BUG_ON(count > FC_HOST_NUM_ATTRS);
1525 2048
@@ -1545,6 +2068,24 @@ fc_attach_transport(struct fc_function_template *ft)
1545 2068
1546 i->rport_attrs[count] = NULL; 2069 i->rport_attrs[count] = NULL;
1547 2070
2071 /*
2072 * Setup Virtual Port Attributes.
2073 */
2074 count=0;
2075 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_state);
2076 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_last_state);
2077 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(node_name);
2078 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(port_name);
2079 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(roles);
2080 SETUP_PRIVATE_VPORT_ATTRIBUTE_RD(vport_type);
2081 SETUP_VPORT_ATTRIBUTE_RW(symbolic_name);
2082 SETUP_VPORT_ATTRIBUTE_WR(vport_delete);
2083 SETUP_VPORT_ATTRIBUTE_WR(vport_disable);
2084
2085 BUG_ON(count > FC_VPORT_NUM_ATTRS);
2086
2087 i->vport_attrs[count] = NULL;
2088
1548 return &i->t; 2089 return &i->t;
1549} 2090}
1550EXPORT_SYMBOL(fc_attach_transport); 2091EXPORT_SYMBOL(fc_attach_transport);
@@ -1556,6 +2097,7 @@ void fc_release_transport(struct scsi_transport_template *t)
1556 transport_container_unregister(&i->t.target_attrs); 2097 transport_container_unregister(&i->t.target_attrs);
1557 transport_container_unregister(&i->t.host_attrs); 2098 transport_container_unregister(&i->t.host_attrs);
1558 transport_container_unregister(&i->rport_attr_cont); 2099 transport_container_unregister(&i->rport_attr_cont);
2100 transport_container_unregister(&i->vport_attr_cont);
1559 2101
1560 kfree(i); 2102 kfree(i);
1561} 2103}
@@ -1667,9 +2209,17 @@ fc_flush_devloss(struct Scsi_Host *shost)
1667void 2209void
1668fc_remove_host(struct Scsi_Host *shost) 2210fc_remove_host(struct Scsi_Host *shost)
1669{ 2211{
1670 struct fc_rport *rport, *next_rport; 2212 struct fc_vport *vport = NULL, *next_vport = NULL;
2213 struct fc_rport *rport = NULL, *next_rport = NULL;
1671 struct workqueue_struct *work_q; 2214 struct workqueue_struct *work_q;
1672 struct fc_host_attrs *fc_host = shost_to_fc_host(shost); 2215 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
2216 unsigned long flags;
2217
2218 spin_lock_irqsave(shost->host_lock, flags);
2219
2220 /* Remove any vports */
2221 list_for_each_entry_safe(vport, next_vport, &fc_host->vports, peers)
2222 fc_queue_work(shost, &vport->vport_delete_work);
1673 2223
1674 /* Remove any remote ports */ 2224 /* Remove any remote ports */
1675 list_for_each_entry_safe(rport, next_rport, 2225 list_for_each_entry_safe(rport, next_rport,
@@ -1686,6 +2236,8 @@ fc_remove_host(struct Scsi_Host *shost)
1686 fc_queue_work(shost, &rport->rport_delete_work); 2236 fc_queue_work(shost, &rport->rport_delete_work);
1687 } 2237 }
1688 2238
2239 spin_unlock_irqrestore(shost->host_lock, flags);
2240
1689 /* flush all scan work items */ 2241 /* flush all scan work items */
1690 scsi_flush_work(shost); 2242 scsi_flush_work(shost);
1691 2243
@@ -1744,7 +2296,7 @@ fc_rport_final_delete(struct work_struct *work)
1744 unsigned long flags; 2296 unsigned long flags;
1745 2297
1746 /* 2298 /*
1747 * if a scan is pending, flush the SCSI Host work_q so that 2299 * if a scan is pending, flush the SCSI Host work_q so that
1748 * that we can reclaim the rport scan work element. 2300 * that we can reclaim the rport scan work element.
1749 */ 2301 */
1750 if (rport->flags & FC_RPORT_SCAN_PENDING) 2302 if (rport->flags & FC_RPORT_SCAN_PENDING)
@@ -1844,7 +2396,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1844 spin_lock_irqsave(shost->host_lock, flags); 2396 spin_lock_irqsave(shost->host_lock, flags);
1845 2397
1846 rport->number = fc_host->next_rport_number++; 2398 rport->number = fc_host->next_rport_number++;
1847 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) 2399 if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
1848 rport->scsi_target_id = fc_host->next_target_id++; 2400 rport->scsi_target_id = fc_host->next_target_id++;
1849 else 2401 else
1850 rport->scsi_target_id = -1; 2402 rport->scsi_target_id = -1;
@@ -1869,7 +2421,7 @@ fc_rport_create(struct Scsi_Host *shost, int channel,
1869 transport_add_device(dev); 2421 transport_add_device(dev);
1870 transport_configure_device(dev); 2422 transport_configure_device(dev);
1871 2423
1872 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) { 2424 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
1873 /* initiate a scan of the target */ 2425 /* initiate a scan of the target */
1874 rport->flags |= FC_RPORT_SCAN_PENDING; 2426 rport->flags |= FC_RPORT_SCAN_PENDING;
1875 scsi_queue_work(shost, &rport->scan_work); 2427 scsi_queue_work(shost, &rport->scan_work);
@@ -2003,7 +2555,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2003 2555
2004 /* was a target, not in roles */ 2556 /* was a target, not in roles */
2005 if ((rport->scsi_target_id != -1) && 2557 if ((rport->scsi_target_id != -1) &&
2006 (!(ids->roles & FC_RPORT_ROLE_FCP_TARGET))) 2558 (!(ids->roles & FC_PORT_ROLE_FCP_TARGET)))
2007 return rport; 2559 return rport;
2008 2560
2009 /* 2561 /*
@@ -2086,7 +2638,7 @@ fc_remote_port_add(struct Scsi_Host *shost, int channel,
2086 memset(rport->dd_data, 0, 2638 memset(rport->dd_data, 0,
2087 fci->f->dd_fcrport_size); 2639 fci->f->dd_fcrport_size);
2088 2640
2089 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) { 2641 if (rport->roles & FC_PORT_ROLE_FCP_TARGET) {
2090 /* initiate a scan of the target */ 2642 /* initiate a scan of the target */
2091 rport->flags |= FC_RPORT_SCAN_PENDING; 2643 rport->flags |= FC_RPORT_SCAN_PENDING;
2092 scsi_queue_work(shost, &rport->scan_work); 2644 scsi_queue_work(shost, &rport->scan_work);
@@ -2243,11 +2795,11 @@ fc_remote_port_rolechg(struct fc_rport *rport, u32 roles)
2243 int create = 0; 2795 int create = 0;
2244 2796
2245 spin_lock_irqsave(shost->host_lock, flags); 2797 spin_lock_irqsave(shost->host_lock, flags);
2246 if (roles & FC_RPORT_ROLE_FCP_TARGET) { 2798 if (roles & FC_PORT_ROLE_FCP_TARGET) {
2247 if (rport->scsi_target_id == -1) { 2799 if (rport->scsi_target_id == -1) {
2248 rport->scsi_target_id = fc_host->next_target_id++; 2800 rport->scsi_target_id = fc_host->next_target_id++;
2249 create = 1; 2801 create = 1;
2250 } else if (!(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) 2802 } else if (!(rport->roles & FC_PORT_ROLE_FCP_TARGET))
2251 create = 1; 2803 create = 1;
2252 } 2804 }
2253 2805
@@ -2294,7 +2846,7 @@ EXPORT_SYMBOL(fc_remote_port_rolechg);
2294 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port, 2846 * fc_timeout_deleted_rport - Timeout handler for a deleted remote port,
2295 * which we blocked, and has now failed to return 2847 * which we blocked, and has now failed to return
2296 * in the allotted time. 2848 * in the allotted time.
2297 * 2849 *
2298 * @work: rport target that failed to reappear in the allotted time. 2850 * @work: rport target that failed to reappear in the allotted time.
2299 **/ 2851 **/
2300static void 2852static void
@@ -2317,7 +2869,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
2317 */ 2869 */
2318 if ((rport->port_state == FC_PORTSTATE_ONLINE) && 2870 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
2319 (rport->scsi_target_id != -1) && 2871 (rport->scsi_target_id != -1) &&
2320 !(rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 2872 !(rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
2321 dev_printk(KERN_ERR, &rport->dev, 2873 dev_printk(KERN_ERR, &rport->dev,
2322 "blocked FC remote port time out: no longer" 2874 "blocked FC remote port time out: no longer"
2323 " a FCP target, removing starget\n"); 2875 " a FCP target, removing starget\n");
@@ -2367,7 +2919,7 @@ fc_timeout_deleted_rport(struct work_struct *work)
2367 */ 2919 */
2368 rport->maxframe_size = -1; 2920 rport->maxframe_size = -1;
2369 rport->supported_classes = FC_COS_UNSPECIFIED; 2921 rport->supported_classes = FC_COS_UNSPECIFIED;
2370 rport->roles = FC_RPORT_ROLE_UNKNOWN; 2922 rport->roles = FC_PORT_ROLE_UNKNOWN;
2371 rport->port_state = FC_PORTSTATE_NOTPRESENT; 2923 rport->port_state = FC_PORTSTATE_NOTPRESENT;
2372 2924
2373 /* remove the identifiers that aren't used in the consisting binding */ 2925 /* remove the identifiers that aren't used in the consisting binding */
@@ -2436,7 +2988,7 @@ fc_scsi_scan_rport(struct work_struct *work)
2436 unsigned long flags; 2988 unsigned long flags;
2437 2989
2438 if ((rport->port_state == FC_PORTSTATE_ONLINE) && 2990 if ((rport->port_state == FC_PORTSTATE_ONLINE) &&
2439 (rport->roles & FC_RPORT_ROLE_FCP_TARGET)) { 2991 (rport->roles & FC_PORT_ROLE_FCP_TARGET)) {
2440 scsi_scan_target(&rport->dev, rport->channel, 2992 scsi_scan_target(&rport->dev, rport->channel,
2441 rport->scsi_target_id, SCAN_WILD_CARD, 1); 2993 rport->scsi_target_id, SCAN_WILD_CARD, 1);
2442 } 2994 }
@@ -2447,7 +2999,227 @@ fc_scsi_scan_rport(struct work_struct *work)
2447} 2999}
2448 3000
2449 3001
2450MODULE_AUTHOR("Martin Hicks"); 3002/**
3003 * fc_vport_create - allocates and creates a FC virtual port.
3004 * @shost: scsi host the virtual port is connected to.
3005 * @channel: Channel on shost port connected to.
3006 * @pdev: parent device for vport
3007 * @ids: The world wide names, FC4 port roles, etc for
3008 * the virtual port.
3009 * @ret_vport: The pointer to the created vport.
3010 *
3011 * Allocates and creates the vport structure, calls the parent host
3012 * to instantiate the vport, the completes w/ class and sysfs creation.
3013 *
3014 * Notes:
3015 * This routine assumes no locks are held on entry.
3016 **/
3017static int
3018fc_vport_create(struct Scsi_Host *shost, int channel, struct device *pdev,
3019 struct fc_vport_identifiers *ids, struct fc_vport **ret_vport)
3020{
3021 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3022 struct fc_internal *fci = to_fc_internal(shost->transportt);
3023 struct fc_vport *vport;
3024 struct device *dev;
3025 unsigned long flags;
3026 size_t size;
3027 int error;
3028
3029 *ret_vport = NULL;
3030
3031 if ( ! fci->f->vport_create)
3032 return -ENOENT;
3033
3034 size = (sizeof(struct fc_vport) + fci->f->dd_fcvport_size);
3035 vport = kzalloc(size, GFP_KERNEL);
3036 if (unlikely(!vport)) {
3037 printk(KERN_ERR "%s: allocation failure\n", __FUNCTION__);
3038 return -ENOMEM;
3039 }
3040
3041 vport->vport_state = FC_VPORT_UNKNOWN;
3042 vport->vport_last_state = FC_VPORT_UNKNOWN;
3043 vport->node_name = ids->node_name;
3044 vport->port_name = ids->port_name;
3045 vport->roles = ids->roles;
3046 vport->vport_type = ids->vport_type;
3047 if (fci->f->dd_fcvport_size)
3048 vport->dd_data = &vport[1];
3049 vport->shost = shost;
3050 vport->channel = channel;
3051 vport->flags = FC_VPORT_CREATING;
3052 INIT_WORK(&vport->vport_delete_work, fc_vport_sched_delete);
3053
3054 spin_lock_irqsave(shost->host_lock, flags);
3055
3056 if (fc_host->npiv_vports_inuse >= fc_host->max_npiv_vports) {
3057 spin_unlock_irqrestore(shost->host_lock, flags);
3058 kfree(vport);
3059 return -ENOSPC;
3060 }
3061 fc_host->npiv_vports_inuse++;
3062 vport->number = fc_host->next_vport_number++;
3063 list_add_tail(&vport->peers, &fc_host->vports);
3064 get_device(&shost->shost_gendev); /* for fc_host->vport list */
3065
3066 spin_unlock_irqrestore(shost->host_lock, flags);
3067
3068 dev = &vport->dev;
3069 device_initialize(dev); /* takes self reference */
3070 dev->parent = get_device(pdev); /* takes parent reference */
3071 dev->release = fc_vport_dev_release;
3072 sprintf(dev->bus_id, "vport-%d:%d-%d",
3073 shost->host_no, channel, vport->number);
3074 transport_setup_device(dev);
3075
3076 error = device_add(dev);
3077 if (error) {
3078 printk(KERN_ERR "FC Virtual Port device_add failed\n");
3079 goto delete_vport;
3080 }
3081 transport_add_device(dev);
3082 transport_configure_device(dev);
3083
3084 error = fci->f->vport_create(vport, ids->disable);
3085 if (error) {
3086 printk(KERN_ERR "FC Virtual Port LLDD Create failed\n");
3087 goto delete_vport_all;
3088 }
3089
3090 /*
3091 * if the parent isn't the physical adapter's Scsi_Host, ensure
3092 * the Scsi_Host at least contains ia symlink to the vport.
3093 */
3094 if (pdev != &shost->shost_gendev) {
3095 error = sysfs_create_link(&shost->shost_gendev.kobj,
3096 &dev->kobj, dev->bus_id);
3097 if (error)
3098 printk(KERN_ERR
3099 "%s: Cannot create vport symlinks for "
3100 "%s, err=%d\n",
3101 __FUNCTION__, dev->bus_id, error);
3102 }
3103 spin_lock_irqsave(shost->host_lock, flags);
3104 vport->flags &= ~FC_VPORT_CREATING;
3105 spin_unlock_irqrestore(shost->host_lock, flags);
3106
3107 dev_printk(KERN_NOTICE, pdev,
3108 "%s created via shost%d channel %d\n", dev->bus_id,
3109 shost->host_no, channel);
3110
3111 *ret_vport = vport;
3112
3113 return 0;
3114
3115delete_vport_all:
3116 transport_remove_device(dev);
3117 device_del(dev);
3118delete_vport:
3119 transport_destroy_device(dev);
3120 spin_lock_irqsave(shost->host_lock, flags);
3121 list_del(&vport->peers);
3122 put_device(&shost->shost_gendev); /* for fc_host->vport list */
3123 fc_host->npiv_vports_inuse--;
3124 spin_unlock_irqrestore(shost->host_lock, flags);
3125 put_device(dev->parent);
3126 kfree(vport);
3127
3128 return error;
3129}
3130
3131
3132/**
3133 * fc_vport_terminate - Admin App or LLDD requests termination of a vport
3134 * @vport: fc_vport to be terminated
3135 *
3136 * Calls the LLDD vport_delete() function, then deallocates and removes
3137 * the vport from the shost and object tree.
3138 *
3139 * Notes:
3140 * This routine assumes no locks are held on entry.
3141 **/
3142int
3143fc_vport_terminate(struct fc_vport *vport)
3144{
3145 struct Scsi_Host *shost = vport_to_shost(vport);
3146 struct fc_host_attrs *fc_host = shost_to_fc_host(shost);
3147 struct fc_internal *i = to_fc_internal(shost->transportt);
3148 struct device *dev = &vport->dev;
3149 unsigned long flags;
3150 int stat;
3151
3152 spin_lock_irqsave(shost->host_lock, flags);
3153 if (vport->flags & FC_VPORT_CREATING) {
3154 spin_unlock_irqrestore(shost->host_lock, flags);
3155 return -EBUSY;
3156 }
3157 if (vport->flags & (FC_VPORT_DEL)) {
3158 spin_unlock_irqrestore(shost->host_lock, flags);
3159 return -EALREADY;
3160 }
3161 vport->flags |= FC_VPORT_DELETING;
3162 spin_unlock_irqrestore(shost->host_lock, flags);
3163
3164 if (i->f->vport_delete)
3165 stat = i->f->vport_delete(vport);
3166 else
3167 stat = -ENOENT;
3168
3169 spin_lock_irqsave(shost->host_lock, flags);
3170 vport->flags &= ~FC_VPORT_DELETING;
3171 if (!stat) {
3172 vport->flags |= FC_VPORT_DELETED;
3173 list_del(&vport->peers);
3174 fc_host->npiv_vports_inuse--;
3175 put_device(&shost->shost_gendev); /* for fc_host->vport list */
3176 }
3177 spin_unlock_irqrestore(shost->host_lock, flags);
3178
3179 if (stat)
3180 return stat;
3181
3182 if (dev->parent != &shost->shost_gendev)
3183 sysfs_remove_link(&shost->shost_gendev.kobj, dev->bus_id);
3184 transport_remove_device(dev);
3185 device_del(dev);
3186 transport_destroy_device(dev);
3187
3188 /*
3189 * Removing our self-reference should mean our
3190 * release function gets called, which will drop the remaining
3191 * parent reference and free the data structure.
3192 */
3193 put_device(dev); /* for self-reference */
3194
3195 return 0; /* SUCCESS */
3196}
3197EXPORT_SYMBOL(fc_vport_terminate);
3198
3199/**
3200 * fc_vport_sched_delete - workq-based delete request for a vport
3201 *
3202 * @work: vport to be deleted.
3203 **/
3204static void
3205fc_vport_sched_delete(struct work_struct *work)
3206{
3207 struct fc_vport *vport =
3208 container_of(work, struct fc_vport, vport_delete_work);
3209 int stat;
3210
3211 stat = fc_vport_terminate(vport);
3212 if (stat)
3213 dev_printk(KERN_ERR, vport->dev.parent,
3214 "%s: %s could not be deleted created via "
3215 "shost%d channel %d - error %d\n", __FUNCTION__,
3216 vport->dev.bus_id, vport->shost->host_no,
3217 vport->channel, stat);
3218}
3219
3220
3221/* Original Author: Martin Hicks */
3222MODULE_AUTHOR("James Smart");
2451MODULE_DESCRIPTION("FC Transport Attributes"); 3223MODULE_DESCRIPTION("FC Transport Attributes");
2452MODULE_LICENSE("GPL"); 3224MODULE_LICENSE("GPL");
2453 3225
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3d8c9cb24f91..448d316f12d7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1515,7 +1515,7 @@ static int sd_revalidate_disk(struct gendisk *disk)
1515 if (!scsi_device_online(sdp)) 1515 if (!scsi_device_online(sdp))
1516 goto out; 1516 goto out;
1517 1517
1518 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL | __GFP_DMA); 1518 buffer = kmalloc(SD_BUF_SIZE, GFP_KERNEL);
1519 if (!buffer) { 1519 if (!buffer) {
1520 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory " 1520 sd_printk(KERN_WARNING, sdkp, "sd_revalidate_disk: Memory "
1521 "allocation failure.\n"); 1521 "allocation failure.\n");
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 0c691a60a756..85d38940a6c9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1842,7 +1842,7 @@ sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
1842 int blk_size = buff_size; 1842 int blk_size = buff_size;
1843 struct page *p = NULL; 1843 struct page *p = NULL;
1844 1844
1845 if ((blk_size < 0) || (!sfp)) 1845 if (blk_size < 0)
1846 return -EFAULT; 1846 return -EFAULT;
1847 if (0 == blk_size) 1847 if (0 == blk_size)
1848 ++blk_size; /* don't know why */ 1848 ++blk_size; /* don't know why */
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 9ac83abc4028..adda296b594b 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -395,53 +395,34 @@ static struct req_msg *stex_alloc_req(struct st_hba *hba)
395static int stex_map_sg(struct st_hba *hba, 395static int stex_map_sg(struct st_hba *hba,
396 struct req_msg *req, struct st_ccb *ccb) 396 struct req_msg *req, struct st_ccb *ccb)
397{ 397{
398 struct pci_dev *pdev = hba->pdev;
399 struct scsi_cmnd *cmd; 398 struct scsi_cmnd *cmd;
400 dma_addr_t dma_handle; 399 struct scatterlist *sg;
401 struct scatterlist *src;
402 struct st_sgtable *dst; 400 struct st_sgtable *dst;
403 int i; 401 int i, nseg;
404 402
405 cmd = ccb->cmd; 403 cmd = ccb->cmd;
406 dst = (struct st_sgtable *)req->variable; 404 dst = (struct st_sgtable *)req->variable;
407 dst->max_sg_count = cpu_to_le16(ST_MAX_SG); 405 dst->max_sg_count = cpu_to_le16(ST_MAX_SG);
408 dst->sz_in_byte = cpu_to_le32(cmd->request_bufflen); 406 dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
409
410 if (cmd->use_sg) {
411 int n_elem;
412 407
413 src = (struct scatterlist *) cmd->request_buffer; 408 nseg = scsi_dma_map(cmd);
414 n_elem = pci_map_sg(pdev, src, 409 if (nseg < 0)
415 cmd->use_sg, cmd->sc_data_direction); 410 return -EIO;
416 if (n_elem <= 0) 411 if (nseg) {
417 return -EIO; 412 ccb->sg_count = nseg;
413 dst->sg_count = cpu_to_le16((u16)nseg);
418 414
419 ccb->sg_count = n_elem; 415 scsi_for_each_sg(cmd, sg, nseg, i) {
420 dst->sg_count = cpu_to_le16((u16)n_elem); 416 dst->table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
421
422 for (i = 0; i < n_elem; i++, src++) {
423 dst->table[i].count = cpu_to_le32((u32)sg_dma_len(src));
424 dst->table[i].addr = 417 dst->table[i].addr =
425 cpu_to_le32(sg_dma_address(src) & 0xffffffff); 418 cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
426 dst->table[i].addr_hi = 419 dst->table[i].addr_hi =
427 cpu_to_le32((sg_dma_address(src) >> 16) >> 16); 420 cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
428 dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST; 421 dst->table[i].ctrl = SG_CF_64B | SG_CF_HOST;
429 } 422 }
430 dst->table[--i].ctrl |= SG_CF_EOT; 423 dst->table[--i].ctrl |= SG_CF_EOT;
431 return 0;
432 } 424 }
433 425
434 dma_handle = pci_map_single(pdev, cmd->request_buffer,
435 cmd->request_bufflen, cmd->sc_data_direction);
436 cmd->SCp.dma_handle = dma_handle;
437
438 ccb->sg_count = 1;
439 dst->sg_count = cpu_to_le16(1);
440 dst->table[0].addr = cpu_to_le32(dma_handle & 0xffffffff);
441 dst->table[0].addr_hi = cpu_to_le32((dma_handle >> 16) >> 16);
442 dst->table[0].count = cpu_to_le32((u32)cmd->request_bufflen);
443 dst->table[0].ctrl = SG_CF_EOT | SG_CF_64B | SG_CF_HOST;
444
445 return 0; 426 return 0;
446} 427}
447 428
@@ -451,24 +432,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
451 size_t lcount; 432 size_t lcount;
452 size_t len; 433 size_t len;
453 void *s, *d, *base = NULL; 434 void *s, *d, *base = NULL;
454 if (*count > cmd->request_bufflen) 435 size_t offset;
455 *count = cmd->request_bufflen; 436
437 if (*count > scsi_bufflen(cmd))
438 *count = scsi_bufflen(cmd);
456 lcount = *count; 439 lcount = *count;
457 while (lcount) { 440 while (lcount) {
458 len = lcount; 441 len = lcount;
459 s = (void *)src; 442 s = (void *)src;
460 if (cmd->use_sg) { 443
461 size_t offset = *count - lcount; 444 offset = *count - lcount;
462 s += offset; 445 s += offset;
463 base = scsi_kmap_atomic_sg(cmd->request_buffer, 446 base = scsi_kmap_atomic_sg(scsi_sglist(cmd),
464 sg_count, &offset, &len); 447 sg_count, &offset, &len);
465 if (base == NULL) { 448 if (!base) {
466 *count -= lcount; 449 *count -= lcount;
467 return; 450 return;
468 } 451 }
469 d = base + offset; 452 d = base + offset;
470 } else
471 d = cmd->request_buffer;
472 453
473 if (direction == ST_TO_CMD) 454 if (direction == ST_TO_CMD)
474 memcpy(d, s, len); 455 memcpy(d, s, len);
@@ -476,30 +457,24 @@ static void stex_internal_copy(struct scsi_cmnd *cmd,
476 memcpy(s, d, len); 457 memcpy(s, d, len);
477 458
478 lcount -= len; 459 lcount -= len;
479 if (cmd->use_sg) 460 scsi_kunmap_atomic_sg(base);
480 scsi_kunmap_atomic_sg(base);
481 } 461 }
482} 462}
483 463
484static int stex_direct_copy(struct scsi_cmnd *cmd, 464static int stex_direct_copy(struct scsi_cmnd *cmd,
485 const void *src, size_t count) 465 const void *src, size_t count)
486{ 466{
487 struct st_hba *hba = (struct st_hba *) &cmd->device->host->hostdata[0];
488 size_t cp_len = count; 467 size_t cp_len = count;
489 int n_elem = 0; 468 int n_elem = 0;
490 469
491 if (cmd->use_sg) { 470 n_elem = scsi_dma_map(cmd);
492 n_elem = pci_map_sg(hba->pdev, cmd->request_buffer, 471 if (n_elem < 0)
493 cmd->use_sg, cmd->sc_data_direction); 472 return 0;
494 if (n_elem <= 0)
495 return 0;
496 }
497 473
498 stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD); 474 stex_internal_copy(cmd, src, &cp_len, n_elem, ST_TO_CMD);
499 475
500 if (cmd->use_sg) 476 scsi_dma_unmap(cmd);
501 pci_unmap_sg(hba->pdev, cmd->request_buffer, 477
502 cmd->use_sg, cmd->sc_data_direction);
503 return cp_len == count; 478 return cp_len == count;
504} 479}
505 480
@@ -678,18 +653,6 @@ stex_queuecommand(struct scsi_cmnd *cmd, void (* done)(struct scsi_cmnd *))
678 return 0; 653 return 0;
679} 654}
680 655
681static void stex_unmap_sg(struct st_hba *hba, struct scsi_cmnd *cmd)
682{
683 if (cmd->sc_data_direction != DMA_NONE) {
684 if (cmd->use_sg)
685 pci_unmap_sg(hba->pdev, cmd->request_buffer,
686 cmd->use_sg, cmd->sc_data_direction);
687 else
688 pci_unmap_single(hba->pdev, cmd->SCp.dma_handle,
689 cmd->request_bufflen, cmd->sc_data_direction);
690 }
691}
692
693static void stex_scsi_done(struct st_ccb *ccb) 656static void stex_scsi_done(struct st_ccb *ccb)
694{ 657{
695 struct scsi_cmnd *cmd = ccb->cmd; 658 struct scsi_cmnd *cmd = ccb->cmd;
@@ -756,7 +719,7 @@ static void stex_ys_commands(struct st_hba *hba,
756 719
757 if (ccb->cmd->cmnd[0] == MGT_CMD && 720 if (ccb->cmd->cmnd[0] == MGT_CMD &&
758 resp->scsi_status != SAM_STAT_CHECK_CONDITION) { 721 resp->scsi_status != SAM_STAT_CHECK_CONDITION) {
759 ccb->cmd->request_bufflen = 722 scsi_bufflen(ccb->cmd) =
760 le32_to_cpu(*(__le32 *)&resp->variable[0]); 723 le32_to_cpu(*(__le32 *)&resp->variable[0]);
761 return; 724 return;
762 } 725 }
@@ -855,7 +818,7 @@ static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
855 ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER)) 818 ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
856 stex_controller_info(hba, ccb); 819 stex_controller_info(hba, ccb);
857 820
858 stex_unmap_sg(hba, ccb->cmd); 821 scsi_dma_unmap(ccb->cmd);
859 stex_scsi_done(ccb); 822 stex_scsi_done(ccb);
860 hba->out_req_cnt--; 823 hba->out_req_cnt--;
861 } else if (ccb->req_type & PASSTHRU_REQ_TYPE) { 824 } else if (ccb->req_type & PASSTHRU_REQ_TYPE) {
@@ -1028,7 +991,7 @@ static int stex_abort(struct scsi_cmnd *cmd)
1028 } 991 }
1029 992
1030fail_out: 993fail_out:
1031 stex_unmap_sg(hba, cmd); 994 scsi_dma_unmap(cmd);
1032 hba->wait_ccb->req = NULL; /* nullify the req's future return */ 995 hba->wait_ccb->req = NULL; /* nullify the req's future return */
1033 hba->wait_ccb = NULL; 996 hba->wait_ccb = NULL;
1034 result = FAILED; 997 result = FAILED;
diff --git a/drivers/scsi/sym53c416.c b/drivers/scsi/sym53c416.c
index 2ca950582bc3..92bfaeafe30d 100644
--- a/drivers/scsi/sym53c416.c
+++ b/drivers/scsi/sym53c416.c
@@ -332,8 +332,7 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
332 int i; 332 int i;
333 unsigned long flags = 0; 333 unsigned long flags = 0;
334 unsigned char status_reg, pio_int_reg, int_reg; 334 unsigned char status_reg, pio_int_reg, int_reg;
335 struct scatterlist *sglist; 335 struct scatterlist *sg;
336 unsigned int sgcount;
337 unsigned int tot_trans = 0; 336 unsigned int tot_trans = 0;
338 337
339 /* We search the base address of the host adapter which caused the interrupt */ 338 /* We search the base address of the host adapter which caused the interrupt */
@@ -429,19 +428,15 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
429 { 428 {
430 current_command->SCp.phase = data_out; 429 current_command->SCp.phase = data_out;
431 outb(FLUSH_FIFO, base + COMMAND_REG); 430 outb(FLUSH_FIFO, base + COMMAND_REG);
432 sym53c416_set_transfer_counter(base, current_command->request_bufflen); 431 sym53c416_set_transfer_counter(base,
432 scsi_bufflen(current_command));
433 outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG); 433 outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
434 if(!current_command->use_sg) 434
435 tot_trans = sym53c416_write(base, current_command->request_buffer, current_command->request_bufflen); 435 scsi_for_each_sg(current_command,
436 else 436 sg, scsi_sg_count(current_command), i) {
437 { 437 tot_trans += sym53c416_write(base,
438 sgcount = current_command->use_sg; 438 SG_ADDRESS(sg),
439 sglist = current_command->request_buffer; 439 sg->length);
440 while(sgcount--)
441 {
442 tot_trans += sym53c416_write(base, SG_ADDRESS(sglist), sglist->length);
443 sglist++;
444 }
445 } 440 }
446 if(tot_trans < current_command->underflow) 441 if(tot_trans < current_command->underflow)
447 printk(KERN_WARNING "sym53c416: Underflow, wrote %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow); 442 printk(KERN_WARNING "sym53c416: Underflow, wrote %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
@@ -455,19 +450,16 @@ static irqreturn_t sym53c416_intr_handle(int irq, void *dev_id)
455 { 450 {
456 current_command->SCp.phase = data_in; 451 current_command->SCp.phase = data_in;
457 outb(FLUSH_FIFO, base + COMMAND_REG); 452 outb(FLUSH_FIFO, base + COMMAND_REG);
458 sym53c416_set_transfer_counter(base, current_command->request_bufflen); 453 sym53c416_set_transfer_counter(base,
454 scsi_bufflen(current_command));
455
459 outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG); 456 outb(TRANSFER_INFORMATION | PIO_MODE, base + COMMAND_REG);
460 if(!current_command->use_sg) 457
461 tot_trans = sym53c416_read(base, current_command->request_buffer, current_command->request_bufflen); 458 scsi_for_each_sg(current_command,
462 else 459 sg, scsi_sg_count(current_command), i) {
463 { 460 tot_trans += sym53c416_read(base,
464 sgcount = current_command->use_sg; 461 SG_ADDRESS(sg),
465 sglist = current_command->request_buffer; 462 sg->length);
466 while(sgcount--)
467 {
468 tot_trans += sym53c416_read(base, SG_ADDRESS(sglist), sglist->length);
469 sglist++;
470 }
471 } 463 }
472 if(tot_trans < current_command->underflow) 464 if(tot_trans < current_command->underflow)
473 printk(KERN_WARNING "sym53c416: Underflow, read %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow); 465 printk(KERN_WARNING "sym53c416: Underflow, read %d bytes, request for %d bytes.\n", tot_trans, current_command->underflow);
diff --git a/drivers/scsi/tmscsim.c b/drivers/scsi/tmscsim.c
index e7b85e832eb5..73c5ca082e65 100644
--- a/drivers/scsi/tmscsim.c
+++ b/drivers/scsi/tmscsim.c
@@ -457,28 +457,21 @@ static int dc390_pci_map (struct dc390_srb* pSRB)
457 error = 1; 457 error = 1;
458 DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle)); 458 DEBUG1(printk("%s(): Mapped sense buffer %p at %x\n", __FUNCTION__, pcmd->sense_buffer, cmdp->saved_dma_handle));
459 /* Map SG list */ 459 /* Map SG list */
460 } else if (pcmd->use_sg) { 460 } else if (scsi_sg_count(pcmd)) {
461 pSRB->pSegmentList = (struct scatterlist *) pcmd->request_buffer; 461 int nseg;
462 pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, pcmd->use_sg, 462
463 pcmd->sc_data_direction); 463 nseg = scsi_dma_map(pcmd);
464
465 pSRB->pSegmentList = scsi_sglist(pcmd);
466 pSRB->SGcount = nseg;
467
464 /* TODO: error handling */ 468 /* TODO: error handling */
465 if (!pSRB->SGcount) 469 if (nseg < 0)
466 error = 1; 470 error = 1;
467 DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\ 471 DEBUG1(printk("%s(): Mapped SG %p with %d (%d) elements\n",\
468 __FUNCTION__, pcmd->request_buffer, pSRB->SGcount, pcmd->use_sg)); 472 __FUNCTION__, scsi_sglist(pcmd), nseg, scsi_sg_count(pcmd)));
469 /* Map single segment */ 473 /* Map single segment */
470 } else if (pcmd->request_buffer && pcmd->request_bufflen) { 474 } else
471 pSRB->pSegmentList = dc390_sg_build_single(&pSRB->Segmentx, pcmd->request_buffer, pcmd->request_bufflen);
472 pSRB->SGcount = pci_map_sg(pdev, pSRB->pSegmentList, 1,
473 pcmd->sc_data_direction);
474 cmdp->saved_dma_handle = sg_dma_address(pSRB->pSegmentList);
475
476 /* TODO: error handling */
477 if (pSRB->SGcount != 1)
478 error = 1;
479 DEBUG1(printk("%s(): Mapped request buffer %p at %x\n", __FUNCTION__, pcmd->request_buffer, cmdp->saved_dma_handle));
480 /* No mapping !? */
481 } else
482 pSRB->SGcount = 0; 475 pSRB->SGcount = 0;
483 476
484 return error; 477 return error;
@@ -494,12 +487,10 @@ static void dc390_pci_unmap (struct dc390_srb* pSRB)
494 if (pSRB->SRBFlag) { 487 if (pSRB->SRBFlag) {
495 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE); 488 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, DMA_FROM_DEVICE);
496 DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle)); 489 DEBUG1(printk("%s(): Unmapped sense buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
497 } else if (pcmd->use_sg) { 490 } else {
498 pci_unmap_sg(pdev, pcmd->request_buffer, pcmd->use_sg, pcmd->sc_data_direction); 491 scsi_dma_unmap(pcmd);
499 DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n", __FUNCTION__, pcmd->request_buffer, pcmd->use_sg)); 492 DEBUG1(printk("%s(): Unmapped SG at %p with %d elements\n",
500 } else if (pcmd->request_buffer && pcmd->request_bufflen) { 493 __FUNCTION__, scsi_sglist(pcmd), scsi_sg_count(pcmd)));
501 pci_unmap_sg(pdev, &pSRB->Segmentx, 1, pcmd->sc_data_direction);
502 DEBUG1(printk("%s(): Unmapped request buffer at %x\n", __FUNCTION__, cmdp->saved_dma_handle));
503 } 494 }
504} 495}
505 496
@@ -1153,9 +1144,9 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1153 struct scatterlist *psgl; 1144 struct scatterlist *psgl;
1154 pSRB->TotalXferredLen = 0; 1145 pSRB->TotalXferredLen = 0;
1155 pSRB->SGIndex = 0; 1146 pSRB->SGIndex = 0;
1156 if (pcmd->use_sg) { 1147 if (scsi_sg_count(pcmd)) {
1157 size_t saved; 1148 size_t saved;
1158 pSRB->pSegmentList = (struct scatterlist *)pcmd->request_buffer; 1149 pSRB->pSegmentList = scsi_sglist(pcmd);
1159 psgl = pSRB->pSegmentList; 1150 psgl = pSRB->pSegmentList;
1160 //dc390_pci_sync(pSRB); 1151 //dc390_pci_sync(pSRB);
1161 1152
@@ -1179,12 +1170,6 @@ dc390_restore_ptr (struct dc390_acb* pACB, struct dc390_srb* pSRB)
1179 printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n", 1170 printk (KERN_INFO "DC390: Pointer restored. Segment %i, Total %li, Bus %08lx\n",
1180 pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr); 1171 pSRB->SGIndex, pSRB->Saved_Ptr, pSRB->SGBusAddr);
1181 1172
1182 } else if(pcmd->request_buffer) {
1183 //dc390_pci_sync(pSRB);
1184
1185 sg_dma_len(&pSRB->Segmentx) = pcmd->request_bufflen - pSRB->Saved_Ptr;
1186 pSRB->SGcount = 1;
1187 pSRB->pSegmentList = (struct scatterlist *) &pSRB->Segmentx;
1188 } else { 1173 } else {
1189 pSRB->SGcount = 0; 1174 pSRB->SGcount = 0;
1190 printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n"); 1175 printk (KERN_INFO "DC390: RESTORE_PTR message for Transfer without Scatter-Gather ??\n");
@@ -1612,7 +1597,7 @@ dc390_Reselect( struct dc390_acb* pACB )
1612 if( !( pACB->scan_devices ) ) 1597 if( !( pACB->scan_devices ) )
1613 { 1598 {
1614 struct scsi_cmnd *pcmd = pSRB->pcmd; 1599 struct scsi_cmnd *pcmd = pSRB->pcmd;
1615 pcmd->resid = pcmd->request_bufflen; 1600 scsi_set_resid(pcmd, scsi_bufflen(pcmd));
1616 SET_RES_DID(pcmd->result, DID_SOFT_ERROR); 1601 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
1617 dc390_Going_remove(pDCB, pSRB); 1602 dc390_Going_remove(pDCB, pSRB);
1618 dc390_Free_insert(pACB, pSRB); 1603 dc390_Free_insert(pACB, pSRB);
@@ -1695,7 +1680,7 @@ dc390_RequestSense(struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_
1695 pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN)); 1680 pcmd->cmnd[0], pDCB->TargetID, pDCB->TargetLUN));
1696 1681
1697 pSRB->SRBFlag |= AUTO_REQSENSE; 1682 pSRB->SRBFlag |= AUTO_REQSENSE;
1698 pSRB->SavedSGCount = pcmd->use_sg; 1683 pSRB->SavedSGCount = scsi_sg_count(pcmd);
1699 pSRB->SavedTotXLen = pSRB->TotalXferredLen; 1684 pSRB->SavedTotXLen = pSRB->TotalXferredLen;
1700 pSRB->AdaptStatus = 0; 1685 pSRB->AdaptStatus = 0;
1701 pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */ 1686 pSRB->TargetStatus = 0; /* CHECK_CONDITION<<1; */
@@ -1743,7 +1728,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1743 (u32) pcmd->result, (u32) pSRB->TotalXferredLen)); 1728 (u32) pcmd->result, (u32) pSRB->TotalXferredLen));
1744 } else { 1729 } else {
1745 SET_RES_DRV(pcmd->result, DRIVER_SENSE); 1730 SET_RES_DRV(pcmd->result, DRIVER_SENSE);
1746 pcmd->use_sg = pSRB->SavedSGCount; 1731 scsi_sg_count(pcmd) = pSRB->SavedSGCount;
1747 //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8); 1732 //pSRB->ScsiCmdLen = (u8) (pSRB->Segment1[0] >> 8);
1748 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); 1733 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
1749 pSRB->TotalXferredLen = 0; 1734 pSRB->TotalXferredLen = 0;
@@ -1765,7 +1750,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1765 else if( status_byte(status) == QUEUE_FULL ) 1750 else if( status_byte(status) == QUEUE_FULL )
1766 { 1751 {
1767 scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1); 1752 scsi_track_queue_full(pcmd->device, pDCB->GoingSRBCnt - 1);
1768 pcmd->use_sg = pSRB->SavedSGCount; 1753 scsi_sg_count(pcmd) = pSRB->SavedSGCount;
1769 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun)); 1754 DEBUG0 (printk ("DC390: RETRY pid %li (%02x), target %02i-%02i\n", pcmd->pid, pcmd->cmnd[0], pcmd->device->id, pcmd->device->lun));
1770 pSRB->TotalXferredLen = 0; 1755 pSRB->TotalXferredLen = 0;
1771 SET_RES_DID(pcmd->result, DID_SOFT_ERROR); 1756 SET_RES_DID(pcmd->result, DID_SOFT_ERROR);
@@ -1816,7 +1801,7 @@ dc390_SRBdone( struct dc390_acb* pACB, struct dc390_dcb* pDCB, struct dc390_srb*
1816 } 1801 }
1817 1802
1818cmd_done: 1803cmd_done:
1819 pcmd->resid = pcmd->request_bufflen - pSRB->TotalXferredLen; 1804 scsi_set_resid(pcmd, scsi_bufflen(pcmd) - pSRB->TotalXferredLen);
1820 1805
1821 dc390_Going_remove (pDCB, pSRB); 1806 dc390_Going_remove (pDCB, pSRB);
1822 /* Add to free list */ 1807 /* Add to free list */
diff --git a/drivers/scsi/u14-34f.c b/drivers/scsi/u14-34f.c
index 3de08a15de40..9e8232a1f169 100644
--- a/drivers/scsi/u14-34f.c
+++ b/drivers/scsi/u14-34f.c
@@ -1111,7 +1111,7 @@ static int u14_34f_detect(struct scsi_host_template *tpnt) {
1111static void map_dma(unsigned int i, unsigned int j) { 1111static void map_dma(unsigned int i, unsigned int j) {
1112 unsigned int data_len = 0; 1112 unsigned int data_len = 0;
1113 unsigned int k, count, pci_dir; 1113 unsigned int k, count, pci_dir;
1114 struct scatterlist *sgpnt; 1114 struct scatterlist *sg;
1115 struct mscp *cpp; 1115 struct mscp *cpp;
1116 struct scsi_cmnd *SCpnt; 1116 struct scsi_cmnd *SCpnt;
1117 1117
@@ -1124,33 +1124,28 @@ static void map_dma(unsigned int i, unsigned int j) {
1124 1124
1125 cpp->sense_len = sizeof SCpnt->sense_buffer; 1125 cpp->sense_len = sizeof SCpnt->sense_buffer;
1126 1126
1127 if (!SCpnt->use_sg) { 1127 if (scsi_bufflen(SCpnt)) {
1128 1128 count = scsi_dma_map(SCpnt);
1129 /* If we get here with PCI_DMA_NONE, pci_map_single triggers a BUG() */ 1129 BUG_ON(count < 0);
1130 if (!SCpnt->request_bufflen) pci_dir = PCI_DMA_BIDIRECTIONAL; 1130
1131 1131 scsi_for_each_sg(SCpnt, sg, count, k) {
1132 if (SCpnt->request_buffer) 1132 cpp->sglist[k].address = H2DEV(sg_dma_address(sg));
1133 cpp->data_address = H2DEV(pci_map_single(HD(j)->pdev, 1133 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(sg));
1134 SCpnt->request_buffer, SCpnt->request_bufflen, pci_dir)); 1134 data_len += sg->length;
1135 1135 }
1136 cpp->data_len = H2DEV(SCpnt->request_bufflen); 1136
1137 return; 1137 cpp->sg = TRUE;
1138 } 1138 cpp->use_sg = scsi_sg_count(SCpnt);
1139 1139 cpp->data_address =
1140 sgpnt = (struct scatterlist *) SCpnt->request_buffer; 1140 H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
1141 count = pci_map_sg(HD(j)->pdev, sgpnt, SCpnt->use_sg, pci_dir); 1141 cpp->use_sg * sizeof(struct sg_list),
1142 1142 pci_dir));
1143 for (k = 0; k < count; k++) { 1143 cpp->data_len = H2DEV(data_len);
1144 cpp->sglist[k].address = H2DEV(sg_dma_address(&sgpnt[k])); 1144
1145 cpp->sglist[k].num_bytes = H2DEV(sg_dma_len(&sgpnt[k])); 1145 } else {
1146 data_len += sgpnt[k].length; 1146 pci_dir = PCI_DMA_BIDIRECTIONAL;
1147 } 1147 cpp->data_len = H2DEV(scsi_bufflen(SCpnt));
1148 1148 }
1149 cpp->sg = TRUE;
1150 cpp->use_sg = SCpnt->use_sg;
1151 cpp->data_address = H2DEV(pci_map_single(HD(j)->pdev, cpp->sglist,
1152 SCpnt->use_sg * sizeof(struct sg_list), pci_dir));
1153 cpp->data_len = H2DEV(data_len);
1154} 1149}
1155 1150
1156static void unmap_dma(unsigned int i, unsigned int j) { 1151static void unmap_dma(unsigned int i, unsigned int j) {
@@ -1165,8 +1160,7 @@ static void unmap_dma(unsigned int i, unsigned int j) {
1165 pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr), 1160 pci_unmap_single(HD(j)->pdev, DEV2H(cpp->sense_addr),
1166 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE); 1161 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
1167 1162
1168 if (SCpnt->use_sg) 1163 scsi_dma_unmap(SCpnt);
1169 pci_unmap_sg(HD(j)->pdev, SCpnt->request_buffer, SCpnt->use_sg, pci_dir);
1170 1164
1171 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL; 1165 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
1172 1166
@@ -1187,9 +1181,9 @@ static void sync_dma(unsigned int i, unsigned int j) {
1187 pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr), 1181 pci_dma_sync_single_for_cpu(HD(j)->pdev, DEV2H(cpp->sense_addr),
1188 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE); 1182 DEV2H(cpp->sense_len), PCI_DMA_FROMDEVICE);
1189 1183
1190 if (SCpnt->use_sg) 1184 if (scsi_sg_count(SCpnt))
1191 pci_dma_sync_sg_for_cpu(HD(j)->pdev, SCpnt->request_buffer, 1185 pci_dma_sync_sg_for_cpu(HD(j)->pdev, scsi_sglist(SCpnt),
1192 SCpnt->use_sg, pci_dir); 1186 scsi_sg_count(SCpnt), pci_dir);
1193 1187
1194 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL; 1188 if (!DEV2H(cpp->data_len)) pci_dir = PCI_DMA_BIDIRECTIONAL;
1195 1189
diff --git a/drivers/scsi/ultrastor.c b/drivers/scsi/ultrastor.c
index 56906aba5ee3..c08235d5afc9 100644
--- a/drivers/scsi/ultrastor.c
+++ b/drivers/scsi/ultrastor.c
@@ -675,16 +675,15 @@ static const char *ultrastor_info(struct Scsi_Host * shpnt)
675 675
676static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt) 676static inline void build_sg_list(struct mscp *mscp, struct scsi_cmnd *SCpnt)
677{ 677{
678 struct scatterlist *sl; 678 struct scatterlist *sg;
679 long transfer_length = 0; 679 long transfer_length = 0;
680 int i, max; 680 int i, max;
681 681
682 sl = (struct scatterlist *) SCpnt->request_buffer; 682 max = scsi_sg_count(SCpnt);
683 max = SCpnt->use_sg; 683 scsi_for_each_sg(SCpnt, sg, max, i) {
684 for (i = 0; i < max; i++) { 684 mscp->sglist[i].address = isa_page_to_bus(sg->page) + sg->offset;
685 mscp->sglist[i].address = isa_page_to_bus(sl[i].page) + sl[i].offset; 685 mscp->sglist[i].num_bytes = sg->length;
686 mscp->sglist[i].num_bytes = sl[i].length; 686 transfer_length += sg->length;
687 transfer_length += sl[i].length;
688 } 687 }
689 mscp->number_of_sg_list = max; 688 mscp->number_of_sg_list = max;
690 mscp->transfer_data = isa_virt_to_bus(mscp->sglist); 689 mscp->transfer_data = isa_virt_to_bus(mscp->sglist);
@@ -730,15 +729,15 @@ static int ultrastor_queuecommand(struct scsi_cmnd *SCpnt,
730 my_mscp->target_id = SCpnt->device->id; 729 my_mscp->target_id = SCpnt->device->id;
731 my_mscp->ch_no = 0; 730 my_mscp->ch_no = 0;
732 my_mscp->lun = SCpnt->device->lun; 731 my_mscp->lun = SCpnt->device->lun;
733 if (SCpnt->use_sg) { 732 if (scsi_sg_count(SCpnt)) {
734 /* Set scatter/gather flag in SCSI command packet */ 733 /* Set scatter/gather flag in SCSI command packet */
735 my_mscp->sg = TRUE; 734 my_mscp->sg = TRUE;
736 build_sg_list(my_mscp, SCpnt); 735 build_sg_list(my_mscp, SCpnt);
737 } else { 736 } else {
738 /* Unset scatter/gather flag in SCSI command packet */ 737 /* Unset scatter/gather flag in SCSI command packet */
739 my_mscp->sg = FALSE; 738 my_mscp->sg = FALSE;
740 my_mscp->transfer_data = isa_virt_to_bus(SCpnt->request_buffer); 739 my_mscp->transfer_data = isa_virt_to_bus(scsi_sglist(SCpnt));
741 my_mscp->transfer_data_length = SCpnt->request_bufflen; 740 my_mscp->transfer_data_length = scsi_bufflen(SCpnt);
742 } 741 }
743 my_mscp->command_link = 0; /*???*/ 742 my_mscp->command_link = 0; /*???*/
744 my_mscp->scsi_command_link_id = 0; /*???*/ 743 my_mscp->scsi_command_link_id = 0; /*???*/
diff --git a/drivers/scsi/wd7000.c b/drivers/scsi/wd7000.c
index 30be76514c43..d6fd4259c56b 100644
--- a/drivers/scsi/wd7000.c
+++ b/drivers/scsi/wd7000.c
@@ -1091,6 +1091,7 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
1091 unchar *cdb = (unchar *) SCpnt->cmnd; 1091 unchar *cdb = (unchar *) SCpnt->cmnd;
1092 unchar idlun; 1092 unchar idlun;
1093 short cdblen; 1093 short cdblen;
1094 int nseg;
1094 Adapter *host = (Adapter *) SCpnt->device->host->hostdata; 1095 Adapter *host = (Adapter *) SCpnt->device->host->hostdata;
1095 1096
1096 cdblen = SCpnt->cmd_len; 1097 cdblen = SCpnt->cmd_len;
@@ -1106,28 +1107,29 @@ static int wd7000_queuecommand(struct scsi_cmnd *SCpnt,
1106 SCpnt->host_scribble = (unchar *) scb; 1107 SCpnt->host_scribble = (unchar *) scb;
1107 scb->host = host; 1108 scb->host = host;
1108 1109
1109 if (SCpnt->use_sg) { 1110 nseg = scsi_sg_count(SCpnt);
1110 struct scatterlist *sg = (struct scatterlist *) SCpnt->request_buffer; 1111 if (nseg) {
1112 struct scatterlist *sg;
1111 unsigned i; 1113 unsigned i;
1112 1114
1113 if (SCpnt->device->host->sg_tablesize == SG_NONE) { 1115 if (SCpnt->device->host->sg_tablesize == SG_NONE) {
1114 panic("wd7000_queuecommand: scatter/gather not supported.\n"); 1116 panic("wd7000_queuecommand: scatter/gather not supported.\n");
1115 } 1117 }
1116 dprintk("Using scatter/gather with %d elements.\n", SCpnt->use_sg); 1118 dprintk("Using scatter/gather with %d elements.\n", nseg);
1117 1119
1118 sgb = scb->sgb; 1120 sgb = scb->sgb;
1119 scb->op = 1; 1121 scb->op = 1;
1120 any2scsi(scb->dataptr, (int) sgb); 1122 any2scsi(scb->dataptr, (int) sgb);
1121 any2scsi(scb->maxlen, SCpnt->use_sg * sizeof(Sgb)); 1123 any2scsi(scb->maxlen, nseg * sizeof(Sgb));
1122 1124
1123 for (i = 0; i < SCpnt->use_sg; i++) { 1125 scsi_for_each_sg(SCpnt, sg, nseg, i) {
1124 any2scsi(sgb[i].ptr, isa_page_to_bus(sg[i].page) + sg[i].offset); 1126 any2scsi(sgb[i].ptr, isa_page_to_bus(sg->page) + sg->offset);
1125 any2scsi(sgb[i].len, sg[i].length); 1127 any2scsi(sgb[i].len, sg->length);
1126 } 1128 }
1127 } else { 1129 } else {
1128 scb->op = 0; 1130 scb->op = 0;
1129 any2scsi(scb->dataptr, isa_virt_to_bus(SCpnt->request_buffer)); 1131 any2scsi(scb->dataptr, isa_virt_to_bus(scsi_sglist(SCpnt)));
1130 any2scsi(scb->maxlen, SCpnt->request_bufflen); 1132 any2scsi(scb->maxlen, scsi_bufflen(SCpnt));
1131 } 1133 }
1132 1134
1133 /* FIXME: drop lock and yield here ? */ 1135 /* FIXME: drop lock and yield here ? */
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index a2e0c1032491..53e170586c26 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -135,4 +135,24 @@ extern void scsi_kunmap_atomic_sg(void *virt);
135extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t); 135extern struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *, gfp_t);
136extern void scsi_free_sgtable(struct scatterlist *, int); 136extern void scsi_free_sgtable(struct scatterlist *, int);
137 137
138extern int scsi_dma_map(struct scsi_cmnd *cmd);
139extern void scsi_dma_unmap(struct scsi_cmnd *cmd);
140
141#define scsi_sg_count(cmd) ((cmd)->use_sg)
142#define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer)
143#define scsi_bufflen(cmd) ((cmd)->request_bufflen)
144
145static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
146{
147 cmd->resid = resid;
148}
149
150static inline int scsi_get_resid(struct scsi_cmnd *cmd)
151{
152 return cmd->resid;
153}
154
155#define scsi_for_each_sg(cmd, sg, nseg, __i) \
156 for (__i = 0, sg = scsi_sglist(cmd); __i < (nseg); __i++, (sg)++)
157
138#endif /* _SCSI_SCSI_CMND_H */ 158#endif /* _SCSI_SCSI_CMND_H */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 68f461b7a835..4a2e490abfed 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -339,12 +339,6 @@ struct scsi_host_template {
339 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *); 339 enum scsi_eh_timer_return (* eh_timed_out)(struct scsi_cmnd *);
340 340
341 /* 341 /*
342 * suspend support
343 */
344 int (*resume)(struct scsi_device *);
345 int (*suspend)(struct scsi_device *, pm_message_t state);
346
347 /*
348 * Name of proc directory 342 * Name of proc directory
349 */ 343 */
350 char *proc_name; 344 char *proc_name;
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index 1e797308640a..a0d80bcaa93d 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -1,4 +1,4 @@
1/* 1/*
2 * FiberChannel transport specific attributes exported to sysfs. 2 * FiberChannel transport specific attributes exported to sysfs.
3 * 3 *
4 * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved. 4 * Copyright (c) 2003 Silicon Graphics, Inc. All rights reserved.
@@ -19,7 +19,7 @@
19 * 19 *
20 * ======== 20 * ========
21 * 21 *
22 * Copyright (C) 2004-2005 James Smart, Emulex Corporation 22 * Copyright (C) 2004-2007 James Smart, Emulex Corporation
23 * Rewrite for host, target, device, and remote port attributes, 23 * Rewrite for host, target, device, and remote port attributes,
24 * statistics, and service functions... 24 * statistics, and service functions...
25 * 25 *
@@ -62,8 +62,10 @@ enum fc_port_type {
62 FC_PORTTYPE_NLPORT, /* (Public) Loop w/ FLPort */ 62 FC_PORTTYPE_NLPORT, /* (Public) Loop w/ FLPort */
63 FC_PORTTYPE_LPORT, /* (Private) Loop w/o FLPort */ 63 FC_PORTTYPE_LPORT, /* (Private) Loop w/o FLPort */
64 FC_PORTTYPE_PTP, /* Point to Point w/ another NPort */ 64 FC_PORTTYPE_PTP, /* Point to Point w/ another NPort */
65 FC_PORTTYPE_NPIV, /* VPORT based on NPIV */
65}; 66};
66 67
68
67/* 69/*
68 * fc_port_state: If you alter this, you also need to alter scsi_transport_fc.c 70 * fc_port_state: If you alter this, you also need to alter scsi_transport_fc.c
69 * (for the ascii descriptions). 71 * (for the ascii descriptions).
@@ -83,7 +85,26 @@ enum fc_port_state {
83}; 85};
84 86
85 87
86/* 88/*
89 * fc_vport_state: If you alter this, you also need to alter
90 * scsi_transport_fc.c (for the ascii descriptions).
91 */
92enum fc_vport_state {
93 FC_VPORT_UNKNOWN,
94 FC_VPORT_ACTIVE,
95 FC_VPORT_DISABLED,
96 FC_VPORT_LINKDOWN,
97 FC_VPORT_INITIALIZING,
98 FC_VPORT_NO_FABRIC_SUPP,
99 FC_VPORT_NO_FABRIC_RSCS,
100 FC_VPORT_FABRIC_LOGOUT,
101 FC_VPORT_FABRIC_REJ_WWN,
102 FC_VPORT_FAILED,
103};
104
105
106
107/*
87 * FC Classes of Service 108 * FC Classes of Service
88 * Note: values are not enumerated, as they can be "or'd" together 109 * Note: values are not enumerated, as they can be "or'd" together
89 * for reporting (e.g. report supported_classes). If you alter this list, 110 * for reporting (e.g. report supported_classes). If you alter this list,
@@ -96,7 +117,7 @@ enum fc_port_state {
96#define FC_COS_CLASS4 0x10 117#define FC_COS_CLASS4 0x10
97#define FC_COS_CLASS6 0x40 118#define FC_COS_CLASS6 0x40
98 119
99/* 120/*
100 * FC Port Speeds 121 * FC Port Speeds
101 * Note: values are not enumerated, as they can be "or'd" together 122 * Note: values are not enumerated, as they can be "or'd" together
102 * for reporting (e.g. report supported_speeds). If you alter this list, 123 * for reporting (e.g. report supported_speeds). If you alter this list,
@@ -124,16 +145,114 @@ enum fc_tgtid_binding_type {
124}; 145};
125 146
126/* 147/*
127 * FC Remote Port Roles 148 * FC Port Roles
128 * Note: values are not enumerated, as they can be "or'd" together 149 * Note: values are not enumerated, as they can be "or'd" together
129 * for reporting (e.g. report roles). If you alter this list, 150 * for reporting (e.g. report roles). If you alter this list,
130 * you also need to alter scsi_transport_fc.c (for the ascii descriptions). 151 * you also need to alter scsi_transport_fc.c (for the ascii descriptions).
131 */ 152 */
132#define FC_RPORT_ROLE_UNKNOWN 0x00 153#define FC_PORT_ROLE_UNKNOWN 0x00
133#define FC_RPORT_ROLE_FCP_TARGET 0x01 154#define FC_PORT_ROLE_FCP_TARGET 0x01
134#define FC_RPORT_ROLE_FCP_INITIATOR 0x02 155#define FC_PORT_ROLE_FCP_INITIATOR 0x02
135#define FC_RPORT_ROLE_IP_PORT 0x04 156#define FC_PORT_ROLE_IP_PORT 0x04
157
158/* The following are for compatibility */
159#define FC_RPORT_ROLE_UNKNOWN FC_PORT_ROLE_UNKNOWN
160#define FC_RPORT_ROLE_FCP_TARGET FC_PORT_ROLE_FCP_TARGET
161#define FC_RPORT_ROLE_FCP_INITIATOR FC_PORT_ROLE_FCP_INITIATOR
162#define FC_RPORT_ROLE_IP_PORT FC_PORT_ROLE_IP_PORT
163
164
165/* Macro for use in defining Virtual Port attributes */
166#define FC_VPORT_ATTR(_name,_mode,_show,_store) \
167struct class_device_attribute class_device_attr_vport_##_name = \
168 __ATTR(_name,_mode,_show,_store)
169
170
171/*
172 * FC Virtual Port Attributes
173 *
 174 * This structure exists for each FC port that is a virtual FC port. Virtual
 175 * ports share the physical link with the Physical port. Each virtual
 176 * port has a unique presence on the SAN, and may be instantiated via
 177 * NPIV, Virtual Fabrics, or via additional ALPAs. As the vport is a
 178 * unique presence, each vport has its own view of the fabric,
 179 * authentication privilege, and priorities.
180 *
 181 * A virtual port may support 1 or more FC4 roles. Typically it is an
 182 * FCP Initiator. It could be an FCP Target, or exist solely for an IP
 183 * over FC role. FC port attributes for the vport will be reported on any
184 * fc_host class object allocated for an FCP Initiator.
185 *
186 * --
187 *
188 * Fixed attributes are not expected to change. The driver is
189 * expected to set these values after receiving the fc_vport structure
190 * via the vport_create() call from the transport.
191 * The transport fully manages all get functions w/o driver interaction.
192 *
193 * Dynamic attributes are expected to change. The driver participates
194 * in all get/set operations via functions provided by the driver.
195 *
196 * Private attributes are transport-managed values. They are fully
197 * managed by the transport w/o driver interaction.
198 */
136 199
200#define FC_VPORT_SYMBOLIC_NAMELEN 64
201struct fc_vport {
202 /* Fixed Attributes */
203
204 /* Dynamic Attributes */
205
206 /* Private (Transport-managed) Attributes */
207 enum fc_vport_state vport_state;
208 enum fc_vport_state vport_last_state;
209 u64 node_name;
210 u64 port_name;
211 u32 roles;
212 u32 vport_id; /* Admin Identifier for the vport */
213 enum fc_port_type vport_type;
214 char symbolic_name[FC_VPORT_SYMBOLIC_NAMELEN];
215
216 /* exported data */
217 void *dd_data; /* Used for driver-specific storage */
218
219 /* internal data */
220 struct Scsi_Host *shost; /* Physical Port Parent */
221 unsigned int channel;
222 u32 number;
223 u8 flags;
224 struct list_head peers;
225 struct device dev;
226 struct work_struct vport_delete_work;
227} __attribute__((aligned(sizeof(unsigned long))));
228
229/* bit field values for struct fc_vport "flags" field: */
230#define FC_VPORT_CREATING 0x01
231#define FC_VPORT_DELETING 0x02
232#define FC_VPORT_DELETED 0x04
233#define FC_VPORT_DEL 0x06 /* Any DELETE state */
234
235#define dev_to_vport(d) \
236 container_of(d, struct fc_vport, dev)
237#define transport_class_to_vport(classdev) \
238 dev_to_vport(classdev->dev)
239#define vport_to_shost(v) \
240 (v->shost)
241#define vport_to_shost_channel(v) \
242 (v->channel)
243#define vport_to_parent(v) \
244 (v->dev.parent)
245
246
247/* Error return codes for vport_create() callback */
248#define VPCERR_UNSUPPORTED -ENOSYS /* no driver/adapter
249 support */
250#define VPCERR_BAD_WWN -ENOTUNIQ /* driver validation
251 of WWNs failed */
252#define VPCERR_NO_FABRIC_SUPP -EOPNOTSUPP /* Fabric connection
253 is loop or the
254 Fabric Port does
255 not support NPIV */
137 256
138/* 257/*
139 * fc_rport_identifiers: This set of data contains all elements 258 * fc_rport_identifiers: This set of data contains all elements
@@ -149,6 +268,7 @@ struct fc_rport_identifiers {
149 u32 roles; 268 u32 roles;
150}; 269};
151 270
271
152/* Macro for use in defining Remote Port attributes */ 272/* Macro for use in defining Remote Port attributes */
153#define FC_RPORT_ATTR(_name,_mode,_show,_store) \ 273#define FC_RPORT_ATTR(_name,_mode,_show,_store) \
154struct class_device_attribute class_device_attr_rport_##_name = \ 274struct class_device_attribute class_device_attr_rport_##_name = \
@@ -278,7 +398,7 @@ struct fc_host_statistics {
278 u64 prim_seq_protocol_err_count; 398 u64 prim_seq_protocol_err_count;
279 u64 invalid_tx_word_count; 399 u64 invalid_tx_word_count;
280 u64 invalid_crc_count; 400 u64 invalid_crc_count;
281 401
282 /* fc4 statistics (only FCP supported currently) */ 402 /* fc4 statistics (only FCP supported currently) */
283 u64 fcp_input_requests; 403 u64 fcp_input_requests;
284 u64 fcp_output_requests; 404 u64 fcp_output_requests;
@@ -343,6 +463,7 @@ struct fc_host_attrs {
343 u8 supported_fc4s[FC_FC4_LIST_SIZE]; 463 u8 supported_fc4s[FC_FC4_LIST_SIZE];
344 u32 supported_speeds; 464 u32 supported_speeds;
345 u32 maxframe_size; 465 u32 maxframe_size;
466 u16 max_npiv_vports;
346 char serial_number[FC_SERIAL_NUMBER_SIZE]; 467 char serial_number[FC_SERIAL_NUMBER_SIZE];
347 468
348 /* Dynamic Attributes */ 469 /* Dynamic Attributes */
@@ -361,8 +482,11 @@ struct fc_host_attrs {
361 /* internal data */ 482 /* internal data */
362 struct list_head rports; 483 struct list_head rports;
363 struct list_head rport_bindings; 484 struct list_head rport_bindings;
485 struct list_head vports;
364 u32 next_rport_number; 486 u32 next_rport_number;
365 u32 next_target_id; 487 u32 next_target_id;
488 u32 next_vport_number;
489 u16 npiv_vports_inuse;
366 490
367 /* work queues for rport state manipulation */ 491 /* work queues for rport state manipulation */
368 char work_q_name[KOBJ_NAME_LEN]; 492 char work_q_name[KOBJ_NAME_LEN];
@@ -388,6 +512,8 @@ struct fc_host_attrs {
388 (((struct fc_host_attrs *)(x)->shost_data)->supported_speeds) 512 (((struct fc_host_attrs *)(x)->shost_data)->supported_speeds)
389#define fc_host_maxframe_size(x) \ 513#define fc_host_maxframe_size(x) \
390 (((struct fc_host_attrs *)(x)->shost_data)->maxframe_size) 514 (((struct fc_host_attrs *)(x)->shost_data)->maxframe_size)
515#define fc_host_max_npiv_vports(x) \
516 (((struct fc_host_attrs *)(x)->shost_data)->max_npiv_vports)
391#define fc_host_serial_number(x) \ 517#define fc_host_serial_number(x) \
392 (((struct fc_host_attrs *)(x)->shost_data)->serial_number) 518 (((struct fc_host_attrs *)(x)->shost_data)->serial_number)
393#define fc_host_port_id(x) \ 519#define fc_host_port_id(x) \
@@ -412,10 +538,16 @@ struct fc_host_attrs {
412 (((struct fc_host_attrs *)(x)->shost_data)->rports) 538 (((struct fc_host_attrs *)(x)->shost_data)->rports)
413#define fc_host_rport_bindings(x) \ 539#define fc_host_rport_bindings(x) \
414 (((struct fc_host_attrs *)(x)->shost_data)->rport_bindings) 540 (((struct fc_host_attrs *)(x)->shost_data)->rport_bindings)
541#define fc_host_vports(x) \
542 (((struct fc_host_attrs *)(x)->shost_data)->vports)
415#define fc_host_next_rport_number(x) \ 543#define fc_host_next_rport_number(x) \
416 (((struct fc_host_attrs *)(x)->shost_data)->next_rport_number) 544 (((struct fc_host_attrs *)(x)->shost_data)->next_rport_number)
417#define fc_host_next_target_id(x) \ 545#define fc_host_next_target_id(x) \
418 (((struct fc_host_attrs *)(x)->shost_data)->next_target_id) 546 (((struct fc_host_attrs *)(x)->shost_data)->next_target_id)
547#define fc_host_next_vport_number(x) \
548 (((struct fc_host_attrs *)(x)->shost_data)->next_vport_number)
549#define fc_host_npiv_vports_inuse(x) \
550 (((struct fc_host_attrs *)(x)->shost_data)->npiv_vports_inuse)
419#define fc_host_work_q_name(x) \ 551#define fc_host_work_q_name(x) \
420 (((struct fc_host_attrs *)(x)->shost_data)->work_q_name) 552 (((struct fc_host_attrs *)(x)->shost_data)->work_q_name)
421#define fc_host_work_q(x) \ 553#define fc_host_work_q(x) \
@@ -452,14 +584,20 @@ struct fc_function_template {
452 void (*dev_loss_tmo_callbk)(struct fc_rport *); 584 void (*dev_loss_tmo_callbk)(struct fc_rport *);
453 void (*terminate_rport_io)(struct fc_rport *); 585 void (*terminate_rport_io)(struct fc_rport *);
454 586
587 void (*set_vport_symbolic_name)(struct fc_vport *);
588 int (*vport_create)(struct fc_vport *, bool);
589 int (*vport_disable)(struct fc_vport *, bool);
590 int (*vport_delete)(struct fc_vport *);
591
455 /* allocation lengths for host-specific data */ 592 /* allocation lengths for host-specific data */
456 u32 dd_fcrport_size; 593 u32 dd_fcrport_size;
594 u32 dd_fcvport_size;
457 595
458 /* 596 /*
459 * The driver sets these to tell the transport class it 597 * The driver sets these to tell the transport class it
460 * wants the attributes displayed in sysfs. If the show_ flag 598 * wants the attributes displayed in sysfs. If the show_ flag
461 * is not set, the attribute will be private to the transport 599 * is not set, the attribute will be private to the transport
462 * class 600 * class
463 */ 601 */
464 602
465 /* remote port fixed attributes */ 603 /* remote port fixed attributes */
@@ -512,7 +650,7 @@ fc_remote_port_chkready(struct fc_rport *rport)
512 650
513 switch (rport->port_state) { 651 switch (rport->port_state) {
514 case FC_PORTSTATE_ONLINE: 652 case FC_PORTSTATE_ONLINE:
515 if (rport->roles & FC_RPORT_ROLE_FCP_TARGET) 653 if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
516 result = 0; 654 result = 0;
517 else if (rport->flags & FC_RPORT_DEVLOSS_PENDING) 655 else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
518 result = DID_IMM_RETRY << 16; 656 result = DID_IMM_RETRY << 16;
@@ -549,6 +687,27 @@ static inline void u64_to_wwn(u64 inm, u8 *wwn)
549 wwn[7] = inm & 0xff; 687 wwn[7] = inm & 0xff;
550} 688}
551 689
690/**
691 * fc_vport_set_state() - called to set a vport's state. Saves the old state,
692 * excepting the transitory states of initializing and sending the ELS
693 * traffic to instantiate the vport on the link.
694 *
695 * Assumes the driver has surrounded this with the proper locking to ensure
696 * a coherent state change.
697 *
698 * @vport: virtual port whose state is changing
699 * @new_state: new state
700 **/
701static inline void
702fc_vport_set_state(struct fc_vport *vport, enum fc_vport_state new_state)
703{
704 if ((new_state != FC_VPORT_UNKNOWN) &&
705 (new_state != FC_VPORT_INITIALIZING))
706 vport->vport_last_state = vport->vport_state;
707 vport->vport_state = new_state;
708}
709
710
552struct scsi_transport_template *fc_attach_transport( 711struct scsi_transport_template *fc_attach_transport(
553 struct fc_function_template *); 712 struct fc_function_template *);
554void fc_release_transport(struct scsi_transport_template *); 713void fc_release_transport(struct scsi_transport_template *);
@@ -567,5 +726,6 @@ void fc_host_post_vendor_event(struct Scsi_Host *shost, u32 event_number,
567 * be sure to read the Vendor Type and ID formatting requirements 726 * be sure to read the Vendor Type and ID formatting requirements
568 * specified in scsi_netlink.h 727 * specified in scsi_netlink.h
569 */ 728 */
729int fc_vport_terminate(struct fc_vport *vport);
570 730
571#endif /* SCSI_TRANSPORT_FC_H */ 731#endif /* SCSI_TRANSPORT_FC_H */